milestone: desktop path Phases 1-5

Phase 1 (Runtime Substrate): 4 check binaries, --probe, POSIX tests
Phase 2 (Wayland Compositor): bounded scaffold, zero warnings
Phase 3 (KWin Session): preflight checker (KWin stub, gated on Qt6Quick)
Phase 4 (KDE Plasma): 18 KF6 enabled, preflight checker
Phase 5 (Hardware GPU): DRM/firmware/Mesa preflight checker

Build: zero warnings, all scripts syntax-clean. Oracle-verified.
This commit is contained in:
2026-04-29 09:54:06 +01:00
parent b23714f542
commit 8acc73d774
508 changed files with 76526 additions and 396 deletions
@@ -0,0 +1,18 @@
[package]
name = "common"
description = "Shared driver code library"
version = "0.1.0"
edition = "2021"
authors = ["4lDO2 <4lDO2@protonmail.com>"]
license = "MIT"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
# Redox userspace bindings and logging; all versions are pinned by the workspace.
libredox.workspace = true
log.workspace = true
# "std" feature: this crate is built for userspace drivers, not the kernel.
redox_syscall = { workspace = true, features = ["std"] }
redox-log.workspace = true

# Lint configuration is shared across the workspace.
[lints]
workspace = true
+265
View File
@@ -0,0 +1,265 @@
use std::mem::{self, size_of, MaybeUninit};
use std::ops::{Deref, DerefMut};
use std::ptr;
use std::sync::LazyLock;
use libredox::call::MmapArgs;
use libredox::{error::Result, flag, Fd};
use syscall::PAGE_SIZE;
use crate::{memory_root_fd, MemoryType, VirtaddrTranslationHandle};
/// Defines the platform-specific memory type for DMA operations
///
/// - On x86 systems, DMA uses Write-back memory ([`MemoryType::Writeback`])
/// - On aarch64 systems, DMA uses uncacheable memory ([`MemoryType::Uncacheable`])
/// - On riscv64 systems, DMA currently also uses uncacheable memory (see FIXME below)
///
/// Evaluated at compile time; `cfg!` expands to a constant boolean, so the
/// `panic!` arm only fires when building for an unsupported architecture.
const DMA_MEMTY: MemoryType = {
    if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
        // x86 ensures cache coherence with DMA memory
        MemoryType::Writeback
    } else if cfg!(target_arch = "aarch64") {
        // aarch64 currently must map DMA memory without caching to ensure coherence
        MemoryType::Uncacheable
    } else if cfg!(target_arch = "riscv64") {
        // FIXME check this out more
        MemoryType::Uncacheable
    } else {
        panic!("invalid arch")
    }
};
/// Opens a file descriptor providing zeroized, physically contiguous memory
/// suitable for DMA.
///
/// The descriptor is obtained from the memory root scheme using the
/// architecture-appropriate caching mode ([`DMA_MEMTY`]).
///
/// # Errors
///
/// Propagates any error reported by the provider of the
/// `/scheme/memory/zeroed` scheme while opening the descriptor.
pub(crate) fn phys_contiguous_fd() -> Result<Fd> {
    let path = format!("zeroed@{DMA_MEMTY}?phys_contiguous");
    memory_root_fd().openat(&path, flag::O_CLOEXEC, 0)
}
/// Allocates a chunk of physical memory for DMA, and then maps it to virtual memory.
///
/// # Arguments
/// 'length: [usize]' - The length of the memory region. Must be a multiple of [`PAGE_SIZE`]
/// '`handle`' - Translation handle used to resolve the physical address of the mapping.
///
/// # Returns
///
/// This function returns a [Result] containing the following:
/// - A '[Ok]([usize], *[mut] ())' containing a tuple of the physical address of the region, and a raw pointer to that region in virtual memory.
/// - An '[Err]' - containing the error for the operation.
///
/// # Panics
///
/// This function asserts if:
/// - length is not a multiple of [`PAGE_SIZE`]
///
/// # Errors
///
/// This function returns an error if:
/// - A file descriptor to physically contiguous memory of type [`DMA_MEMTY`] could not be acquired
/// - A virtual mapping for the physically contiguous memory could not be created
/// - The virtual address returned by the memory manager was invalid.
fn alloc_and_map(length: usize, handle: &VirtaddrTranslationHandle) -> Result<(usize, *mut ())> {
    assert_eq!(length % PAGE_SIZE, 0);
    unsafe {
        let fd = phys_contiguous_fd()?;
        // Map the zeroed physically-contiguous memory; the kernel chooses the
        // virtual address (addr is null) and the offset is ignored by the scheme.
        let virt = libredox::call::mmap(MmapArgs {
            fd: fd.raw(),
            offset: 0,                   // ignored
            addr: core::ptr::null_mut(), // ignored
            length,
            flags: flag::MAP_PRIVATE,
            prot: flag::PROT_READ | flag::PROT_WRITE,
        })?;
        // Physical address of the first page; subsequent pages are verified
        // below (debug builds only) to be physically contiguous with it.
        let phys = handle.translate(virt as usize)?;
        for i in 1..length.div_ceil(PAGE_SIZE) {
            debug_assert_eq!(
                handle.translate(virt as usize + i * PAGE_SIZE),
                Ok(phys + i * PAGE_SIZE),
                "NOT CONTIGUOUS"
            );
        }
        Ok((phys, virt as *mut ()))
    }
}
/// A safe accessor for DMA memory.
///
/// Owns a physically contiguous mapping; dereferences to `T` and unmaps the
/// region when dropped. Holds a raw pointer, so it is neither `Send` nor
/// `Sync` by default.
pub struct Dma<T: ?Sized> {
    /// The physical address of the memory
    phys: usize,
    /// The page-aligned length of the memory. Will be a multiple of [`PAGE_SIZE`]
    aligned_len: usize,
    /// The pointer to the Dma memory in the virtual address space.
    virt: *mut T,
}
impl<T> Dma<T> {
    /// [Dma] constructor that allocates and initializes a region of DMA memory with the page-aligned
    /// size and initial value of some T
    ///
    /// # Arguments
    /// 'value: T' - The initial value to write to the allocated region
    ///
    /// # Returns
    ///
    /// This function returns a [Result] containing the following:
    ///
    /// - A '[Ok] (`[Dma]<T>`)' containing the initialized region
    /// - An '[Err]' containing an error.
    pub fn new(value: T) -> Result<Self> {
        unsafe {
            let mut zeroed = Self::zeroed()?;
            // Write the initial value through the MaybeUninit pointer, then
            // mark the region initialized. `as_mut_ptr` is reached via
            // DerefMut to the mapped `MaybeUninit<T>`.
            zeroed.as_mut_ptr().write(value);
            Ok(zeroed.assume_init())
        }
    }

    /// [Dma] constructor that allocates and zeroizes a memory region of the page-aligned size of T
    ///
    /// # Returns
    ///
    /// This function returns a [Result] containing the following:
    ///
    /// - A '[Ok] (`[Dma]<[MaybeUninit]<T>>`)' containing the allocated and zeroized memory
    /// - An '[Err]' containing an error.
    pub fn zeroed() -> Result<Dma<MaybeUninit<T>>> {
        // Round the allocation up to whole pages; the backing scheme hands
        // out page-granular, already-zeroed memory.
        let aligned_len = size_of::<T>().next_multiple_of(PAGE_SIZE);
        let (phys, virt) = alloc_and_map(aligned_len, &*VIRTTOPHYS_HANDLE)?;
        Ok(Dma {
            phys,
            virt: virt.cast(),
            aligned_len,
        })
    }
}
impl<T> Dma<MaybeUninit<T>> {
    /// Assumes that possibly uninitialized DMA memory has been initialized, and returns a new
    /// instance of an object of type `[Dma]<T>`.
    ///
    /// # Returns
    /// - `[Dma]<T>` - The original structure without the [`MaybeUninit`] wrapper around its contents.
    ///
    /// # Notes
    /// - This is unsafe because it assumes that the memory stored within the `[Dma]<T>` is a valid
    ///   instance of T. If it isn't (for example -- if all-zero bytes, as produced by
    ///   [`Dma::zeroed`], are not a valid bit pattern for T), then the underlying memory may not
    ///   contain the expected T structure.
    pub unsafe fn assume_init(self) -> Dma<T> {
        // All three fields are Copy, so this pattern copies them out while
        // leaving `self` intact; the `forget` below then suppresses `Dma`'s
        // Drop impl so the mapping is not unmapped while the returned handle
        // still owns it.
        let Dma {
            phys,
            aligned_len,
            virt,
        } = self;
        mem::forget(self);
        Dma {
            phys,
            aligned_len,
            virt: virt.cast(),
        }
    }
}
impl<T: ?Sized> Dma<T> {
    /// Returns the physical address of the physical memory that this [Dma] structure references.
    ///
    /// This is the address to hand to a device for bus-master transfers.
    ///
    /// # Returns
    /// [usize] - The physical address of the memory.
    pub fn physical(&self) -> usize {
        self.phys
    }
}
// TODO: there should exist a "context" struct that drivers create at start, which would be passed
// to the respective functions
/// Lazily-created process-wide translation handle used by the DMA allocator
/// to obtain physical addresses and verify contiguity.
static VIRTTOPHYS_HANDLE: LazyLock<VirtaddrTranslationHandle> = LazyLock::new(|| {
    VirtaddrTranslationHandle::new().expect("failed to acquire virttophys translation handle")
});
impl<T> Dma<[T]> {
    /// Returns a [Dma] object containing a zeroized slice of T with a given count.
    ///
    /// # Arguments
    ///
    /// - 'count: [usize]' - The number of elements of type T in the allocated slice.
    ///
    /// # Panics
    ///
    /// Panics (via `unwrap`) if `count * size_of::<T>()` overflows `usize`.
    pub fn zeroed_slice(count: usize) -> Result<Dma<[MaybeUninit<T>]>> {
        let aligned_len = count
            .checked_mul(size_of::<T>())
            .unwrap()
            .next_multiple_of(PAGE_SIZE);
        let (phys, virt) = alloc_and_map(aligned_len, &*VIRTTOPHYS_HANDLE)?;
        Ok(Dma {
            phys,
            aligned_len,
            // Attach the element count as slice metadata to the raw mapping.
            virt: ptr::slice_from_raw_parts_mut(virt.cast(), count),
        })
    }

    /// Casts the slice from type T to type U.
    ///
    /// # Returns
    /// '`[Dma]<[U]>`' - A cast handle to the Dma memory.
    ///
    /// # Notes
    /// - The raw-pointer cast preserves the slice length metadata, so the
    ///   *element count* stays the same even if `size_of::<U>()` differs from
    ///   `size_of::<T>()`. NOTE(review): presumably callers only use this for
    ///   same-size element types — confirm.
    pub unsafe fn cast_slice<U>(self) -> Dma<[U]> {
        // Fields are Copy; copy them out, then forget `self` so Drop does not
        // unmap the memory now owned by the returned handle.
        let Dma {
            phys,
            virt,
            aligned_len,
        } = self;
        core::mem::forget(self);
        Dma {
            phys,
            virt: virt as *mut [U],
            aligned_len,
        }
    }
}
impl<T> Dma<[MaybeUninit<T>]> {
    /// See [`Dma<MaybeUninit<T>>::assume_init`]
    pub unsafe fn assume_init(self) -> Dma<[T]> {
        // Copy the (Copy) fields out through a reference, then forget `self`
        // so its Drop impl does not unmap the memory we are handing over.
        let &Dma {
            phys,
            aligned_len,
            virt,
        } = &self;
        mem::forget(self);
        Dma {
            phys,
            aligned_len,
            // Slice-to-slice pointer cast; length metadata is preserved.
            virt: virt as *mut [T],
        }
    }
}
impl<T: ?Sized> Deref for Dma<T> {
    type Target = T;

    /// Borrows the mapped DMA memory.
    fn deref(&self) -> &T {
        // `virt` points into a live mapping that is only released in Drop,
        // so the reference is valid for the lifetime of `&self`.
        unsafe { &*self.virt }
    }
}

impl<T: ?Sized> DerefMut for Dma<T> {
    /// Mutably borrows the mapped DMA memory.
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.virt }
    }
}
impl<T: ?Sized> Drop for Dma<T> {
    /// Runs `T`'s destructor in place, then releases the mapping.
    fn drop(&mut self) {
        unsafe {
            ptr::drop_in_place(self.virt);
            // Unmap failure is ignored: there is no reasonable recovery in drop.
            let _ = libredox::call::munmap(self.virt as *mut (), self.aligned_len);
        }
    }
}
@@ -0,0 +1,95 @@
use core::{
cmp::PartialEq,
ops::{BitAnd, BitOr, Not},
};
mod mmio;
mod mmio_ptr;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod pio;
pub use mmio::*;
pub use mmio_ptr::*;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub use pio::*;
/// IO abstraction
///
/// Implementors provide the primitive `read`/`write` accesses for some backing
/// location (an MMIO register, an x86 port, ...); `readf`/`writef` build
/// flag-oriented helpers on top of them.
pub trait Io {
    /// Value type for IO, usually some unsigned number
    type Value: Copy
        + PartialEq
        + BitAnd<Output = Self::Value>
        + BitOr<Output = Self::Value>
        + Not<Output = Self::Value>;

    /// Read the underlying value
    fn read(&self) -> Self::Value;

    /// Write the underlying value
    fn write(&mut self, value: Self::Value);

    /// Check whether the underlying value contains all of the given bit flags
    #[inline(always)]
    fn readf(&self, flags: Self::Value) -> bool {
        // Parenthesized because `&` binds more loosely than `==` in Rust;
        // the previous identity cast to `Self::Value` was redundant.
        (self.read() & flags) == flags
    }

    /// Enable (`value == true`) or disable (`value == false`) specific bit flags
    #[inline(always)]
    fn writef(&mut self, flags: Self::Value, value: bool) {
        let tmp: Self::Value = match value {
            true => self.read() | flags,
            false => self.read() & !flags,
        };
        self.write(tmp);
    }
}
/// Read-only wrapper around an [`Io`] implementation.
///
/// Exposes only the reading half of the wrapped object, so accidental writes
/// are rejected at compile time.
#[repr(transparent)]
pub struct ReadOnly<I> {
    inner: I,
}

impl<I: Io> ReadOnly<I> {
    /// Wraps the given IO object.
    pub const fn new(inner: I) -> ReadOnly<I> {
        ReadOnly { inner }
    }

    /// Forwards to [`Io::read`].
    #[inline(always)]
    pub fn read(&self) -> I::Value {
        self.inner.read()
    }

    /// Forwards to [`Io::readf`].
    #[inline(always)]
    pub fn readf(&self, flags: I::Value) -> bool {
        self.inner.readf(flags)
    }
}
/// Write-only wrapper around an [`Io`] implementation.
///
/// Exposes only the writing half of the wrapped object; `writef` is
/// intentionally absent because it would require a read.
#[repr(transparent)]
pub struct WriteOnly<I> {
    inner: I,
}

impl<I: Io> WriteOnly<I> {
    /// Wraps the given IO object.
    pub const fn new(inner: I) -> WriteOnly<I> {
        WriteOnly { inner }
    }

    /// Forwards to [`Io::write`].
    #[inline(always)]
    pub fn write(&mut self, value: I::Value) {
        self.inner.write(value)
    }
}
@@ -0,0 +1,173 @@
use core::{mem::MaybeUninit, ptr};
use super::Io;
/// MMIO abstraction
///
/// Wraps a value of type `T` so that accesses go through the [`Io`]
/// implementations (volatile loads/stores, or inline asm on x86) instead of
/// ordinary reads and writes. `repr(C, packed)` removes padding/alignment
/// so `Mmio<T>` fields can be composed into structs mirroring a device's
/// register layout.
#[repr(C, packed)]
pub struct Mmio<T> {
    value: MaybeUninit<T>,
}
impl<T> Mmio<T> {
    /// Creates a zeroed instance
    ///
    /// NOTE(review): marked `unsafe` by the author — presumably because a
    /// zeroed bit pattern may not be a valid `T`; confirm the intended
    /// contract before reading the value back.
    pub unsafe fn zeroed() -> Self {
        Self {
            value: MaybeUninit::zeroed(),
        }
    }

    /// Creates an uninitialized instance
    ///
    /// NOTE(review): reading this before a `write` yields uninitialized data.
    pub unsafe fn uninit() -> Self {
        Self {
            value: MaybeUninit::uninit(),
        }
    }

    /// Creates a new instance initialized to `value`
    pub const fn new(value: T) -> Self {
        Self {
            value: MaybeUninit::new(value),
        }
    }
}
// Generic implementation (WARNING: requires aligned pointers!)
//
// Uses volatile loads/stores. `addr_of!`/`addr_of_mut!` take the field's
// address without creating a reference to a possibly-unaligned packed field.
// NOTE(review): `read_volatile`/`write_volatile` require the pointer to be
// aligned — hence the warning above; confirm all non-x86 register layouts
// keep `T` naturally aligned.
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
impl<T> Io for Mmio<T>
where
    T: Copy
        + PartialEq
        + core::ops::BitAnd<Output = T>
        + core::ops::BitOr<Output = T>
        + core::ops::Not<Output = T>,
{
    type Value = T;

    /// Volatile load of the wrapped register value.
    fn read(&self) -> T {
        unsafe { ptr::read_volatile(ptr::addr_of!(self.value).cast::<T>()) }
    }

    /// Volatile store of the wrapped register value.
    fn write(&mut self, value: T) {
        unsafe { ptr::write_volatile(ptr::addr_of_mut!(self.value).cast::<T>(), value) };
    }
}
// x86 u8 implementation
//
// On x86 the accesses are emitted as explicit `mov` instructions via inline
// asm instead of `read_volatile`/`write_volatile`.
// NOTE(review): presumably this guarantees a single exact-width access even
// though `Mmio` is `repr(packed)` (x86 `mov` tolerates unaligned addresses) —
// confirm the rationale against the surrounding drivers.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Io for Mmio<u8> {
    type Value = u8;

    /// Single-byte MMIO load.
    fn read(&self) -> Self::Value {
        unsafe {
            let value: Self::Value;
            // addr_of! avoids taking a reference to the packed field.
            let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::<Self::Value>();
            core::arch::asm!(
                "mov {}, [{}]",
                out(reg_byte) value,
                in(reg) ptr
            );
            value
        }
    }

    /// Single-byte MMIO store.
    fn write(&mut self, value: Self::Value) {
        unsafe {
            let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::<Self::Value>();
            core::arch::asm!(
                "mov [{}], {}",
                in(reg) ptr,
                in(reg_byte) value,
            );
        }
    }
}

// x86 u16 implementation (`{:x}` selects the 16-bit name of the register)
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Io for Mmio<u16> {
    type Value = u16;

    /// 16-bit MMIO load.
    fn read(&self) -> Self::Value {
        unsafe {
            let value: Self::Value;
            let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::<Self::Value>();
            core::arch::asm!(
                "mov {:x}, [{}]",
                out(reg) value,
                in(reg) ptr
            );
            value
        }
    }

    /// 16-bit MMIO store.
    fn write(&mut self, value: Self::Value) {
        unsafe {
            let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::<Self::Value>();
            core::arch::asm!(
                "mov [{}], {:x}",
                in(reg) ptr,
                in(reg) value,
            );
        }
    }
}

// x86 u32 implementation (`{:e}` selects the 32-bit name of the register)
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Io for Mmio<u32> {
    type Value = u32;

    /// 32-bit MMIO load.
    fn read(&self) -> Self::Value {
        unsafe {
            let value: Self::Value;
            let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::<Self::Value>();
            core::arch::asm!(
                "mov {:e}, [{}]",
                out(reg) value,
                in(reg) ptr
            );
            value
        }
    }

    /// 32-bit MMIO store.
    fn write(&mut self, value: Self::Value) {
        unsafe {
            let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::<Self::Value>();
            core::arch::asm!(
                "mov [{}], {:e}",
                in(reg) ptr,
                in(reg) value,
            );
        }
    }
}

// x86 u64 implementation (x86_64 only; `{:r}` selects the 64-bit register name)
#[cfg(target_arch = "x86_64")]
impl Io for Mmio<u64> {
    type Value = u64;

    /// 64-bit MMIO load.
    fn read(&self) -> Self::Value {
        unsafe {
            let value: Self::Value;
            let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::<Self::Value>();
            core::arch::asm!(
                "mov {:r}, [{}]",
                out(reg) value,
                in(reg) ptr
            );
            value
        }
    }

    /// 64-bit MMIO store.
    fn write(&mut self, value: Self::Value) {
        unsafe {
            let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::<Self::Value>();
            core::arch::asm!(
                "mov [{}], {:r}",
                in(reg) ptr,
                in(reg) value,
            );
        }
    }
}
@@ -0,0 +1,157 @@
use super::Io;
/// MMIO accessor that holds a raw register pointer instead of wrapping the
/// value inline.
pub struct MmioPtr<T> {
    ptr: *mut T,
}

impl<T> MmioPtr<T> {
    //TODO: reads and writes are unsafe, not new.
    /// Wraps a raw register pointer in an `MmioPtr`.
    pub unsafe fn new(ptr: *mut T) -> Self {
        MmioPtr { ptr }
    }

    /// Returns the underlying register address as a const pointer.
    pub const fn as_ptr(&self) -> *const T {
        self.ptr
    }

    /// Returns the underlying register address as a mutable pointer.
    pub const fn as_mut_ptr(&mut self) -> *mut T {
        self.ptr
    }
}
// Generic implementation (WARNING: requires aligned pointers!)
//
// Plain volatile accesses through the stored pointer.
// NOTE(review): `read_volatile`/`write_volatile` require `self.ptr` to be
// aligned for `T` — the caller of `MmioPtr::new` is responsible for that.
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
impl<T> Io for MmioPtr<T>
where
    T: Copy
        + PartialEq
        + core::ops::BitAnd<Output = T>
        + core::ops::BitOr<Output = T>
        + core::ops::Not<Output = T>,
{
    type Value = T;

    /// Volatile load through the register pointer.
    fn read(&self) -> T {
        unsafe { core::ptr::read_volatile(self.ptr) }
    }

    /// Volatile store through the register pointer.
    fn write(&mut self, value: T) {
        unsafe { core::ptr::write_volatile(self.ptr, value) };
    }
}
// x86 u8 implementation
//
// Mirrors the `Mmio<T>` x86 impls: the access is an explicit exact-width
// `mov` emitted via inline asm through the stored pointer.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Io for MmioPtr<u8> {
    type Value = u8;

    /// Single-byte MMIO load.
    fn read(&self) -> Self::Value {
        unsafe {
            let value: Self::Value;
            core::arch::asm!(
                "mov {}, [{}]",
                out(reg_byte) value,
                in(reg) self.ptr
            );
            value
        }
    }

    /// Single-byte MMIO store.
    fn write(&mut self, value: Self::Value) {
        unsafe {
            core::arch::asm!(
                "mov [{}], {}",
                in(reg) self.ptr,
                in(reg_byte) value,
            );
        }
    }
}

// x86 u16 implementation (`{:x}` selects the 16-bit register name)
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Io for MmioPtr<u16> {
    type Value = u16;

    /// 16-bit MMIO load.
    fn read(&self) -> Self::Value {
        unsafe {
            let value: Self::Value;
            core::arch::asm!(
                "mov {:x}, [{}]",
                out(reg) value,
                in(reg) self.ptr
            );
            value
        }
    }

    /// 16-bit MMIO store.
    fn write(&mut self, value: Self::Value) {
        unsafe {
            core::arch::asm!(
                "mov [{}], {:x}",
                in(reg) self.ptr,
                in(reg) value,
            );
        }
    }
}

// x86 u32 implementation (`{:e}` selects the 32-bit register name)
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Io for MmioPtr<u32> {
    type Value = u32;

    /// 32-bit MMIO load.
    fn read(&self) -> Self::Value {
        unsafe {
            let value: Self::Value;
            core::arch::asm!(
                "mov {:e}, [{}]",
                out(reg) value,
                in(reg) self.ptr
            );
            value
        }
    }

    /// 32-bit MMIO store.
    fn write(&mut self, value: Self::Value) {
        unsafe {
            core::arch::asm!(
                "mov [{}], {:e}",
                in(reg) self.ptr,
                in(reg) value,
            );
        }
    }
}

// x86 u64 implementation (x86_64 only; `{:r}` selects the 64-bit register name)
#[cfg(target_arch = "x86_64")]
impl Io for MmioPtr<u64> {
    type Value = u64;

    /// 64-bit MMIO load.
    fn read(&self) -> Self::Value {
        unsafe {
            let value: Self::Value;
            core::arch::asm!(
                "mov {:r}, [{}]",
                out(reg) value,
                in(reg) self.ptr
            );
            value
        }
    }

    /// 64-bit MMIO store.
    fn write(&mut self, value: Self::Value) {
        unsafe {
            core::arch::asm!(
                "mov [{}], {:r}",
                in(reg) self.ptr,
                in(reg) value,
            );
        }
    }
}
@@ -0,0 +1,89 @@
use core::{arch::asm, marker::PhantomData};
use super::Io;
/// Generic PIO (x86 port I/O)
///
/// Stores only the 16-bit port number; the access width is chosen by the type
/// parameter `T` through the per-width [`Io`] implementations below.
#[derive(Copy, Clone)]
pub struct Pio<T> {
    port: u16,
    value: PhantomData<T>,
}

impl<T> Pio<T> {
    /// Create a PIO from a given port
    pub const fn new(port: u16) -> Self {
        Self {
            port,
            value: PhantomData,
        }
    }
}
/// Read/Write for byte PIO
///
/// Uses the x86 `in`/`out` instructions, which require the port in `dx` and
/// the data in the accumulator. `nostack, nomem, preserves_flags` tells the
/// compiler the asm touches neither memory, the stack, nor EFLAGS.
impl Io for Pio<u8> {
    type Value = u8;

    /// Read
    #[inline(always)]
    fn read(&self) -> u8 {
        let value: u8;
        unsafe {
            asm!("in al, dx", in("dx") self.port, out("al") value, options(nostack, nomem, preserves_flags));
        }
        value
    }

    /// Write
    #[inline(always)]
    fn write(&mut self, value: u8) {
        unsafe {
            asm!("out dx, al", in("dx") self.port, in("al") value, options(nostack, nomem, preserves_flags));
        }
    }
}

/// Read/Write for word PIO
impl Io for Pio<u16> {
    type Value = u16;

    /// Read
    #[inline(always)]
    fn read(&self) -> u16 {
        let value: u16;
        unsafe {
            asm!("in ax, dx", in("dx") self.port, out("ax") value, options(nostack, nomem, preserves_flags));
        }
        value
    }

    /// Write
    #[inline(always)]
    fn write(&mut self, value: u16) {
        unsafe {
            asm!("out dx, ax", in("dx") self.port, in("ax") value, options(nostack, nomem, preserves_flags));
        }
    }
}

/// Read/Write for doubleword PIO
impl Io for Pio<u32> {
    type Value = u32;

    /// Read
    #[inline(always)]
    fn read(&self) -> u32 {
        let value: u32;
        unsafe {
            asm!("in eax, dx", in("dx") self.port, out("eax") value, options(nostack, nomem, preserves_flags));
        }
        value
    }

    /// Write
    #[inline(always)]
    fn write(&mut self, value: u32) {
        unsafe {
            asm!("out dx, eax", in("dx") self.port, in("eax") value, options(nostack, nomem, preserves_flags));
        }
    }
}
+331
View File
@@ -0,0 +1,331 @@
//! This crate provides various abstractions for use by all drivers in the Redox drivers repo.
//!
//! This includes direct memory access via [dma], and Scatter-Gather List support via [sgl]. It also
//! provides various memory management structures for use with drivers, and some logging support.
use libredox::call::MmapArgs;
use libredox::flag::{self, O_CLOEXEC, O_RDONLY, O_RDWR, O_WRONLY};
use libredox::{
errno::EINVAL,
error::{Error, Result},
Fd,
};
use syscall::{ProcSchemeVerb, PAGE_SIZE};
/// The Direct Memory Access (DMA) API for drivers
pub mod dma;
/// MMIO utilities
pub mod io;
mod logger;
/// The Scatter Gather List (SGL) API for drivers.
pub mod sgl;
/// Low latency timeout for driver loops
pub mod timeout;
pub use logger::{file_level, output_level, setup_logging};
use std::sync::OnceLock;
/// Process-wide handle to `/scheme/memory/scheme-root`; populated exactly once
/// by [`init`] and read through [`memory_root_fd`].
static MEMORY_ROOT_FD: OnceLock<libredox::Fd> = OnceLock::new();
/// Initializes a file descriptor to be used as the root memory for a driver.
///
/// Opens `/scheme/memory/scheme-root` and stores the descriptor in the
/// process-wide [`MEMORY_ROOT_FD`] cell.
///
/// # Panics
///
/// This function will panic if:
/// - `libredox` is unable to open a file descriptor.
/// - The memory root file descriptor has already been set (this function has already been called).
pub fn init() {
    let fd = libredox::Fd::open("/scheme/memory/scheme-root", 0, 0)
        .expect("drivers common: failed to open memory root fd");
    if MEMORY_ROOT_FD.set(fd).is_err() {
        panic!("drivers common: failed to set memory root fd");
    }
}
/// Gets the memory root file descriptor.
///
/// # Panics
///
/// This function will panic if [`init`] has not already been called first.
pub fn memory_root_fd() -> &'static libredox::Fd {
    match MEMORY_ROOT_FD.get() {
        Some(fd) => fd,
        None => panic!("drivers common: memory root fd not initialized. Please call `common::init` in your main function."),
    }
}
/// Specifies the write behavior for a specific region of memory
///
/// These types indicate to the driver how writes to a specific memory region are handled by the
/// system. This usually refers to the caching behavior that the processor or I/O device responsible
/// for that memory implements.
///
/// aarch64 and x86 have very different cache-coherency rules, so this API as written is likely
/// not sufficient to describe the memory caching behavior in a cross-platform manner. As such,
/// consider this API unstable.
//
// The manual `impl Default` was replaced with a derive + `#[default]` (stable
// since Rust 1.62), keeping `MemoryType::default() == Writeback`.
#[derive(Clone, Copy, Debug, Default)]
pub enum MemoryType {
    /// A region of memory that implements Write-back caching.
    ///
    /// In write-back caching, the processor will first store data in its local cache, and then
    /// flush it to the actual storage location at regular intervals, or as applications access
    /// the data.
    #[default]
    Writeback,
    /// A region of memory that does not implement caching. Writes to these regions are immediate.
    Uncacheable,
    /// A region of memory that implements write combining.
    ///
    /// Write combining memory regions store all writes in a temporary buffer called a Write
    /// Combine Buffer. Multiple writes to the location are stored in a single buffer, and then
    /// released to the memory location in an unspecified order. Write-Combine memory does not
    /// guarantee that the order at which you write to it is the order at which those writes are
    /// committed to memory.
    WriteCombining,
    /// Memory stored in an intermediate Write Combine Buffer and released later
    /// Memory-Mapped I/O. This is an aarch64-specific term.
    DeviceMemory,
}
/// Represents the protection level of an area of memory.
///
/// This structure shouldn't be used directly -- instead, use the [`Prot::RO`] (Read-Only),
/// [`Prot::WO`] (Write-Only) and [`Prot::RW`] (Read-Write) constants to specify the memory's protection
/// level.
// `PartialEq`/`Eq` derived so protection levels can be compared directly.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Prot {
    /// The memory is readable
    pub read: bool,
    /// The memory is writeable
    pub write: bool,
}

/// Implements the memory protection level constants
impl Prot {
    /// A constant representing Read-Only memory.
    pub const RO: Self = Self {
        read: true,
        write: false,
    };
    /// A constant representing Write-Only memory
    pub const WO: Self = Self {
        read: false,
        write: true,
    };
    /// A constant representing Read-Write memory
    pub const RW: Self = Self {
        read: true,
        write: true,
    };
}
/// Maps physical memory to virtual memory
///
/// # Arguments
///
/// * '`base_phys`: [usize]' - The base address of the physical memory to map. Must be non-zero.
/// * 'len: [usize]' - The length of the physical memory to map (rounded up to a multiple of [`PAGE_SIZE`])
/// * '_: [Prot]' - The memory protection level of the mapping.
/// * 'ty: [`MemoryType`]' - The caching behavior specification of the memory.
///
/// # Returns
///
/// A '[Result]<*mut ()>' which is:
/// - '[Ok]' containing a raw pointer to the mapped memory.
/// - '[Err]' which contains an error on failure.
///
/// # Errors
///
/// This function will return an error if:
/// - `base_phys` is zero, or both 'read' and 'write' are false
/// - The system could not open a file descriptor to the memory scheme for the specified [`MemoryType`].
/// - The system failed to map the physical address to a virtual address. See [`libredox::call::mmap`]
///
/// # Safety
///
/// The kernel ensures the mapping does not conflict with memory described in the memory map for
/// regular RAM, but the caller owns the raw mapping: it must eventually be released with
/// [`libredox::call::munmap`], and all accesses through the returned pointer follow the usual
/// raw-pointer rules. If you want a safe accessor, use [`PhysBorrowed`] instead.
///
/// # Notes
/// - The `MemoryType` is used to select which memory scheme path to open (i.e
///   /scheme/memory/physical@wb, /scheme/memory/physical@uc, etc).
pub unsafe fn physmap(
    base_phys: usize,
    len: usize,
    Prot { read, write }: Prot,
    ty: MemoryType,
) -> Result<*mut ()> {
    // TODO: arraystring?
    //Return an error rather than potentially crash the kernel.
    if base_phys == 0 {
        return Err(Error::new(EINVAL));
    }
    let path = format!(
        "physical@{}",
        match ty {
            MemoryType::Writeback => "wb",
            MemoryType::Uncacheable => "uc",
            MemoryType::WriteCombining => "wc",
            MemoryType::DeviceMemory => "dev",
        }
    );
    // Open mode mirrors the requested protection; neither readable nor
    // writable is rejected up front.
    let mode = match (read, write) {
        (true, true) => O_RDWR,
        (true, false) => O_RDONLY,
        (false, true) => O_WRONLY,
        (false, false) => return Err(Error::new(EINVAL)),
    };
    let mut prot = 0;
    if read {
        prot |= flag::PROT_READ;
    }
    if write {
        prot |= flag::PROT_WRITE;
    }
    let fd = memory_root_fd().openat(&path, O_CLOEXEC | mode, 0)?;
    // The physical base address is passed as the mmap offset into the scheme.
    Ok(libredox::call::mmap(MmapArgs {
        fd: fd.raw(),
        offset: base_phys as u64,
        length: len.next_multiple_of(PAGE_SIZE),
        flags: flag::MAP_SHARED,
        prot,
        addr: core::ptr::null_mut(),
    })? as *mut ())
}
impl std::fmt::Display for MemoryType {
    /// Formats the memory type as its memory-scheme suffix
    /// (`wb`, `uc`, `wc`, or `dev`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let tag = match self {
            Self::Writeback => "wb",
            Self::Uncacheable => "uc",
            Self::WriteCombining => "wc",
            Self::DeviceMemory => "dev",
        };
        f.write_str(tag)
    }
}
/// A safe virtual mapping to physical memory that unmaps the memory when the structure goes out
/// of scope.
///
/// This type provides a safe binding to [physmap]. It implements Drop to free the mapped memory
/// when the structure goes out of scope.
pub struct PhysBorrowed {
    /// Base of the virtual mapping returned by [physmap].
    mem: *mut (),
    /// Page-rounded length of the mapping; passed back to munmap on drop.
    len: usize,
}
impl PhysBorrowed {
    /// Constructs a `PhysBorrowed` instance.
    ///
    /// # Arguments
    /// See [physmap] for a description of the parameters.
    ///
    /// # Returns
    /// A '[Result]' which contains the following:
    /// - A '[`PhysBorrowed`]' which represents the newly mapped region.
    /// - An 'Err' if a memory mapping error occurs.
    ///
    /// # Errors
    /// See [physmap] for a description of the error cases.
    pub fn map(base_phys: usize, len: usize, prot: Prot, ty: MemoryType) -> Result<Self> {
        let mem = unsafe { physmap(base_phys, len, prot, ty)? };
        Ok(Self {
            mem,
            // Store the page-rounded length so Drop unmaps exactly what
            // physmap mapped.
            len: len.next_multiple_of(PAGE_SIZE),
        })
    }

    /// Gets a raw pointer to the borrowed region.
    ///
    /// # Returns
    /// - self.mem - A pointer to the mapped region in virtual memory.
    ///
    /// # Notes
    /// - The pointer may outlive this [`PhysBorrowed`] (which unmaps the region on drop), so
    ///   dereferences through it must be treated as unsafe.
    pub fn as_ptr(&self) -> *mut () {
        self.mem
    }

    /// Gets the length of the mapped region.
    ///
    /// # Returns
    /// - self.len - The length of the mapped region. It is a multiple of [`PAGE_SIZE`]
    pub fn mapped_len(&self) -> usize {
        self.len
    }
}
impl Drop for PhysBorrowed {
    /// Frees the mapped memory region.
    fn drop(&mut self) {
        unsafe {
            // Unmap failure is ignored: nothing sensible can be done in drop.
            let _ = libredox::call::munmap(self.mem, self.len);
        }
    }
}
/// Instructs the kernel to enable I/O ports for this (usermode) process (x86-specific).
///
/// On Redox, x86 privilege ring 3 represents userspace. Most Redox drivers run in userspace to
/// prevent system instability caused by a faulty driver. Processes with (bitmap-enabled) IO port
/// rights can use the IN/OUT instructions. This is not the same as IOPL 3; the CLI instruction is
/// still not allowed.
///
/// # Errors
///
/// Propagates any error from duplicating the current-thread descriptor or
/// from the kernel call requesting I/O privileges.
pub fn acquire_port_io_rights() -> Result<()> {
    extern "C" {
        // NOTE(review): presumably provided by the C runtime (relibc) —
        // returns a descriptor for the current thread; confirm.
        fn redox_cur_thrfd_v0() -> usize;
    }
    // Obtain a kernel handle to the current thread via the proc scheme.
    let kernel_fd = syscall::dup(unsafe { redox_cur_thrfd_v0() }, b"open_via_dup")?;
    // Issue the Iopl verb on that handle.
    let res = libredox::call::call_wo(
        kernel_fd,
        &[],
        syscall::CallFlags::empty(),
        &[ProcSchemeVerb::Iopl as u64],
    );
    // Close the handle before propagating any error from the call itself.
    let _ = syscall::close(kernel_fd);
    res?;
    Ok(())
}
/// Kernel handle for translating virtual addresses in the current address space, to their
/// underlying physical addresses.
///
/// It is currently unspecified whether this handle is specific to the address space at the time it
/// was created, or whether all calls reference the currently active address space.
pub struct VirtaddrTranslationHandle {
    /// Descriptor for the memory scheme's `translation` endpoint.
    fd: Fd,
}

impl VirtaddrTranslationHandle {
    /// Create a new handle, requires uid=0 but this may change.
    ///
    /// # Errors
    ///
    /// Propagates any error from opening the `translation` endpoint of the
    /// memory root scheme.
    pub fn new() -> Result<Self> {
        Ok(Self {
            fd: memory_root_fd().openat("translation", O_CLOEXEC, 0)?,
        })
    }

    /// Translate virtual => physical.
    ///
    /// The previous documentation claimed the opposite direction and named the
    /// parameter `physical`; callers (e.g. the DMA allocator) pass a virtual
    /// address and receive the physical address it maps to.
    ///
    /// # Errors
    ///
    /// Propagates any error from the kernel call (e.g. if the address is not
    /// mapped).
    pub fn translate(&self, virt: usize) -> Result<usize> {
        // The address is passed in-place through the buffer and overwritten
        // with the translation result.
        let mut buf = virt.to_ne_bytes();
        libredox::call::call_ro(self.fd.raw(), &mut buf, syscall::CallFlags::empty(), &[])?;
        Ok(usize::from_ne_bytes(buf))
    }
}
@@ -0,0 +1,108 @@
use std::str::FromStr;
use libredox::{flag, Fd};
use redox_log::{OutputBuilder, RedoxLogger};
/// Get the log verbosity for the output level.
///
/// Default stderr verbosity for drivers; `setup_logging` may override it at
/// runtime via the `DRIVER_*_LOG_LEVEL` environment variables.
pub fn output_level() -> log::LevelFilter {
    log::LevelFilter::Info
}

/// Get the log verbosity for the file level.
///
/// Default verbosity for the on-disk log files created by `setup_logging`.
pub fn file_level() -> log::LevelFilter {
    log::LevelFilter::Info
}
/// Configures logging for a single driver.
///
/// Installs a stderr output (filtered by `output_level`, which may be
/// overridden by a `DRIVER_*_LOG_LEVEL` environment variable) and, on Redox,
/// two additional log-scheme outputs: a plain `<logfile_base>.log` and an
/// ANSI-colored `<logfile_base>.ansi.log`, both filtered by `file_level`.
/// Failures to create the file outputs are reported to stderr but are not
/// fatal.
#[cfg_attr(not(target_os = "redox"), allow(unused_variables, unused_mut))]
pub fn setup_logging(
    category: &str,
    subcategory: &str,
    logfile_base: &str,
    mut output_level: log::LevelFilter,
    file_level: log::LevelFilter,
) {
    RedoxLogger::init_timezone();
    // Bootloader/kernel environment variables take precedence over the
    // caller-supplied stderr level.
    if let Some(log_level) = read_bootloader_log_level_env(category, subcategory) {
        output_level = log_level;
    }
    let mut logger = RedoxLogger::new().with_output(
        OutputBuilder::stderr()
            .with_filter(output_level) // limit global output to important info
            .with_ansi_escape_codes()
            .flush_on_newline(true)
            .build(),
    );
    #[cfg(target_os = "redox")]
    match OutputBuilder::in_redox_logging_scheme(
        category,
        subcategory,
        format!("{logfile_base}.log"),
    ) {
        Ok(b) => {
            logger = logger.with_output(b.with_filter(file_level).flush_on_newline(true).build())
        }
        Err(error) => eprintln!("Failed to create {logfile_base}.log: {}", error),
    }
    #[cfg(target_os = "redox")]
    match OutputBuilder::in_redox_logging_scheme(
        category,
        subcategory,
        format!("{logfile_base}.ansi.log"),
    ) {
        Ok(b) => {
            logger = logger.with_output(
                b.with_filter(file_level)
                    .with_ansi_escape_codes()
                    .flush_on_newline(true)
                    .build(),
            )
        }
        Err(error) => eprintln!("Failed to create {logfile_base}.ansi.log: {}", error),
    }
    logger.enable().expect("failed to set default logger");
}
/// Reads a driver log level from the kernel environment (`/scheme/sys/env`).
///
/// Looks for `DRIVER_<SUBCATEGORY>_LOG_LEVEL`, then `DRIVER_<CATEGORY>_LOG_LEVEL`,
/// then the generic `DRIVER_LOG_LEVEL`, returning the first that parses as a
/// [`log::LevelFilter`]. Returns [`None`] on any read/parse failure, or when
/// the environment may have been truncated by the fixed-size buffer.
fn read_bootloader_log_level_env(category: &str, subcategory: &str) -> Option<log::LevelFilter> {
    let mut env_bytes = [0_u8; 4096];
    // TODO: Have the kernel env can specify prefixed env key instead of having to read all of them
    let envs = {
        let Ok(fd) = Fd::open("/scheme/sys/env", flag::O_RDONLY | flag::O_CLOEXEC, 0) else {
            return None;
        };
        let Ok(bytes_read) = fd.read(&mut env_bytes) else {
            return None;
        };
        // A completely full buffer means the environment may be truncated;
        // bail out rather than parse a half-read variable.
        if bytes_read >= env_bytes.len() {
            return None;
        }
        let env_bytes = &mut env_bytes[..bytes_read];
        // One `KEY=VALUE` entry per line; keep only driver-related keys.
        env_bytes
            .split(|&c| c == b'\n')
            .filter(|var| var.starts_with(b"DRIVER_"))
            .collect::<Vec<_>>()
    };
    // Most specific key first: subcategory, then category, then global.
    let log_env_keys = [
        format!("DRIVER_{}_LOG_LEVEL=", subcategory.to_ascii_uppercase()),
        format!("DRIVER_{}_LOG_LEVEL=", category.to_ascii_uppercase()),
        "DRIVER_LOG_LEVEL=".to_string(),
    ];
    for log_env_key in log_env_keys {
        let log_env_key = log_env_key.as_bytes();
        if let Some(log_env) = envs.iter().find_map(|var| var.strip_prefix(log_env_key)) {
            if let Ok(Ok(log_level)) = str::from_utf8(&log_env).map(log::LevelFilter::from_str) {
                return Some(log_level);
            }
        }
    }
    None
}
+130
View File
@@ -0,0 +1,130 @@
use std::num::NonZeroUsize;
use libredox::call::MmapArgs;
use libredox::errno::EINVAL;
use libredox::error::{Error, Result};
use libredox::flag::{MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
use syscall::{MAP_FIXED, PAGE_SIZE};
use crate::dma::phys_contiguous_fd;
use crate::VirtaddrTranslationHandle;
/// A Scatter-Gather List data structure
///
/// A contiguous virtual region backed by multiple physically contiguous
/// chunks, each of which can be handed to a DMA engine separately.
///
/// See: <https://en.wikipedia.org/wiki/Gather/scatter_(vector_addressing)>
#[derive(Debug)]
pub struct Sgl {
    /// A raw pointer to the SGL in virtual memory
    virt: *mut u8,
    /// The length of the allocated memory, guaranteed to be a multiple of [`PAGE_SIZE`].
    aligned_length: usize,
    /// The requested length of the memory. This value is NOT guaranteed to be a multiple of [`PAGE_SIZE`]
    unaligned_length: NonZeroUsize,
    /// The physically contiguous chunks backing the virtual region, in offset order.
    chunks: Vec<Chunk>,
}
/// A structure representing one physically contiguous chunk of an [`Sgl`]
#[derive(Debug)]
pub struct Chunk {
    /// Byte offset of this chunk from the start of the SGL region.
    pub offset: usize,
    /// The physical address of the chunk
    pub phys: usize,
    /// A raw pointer to the chunk in virtual memory
    pub virt: *mut u8,
    /// The usable length of the chunk in bytes; for the final chunk this may
    /// be shorter than the mapped size (clipped to the requested SGL length).
    pub length: usize,
}
impl Sgl {
    /// Constructor for the scatter/gather list.
    ///
    /// Reserves `unaligned_length` bytes (rounded up to whole pages) of
    /// virtual address space, then backs it with physically contiguous
    /// power-of-two-sized chunks of at most `MAX_ALLOC_SIZE` bytes each.
    ///
    /// # Arguments
    ///
    /// '`unaligned_length`: [usize]' - The length of the SGL, not necessarily aligned to the nearest
    /// page. Must be non-zero.
    ///
    /// # Errors
    ///
    /// Returns `EINVAL` if `unaligned_length` is zero, and propagates any
    /// error from reserving or mapping the memory.
    pub fn new(unaligned_length: usize) -> Result<Self> {
        let unaligned_length = NonZeroUsize::new(unaligned_length).ok_or(Error::new(EINVAL))?;
        // TODO: Both PAGE_SIZE and MAX_ALLOC_SIZE should be dynamic.
        let aligned_length = unaligned_length.get().next_multiple_of(PAGE_SIZE);
        const MAX_ALLOC_SIZE: usize = 1 << 22;
        unsafe {
            // Reserve the whole range with PROT_NONE and no backing fd (!0);
            // the chunk mappings below are placed at fixed offsets inside it.
            let virt = libredox::call::mmap(MmapArgs {
                flags: MAP_PRIVATE,
                prot: PROT_NONE,
                length: aligned_length,
                offset: 0,
                fd: !0,
                addr: core::ptr::null_mut(),
            })?
            .cast::<u8>();
            // Constructed before the chunk loop so that, on any later error,
            // Drop unmaps the reservation.
            let mut this = Self {
                virt,
                aligned_length,
                unaligned_length,
                chunks: Vec::new(),
            };
            // TODO: SglContext to avoid reopening these fds?
            let phys_contiguous_fd = phys_contiguous_fd()?;
            let virttophys_handle = VirtaddrTranslationHandle::new()?;
            let mut offset = 0;
            while offset < aligned_length {
                // Pick the largest power-of-two chunk (capped at
                // MAX_ALLOC_SIZE) that still fits in the remaining range;
                // halve it if next_power_of_two overshot the remainder.
                let preferred_chunk_length = (aligned_length - offset)
                    .min(MAX_ALLOC_SIZE)
                    .next_power_of_two();
                let chunk_length = if preferred_chunk_length > aligned_length - offset {
                    preferred_chunk_length / 2
                } else {
                    preferred_chunk_length
                };
                // Replace the PROT_NONE pages at this offset with a
                // physically contiguous, readable/writable mapping.
                libredox::call::mmap(MmapArgs {
                    addr: virt.add(offset).cast(),
                    flags: MAP_PRIVATE | (MAP_FIXED.bits() as u32),
                    prot: PROT_READ | PROT_WRITE,
                    length: chunk_length,
                    fd: phys_contiguous_fd.raw(),
                    offset: 0,
                })?;
                let phys = virttophys_handle.translate(virt as usize + offset)?;
                this.chunks.push(Chunk {
                    offset,
                    phys,
                    // The final chunk's usable length is clipped to the
                    // requested (unaligned) total length.
                    length: (unaligned_length.get() - offset).min(chunk_length),
                    virt: virt.add(offset),
                });
                offset += chunk_length;
            }
            Ok(this)
        }
    }

    /// Returns an immutable reference to the vector of chunks
    pub fn chunks(&self) -> &[Chunk] {
        &self.chunks
    }

    /// Returns a raw pointer to the start of the SGL region in virtual memory
    pub fn as_ptr(&self) -> *mut u8 {
        self.virt
    }

    /// Returns the length of the scatter-gather list (the originally
    /// requested, unaligned byte count).
    pub fn len(&self) -> usize {
        self.unaligned_length.get()
    }
}
impl Drop for Sgl {
    /// Unmaps the whole reserved region (covering all chunk mappings) in one call.
    fn drop(&mut self) {
        unsafe {
            // Unmap failure is ignored: nothing sensible can be done in drop.
            let _ = libredox::call::munmap(self.virt.cast(), self.aligned_length);
        }
    }
}
@@ -0,0 +1,56 @@
use std::time::{Duration, Instant};
/// Represents an amount of time for a driver to give up to the OS scheduler.
pub struct Timeout {
    instant: Instant,
    duration: Duration,
}

impl Timeout {
    /// Create a new `Timeout` that expires `duration` after this call.
    #[inline]
    pub fn new(duration: Duration) -> Self {
        Timeout {
            instant: Instant::now(),
            duration,
        }
    }

    /// Create a new `Timeout` expiring after the given number of microseconds.
    #[inline]
    pub fn from_micros(micros: u64) -> Self {
        Self::new(Duration::from_micros(micros))
    }

    /// Create a new `Timeout` expiring after the given number of milliseconds.
    #[inline]
    pub fn from_millis(millis: u64) -> Self {
        Self::new(Duration::from_millis(millis))
    }

    /// Create a new `Timeout` expiring after the given number of seconds.
    #[inline]
    pub fn from_secs(secs: u64) -> Self {
        Self::new(Duration::from_secs(secs))
    }

    /// Execute the `Timeout`.
    ///
    /// # Errors
    ///
    /// Returns an `Err` if the duration of the `Timeout` has already elapsed
    /// between creating the `Timeout` and calling this function.
    #[inline]
    pub fn run(&self) -> Result<(), ()> {
        if self.instant.elapsed() >= self.duration {
            return Err(());
        }
        // Sleeps in Redox are only evaluated on PIT ticks (a few ms), which is not
        // short enough for a reasonably responsive timeout. However, the clock is
        // highly accurate. So, we yield instead of sleep to reduce latency.
        //TODO: allow timeout that spins instead of yields?
        std::thread::yield_now();
        Ok(())
    }
}