feat: IOMMU-aware DmaAllocator + comprehensive DMA/thread audit

dma.rs: IommuDmaAllocator (145 lines)
- New struct wires existing IOMMU daemon (1003 lines) to existing DmaBuffer (261)
- allocate(): phys-contiguous alloc via scheme:memory, then MAP through IOMMU domain
- unmap(): sends UNMAP to IOMMU domain, releases IOVA
- Inlined IOMMU protocol constants — no new crate dependency
- encode_iommu_request/decode_iommu_response for scheme write/read cycle

Documentation updates:
- IMPLEMENTATION-MASTER-PLAN.md: K2 DMA/IOMMU section expanded from 3-line gap
  list to full audit with component inventory, gap analysis, implementation plan
  (D2.1-D2.5), Linux reference table. Added K2b thread/fork audit.
- CPU-DMA-IRQ-MSI-SCHEDULER-FIX-PLAN.md: Phase 1 (MSI) marked complete with
  per-task status. Phase 2 (DMA) re-scoped from 'create' to 'wire' based on
  audit. Phase 3 (scheduler) marked mostly done.
- IRQ-AND-LOWLEVEL-CONTROLLERS-ENHANCEMENT-PLAN.md: kernel MSI support noted
  as materially strong with P8-msi.patch reference.

Audit findings:
- IOMMU daemon is solid: 1003-line lib.rs with full scheme protocol,
  427-line amd_vi.rs, host-runnable tests. Needs wiring, not rewriting.
- DmaBuffer exists but is IOMMU-unaware — IommuDmaAllocator bridges this.
- relibc rlct_clone is correct for threads (shares addr space implicitly).
  '3 IPC hops' claim is microkernel-architectural, not a real perf issue.
- No stale docs to archive at this time.
This commit is contained in:
2026-05-04 18:18:04 +01:00
parent 678980521c
commit 029472d5e3
4 changed files with 285 additions and 81 deletions
@@ -8,6 +8,55 @@ use syscall as redox_syscall;
use crate::{DriverError, Result};
// IOMMU protocol constants (mirrored from iommu daemon's scheme protocol)
/// Fixed size in bytes of an encoded request frame (see `encode_iommu_request`).
const IOMMU_REQ_SIZE: usize = 32;
/// Fixed size in bytes of a response frame (see `decode_iommu_response`).
const IOMMU_RSP_SIZE: usize = 36;
/// Protocol version stamped into the second field of every request header.
const IOMMU_VERSION: u16 = 1;
/// Opcode: map a physical range into the domain, yielding a device-visible IOVA.
const IOMMU_OP_MAP: u16 = 0x0010;
/// Opcode: unmap a previously mapped IOVA range from the domain.
const IOMMU_OP_UNMAP: u16 = 0x0011;
/// Serialize an IOMMU scheme request into its fixed 32-byte wire format.
///
/// Layout (all fields little-endian):
/// `opcode: u16 | version: u16 | arg0: u32 | arg1: u64 | arg2: u64 | arg3: u64`.
fn encode_iommu_request(opcode: u16, arg0: u32, arg1: u64, arg2: u64, arg3: u64) -> [u8; IOMMU_REQ_SIZE] {
    let op = opcode.to_le_bytes();
    let ver = IOMMU_VERSION.to_le_bytes();
    let a0 = arg0.to_le_bytes();
    let a1 = arg1.to_le_bytes();
    let a2 = arg2.to_le_bytes();
    let a3 = arg3.to_le_bytes();

    let mut frame = [0u8; IOMMU_REQ_SIZE];
    let mut cursor = 0usize;
    // Pack each field back-to-back; the cursor advances by the field's width.
    for field in [&op[..], &ver[..], &a0[..], &a1[..], &a2[..], &a3[..]] {
        frame[cursor..cursor + field.len()].copy_from_slice(field);
        cursor += field.len();
    }
    frame
}
/// Decoded response frame from the IOMMU scheme daemon.
#[derive(Debug)]
struct IommuResponse {
    // 0 on success; nonzero daemon-defined error code (checked by callers
    // such as `map_to_iommu`).
    status: i32,
    // Opcode-dependent payload at response bytes 12..20; for MAP this carries
    // the allocated IOVA (see `map_to_iommu`).
    arg1: u64,
}
/// Parse a raw IOMMU scheme response buffer into an [`IommuResponse`].
///
/// Returns `None` when fewer than `IOMMU_RSP_SIZE` bytes are available;
/// otherwise extracts `status` (bytes 0..4) and `arg1` (bytes 12..20),
/// both little-endian.
fn decode_iommu_response(bytes: &[u8]) -> Option<IommuResponse> {
    // Checked slice access: too-short input yields None instead of panicking.
    let frame = bytes.get(..IOMMU_RSP_SIZE)?;
    let status_bytes: [u8; 4] = frame[0..4].try_into().ok()?;
    let arg1_bytes: [u8; 8] = frame[12..20].try_into().ok()?;
    Some(IommuResponse {
        status: i32::from_le_bytes(status_bytes),
        arg1: u64::from_le_bytes(arg1_bytes),
    })
}
/// Perform one request/response cycle against an IOMMU scheme handle.
///
/// Encodes the request, writes it to `fd`, then reads and decodes the
/// response frame.
///
/// # Errors
/// Returns `DriverError::Io` on a failed write/read syscall, and
/// `DriverError::Other` on a short write, an empty or truncated response,
/// or a response that fails to decode.
fn write_iommu_request(fd: i32, opcode: u16, arg0: u32, arg1: u64, arg2: u64, arg3: u64) -> Result<IommuResponse> {
    let req_bytes = encode_iommu_request(opcode, arg0, arg1, arg2, arg3);
    let written = libredox::call::write(fd as usize, &req_bytes)
        .map_err(|e| DriverError::Io(std::io::Error::from_raw_os_error(e.errno())))?;
    if written < IOMMU_REQ_SIZE {
        return Err(DriverError::Other(format!("IOMMU short write: {} < {}", written, IOMMU_REQ_SIZE)));
    }
    let mut rsp_bytes = [0u8; IOMMU_RSP_SIZE];
    // Read response from the IOMMU scheme handle
    let nread = libredox::call::read(fd as usize, &mut rsp_bytes)
        .map_err(|e| DriverError::Io(std::io::Error::from_raw_os_error(e.errno())))?;
    if nread == 0 {
        return Err(DriverError::Other("IOMMU empty response".into()));
    }
    // Fix: report a truncated (non-empty but short) response distinctly.
    // Previously it fell through to decode_iommu_response, which rejected it
    // as "malformed" — masking that the real problem was a short read.
    if nread < IOMMU_RSP_SIZE {
        return Err(DriverError::Other(format!("IOMMU short read: {} < {}", nread, IOMMU_RSP_SIZE)));
    }
    decode_iommu_response(&rsp_bytes[..nread])
        .ok_or_else(|| DriverError::Other("IOMMU malformed response".into()))
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum DmaMemoryType {
Writeback,
@@ -259,3 +308,99 @@ impl Drop for DmaBuffer {
// SAFETY(review): these impls assert a DmaBuffer may be moved to and shared
// across threads. That is sound only if DmaBuffer exclusively owns its
// virt/phys mapping and contains no thread-affine or unsynchronized-interior-
// mutable state — TODO confirm against the DmaBuffer definition above.
unsafe impl Send for DmaBuffer {}
unsafe impl Sync for DmaBuffer {}
/// IOMMU-backed DMA allocator.
///
/// Provides DMA buffers that are mapped through an IOMMU domain, giving each
/// device an isolated IOVA (I/O Virtual Address) space. The underlying
/// physical pages are allocated via scheme:memory, and the IOMMU domain
/// translates device-visible IOVAs to real physical addresses.
pub struct IommuDmaAllocator {
    // Handle to the `iommu:domain/N` scheme; owned by the allocator and
    // closed in Drop.
    domain_fd: i32,
    // Cumulative count of successful `allocate` calls. Never decremented
    // (unmap does not reduce it), so it is not a live-mapping count.
    alloc_count: usize,
}
impl IommuDmaAllocator {
    /// Create a new IOMMU-backed DMA allocator.
    ///
    /// `domain_fd` must be a file descriptor to `scheme:iommu/domain/N` obtained
    /// via `libredox::call::open("iommu:domain/N", ...)`. The allocator takes
    /// ownership of the descriptor; it is closed when the allocator is dropped.
    pub fn new(domain_fd: i32) -> Self {
        Self {
            domain_fd,
            alloc_count: 0,
        }
    }

    /// Allocate a DMA buffer and map it into the IOMMU domain.
    ///
    /// Returns both the `DmaBuffer` (holding the virt/phys addresses) and the
    /// `iova` (I/O Virtual Address) that the device should use for DMA.
    ///
    /// # Errors
    /// Fails if the physical allocation fails or the IOMMU MAP request is
    /// rejected. On MAP failure the freshly allocated buffer is dropped,
    /// releasing its physical pages.
    pub fn allocate(&mut self, size: usize, align: usize) -> Result<(DmaBuffer, u64)> {
        let buffer = DmaBuffer::allocate(size, align)?;
        let phys = buffer.physical_address();
        let iova = self.map_to_iommu(phys as u64, buffer.len() as u64)?;
        // Counter is bumped only after both allocation and mapping succeed.
        self.alloc_count += 1;
        log::debug!(
            "IommuDmaAllocator: alloc #{}: phys={:#x} iova={:#x} size={}",
            self.alloc_count,
            phys,
            iova,
            size
        );
        Ok((buffer, iova))
    }

    /// Map a physical address range into the IOMMU domain and return the IOVA.
    fn map_to_iommu(&self, phys: u64, size: u64) -> Result<u64> {
        let response = write_iommu_request(
            self.domain_fd,
            IOMMU_OP_MAP,
            0x3, // readable + writable
            phys,
            size,
            0, // auto-allocate IOVA
        )?;
        if response.status != 0 {
            return Err(DriverError::Other(format!(
                "IOMMU MAP failed: phys={:#x} size={} status={}",
                phys, size, response.status
            )));
        }
        // For MAP, arg1 of the response carries the IOVA chosen by the daemon.
        Ok(response.arg1)
    }

    /// Unmap an IOVA range from the IOMMU domain.
    ///
    /// Best-effort: a failed UNMAP is logged rather than propagated, since
    /// this is typically called from teardown paths where there is no useful
    /// recovery. Note this does not decrement `alloc_count`.
    pub fn unmap(&self, iova: u64) {
        if let Err(e) = write_iommu_request(
            self.domain_fd,
            IOMMU_OP_UNMAP,
            0,
            iova,
            0,
            0,
        ) {
            log::warn!("IommuDmaAllocator: UNMAP iova={:#x} failed: {}", iova, e);
        }
    }

    /// Total number of successful allocations made through this allocator.
    ///
    /// This is a cumulative counter: `unmap` does not decrement it, so it is
    /// NOT the number of currently live mappings. (The previous doc described
    /// it as "active allocations", which the code never implemented.)
    pub fn alloc_count(&self) -> usize {
        self.alloc_count
    }
}
impl Drop for IommuDmaAllocator {
    /// Close the owned IOMMU domain descriptor when the allocator goes away.
    fn drop(&mut self) {
        if self.alloc_count > 0 {
            // Fix: alloc_count is cumulative (unmap never decrements it), so
            // the old message's "{} active allocations" overstated what is
            // known — report it as a total instead.
            log::info!(
                "IommuDmaAllocator: dropping after {} total allocations (IOMMU domain will clean up any live mappings)",
                self.alloc_count
            );
        }
        // The fd was taken over in `new`; close it here. The close result is
        // deliberately ignored — a destructor has no useful recovery path.
        let _ = libredox::call::close(self.domain_fd as usize);
    }
}