Advance firmware loading and AMD IOMMU (AMD-Vi) support

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-openagent)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
This commit is contained in:
2026-04-15 12:57:45 +01:00
parent 01ce7a649b
commit 9dd372ad14
7 changed files with 518 additions and 134 deletions
@@ -35,6 +35,14 @@ pub struct FirmwareRegistry {
}
impl FirmwareRegistry {
pub fn empty(base_dir: &Path) -> Self {
FirmwareRegistry {
base_dir: base_dir.to_path_buf(),
blobs: HashMap::new(),
cache: Arc::new(Mutex::new(HashMap::new())),
}
}
pub fn new(base_dir: &Path) -> Result<Self, BlobError> {
if !base_dir.exists() {
return Err(BlobError::DirNotFound(base_dir.to_path_buf()));
@@ -116,10 +116,20 @@ fn main() {
firmware_dir.display()
);
let registry = FirmwareRegistry::new(&firmware_dir).unwrap_or_else(|e| {
error!("firmware-loader: fatal error: failed to initialize firmware registry: {e}");
process::exit(1);
});
let registry = match FirmwareRegistry::new(&firmware_dir) {
Ok(registry) => registry,
Err(blob::BlobError::DirNotFound(_)) => {
error!(
"firmware-loader: firmware directory not found, starting with an empty registry: {}",
firmware_dir.display()
);
FirmwareRegistry::empty(&firmware_dir)
}
Err(e) => {
error!("firmware-loader: fatal error: failed to initialize firmware registry: {e}");
process::exit(1);
}
};
info!(
"firmware-loader: indexed {} firmware blob(s) from {}",
@@ -229,6 +229,7 @@ pub fn parse_ivrs(bytes: &[u8]) -> Result<IvrsInfo, IvrsError> {
let mut units = Vec::new();
let mut offset = IVRS_HEADER_BYTES;
let mut skipped_qemu_padding = false;
while offset < table.len() {
if offset + 4 > table.len() {
return Err(IvrsError::TruncatedEntry { offset });
@@ -239,6 +240,16 @@ pub fn parse_ivrs(bytes: &[u8]) -> Result<IvrsInfo, IvrsError> {
read_u16(table, offset + 2).ok_or(IvrsError::TruncatedEntry { offset })? as usize;
if entry_length < 4 {
if !skipped_qemu_padding
&& offset == IVRS_HEADER_BYTES
&& table.get(offset..offset + 8) == Some(&[0; 8])
{
// QEMU's AMD IOMMU model can place an extra 8-byte reserved region between the
// IVInfo field and the first IVHD entry. Skip it once and continue parsing.
skipped_qemu_padding = true;
offset += 8;
continue;
}
return Err(IvrsError::InvalidEntryLength {
offset,
length: entry_length,
@@ -453,6 +464,31 @@ mod tests {
bytes
}
/// Build an IVRS table whose first unit entry is preceded by the 8-byte
/// zeroed reserved region that QEMU's AMD IOMMU model emits between the
/// IVInfo field and the first IVHD entry.
fn build_ivrs_with_qemu_padding(units: &[Vec<u8>]) -> Vec<u8> {
    const PADDING: usize = 8;
    let payload_len: usize = units.iter().map(Vec::len).sum();
    let length = (IVRS_HEADER_BYTES + PADDING + payload_len) as u32;
    let mut bytes = vec![0u8; length as usize];
    bytes[0..4].copy_from_slice(b"IVRS");
    bytes[4..8].copy_from_slice(&length.to_le_bytes());
    bytes[8] = 3; // revision
    bytes[10..16].copy_from_slice(b"RDBEAR");
    bytes[16..24].copy_from_slice(b"AMDVI ");
    bytes[36..40].copy_from_slice(&0x0123_4567u32.to_le_bytes());
    // Unit blobs are laid out after the header plus the QEMU-style padding.
    let mut cursor = IVRS_HEADER_BYTES + PADDING;
    for unit in units {
        bytes[cursor..cursor + unit.len()].copy_from_slice(unit);
        cursor += unit.len();
    }
    // Two's-complement checksum so all table bytes sum to zero mod 256
    // (bytes[9] is still zero while the sum is computed).
    let sum = bytes.iter().copied().fold(0u8, u8::wrapping_add);
    bytes[9] = sum.wrapping_neg();
    bytes
}
fn build_ivhd(mmio_base: u64, iommu_bdf: Bdf, entries: &[u8]) -> Vec<u8> {
let length = (0x18 + entries.len()) as u16;
let mut bytes = vec![0u8; length as usize];
@@ -504,6 +540,20 @@ mod tests {
assert_eq!(unit.msi_number(), 2);
}
// Regression test: parse_ivrs() must tolerate the 8-byte zeroed reserved
// region that QEMU's AMD IOMMU model inserts before the first IVHD entry,
// instead of rejecting it as an invalid entry length.
#[test]
fn parses_ivrs_with_qemu_style_reserved_padding() {
// Four zero bytes act as padding device entries inside a single IVHD.
let unit_entries = [0x00, 0x00, 0x00, 0x00];
let table = build_ivrs_with_qemu_padding(&[build_ivhd(
0xfee0_0000,
Bdf::new(0, 0x18, 2),
&unit_entries,
)]);
let parsed = parse_ivrs(&table).unwrap_or_else(|err| panic!("IVRS parse failed: {err}"));
// Exactly the one IVHD should be discovered, with its MMIO base intact.
assert_eq!(parsed.units.len(), 1);
assert_eq!(parsed.units[0].mmio_base, 0xfee0_0000);
}
#[test]
fn all_entry_covers_entire_bus_space() {
let unit = IommuUnitInfo {
+100 -101
View File
@@ -1,13 +1,11 @@
use core::ptr::{read_volatile, write_volatile};
use log::{debug, warn};
use log::{debug, info};
use redox_driver_sys::memory::{CacheType, MmioProt, MmioRegion};
use crate::acpi::{parse_ivrs, Bdf, IommuUnitInfo, IvrsError};
use crate::command_buffer::{CommandBuffer, CommandEntry, EventLog, EventLogEntry};
use crate::device_table::{DeviceTable, DeviceTableEntry, DEVICE_TABLE_ENTRIES};
use crate::device_table::{DeviceTable, DeviceTableEntry};
use crate::interrupt::InterruptRemapTable;
use crate::mmio::{control, ext_feature, status, AmdViMmio, AMD_VI_MMIO_BYTES};
use crate::mmio::{control, ext_feature, offsets, status, AMD_VI_MMIO_BYTES};
use crate::page_table::DomainPageTables;
const CMD_BUF_LEN_ENCODING: u64 = 0x09;
@@ -20,7 +18,6 @@ const COMPLETION_TOKEN: u32 = 0xA11D_F00D;
struct MmioMapping {
region: MmioRegion,
base: *mut AmdViMmio,
}
pub struct AmdViUnit {
@@ -30,7 +27,6 @@ pub struct AmdViUnit {
command_buffer: Option<CommandBuffer>,
event_log: Option<EventLog>,
interrupt_table: Option<InterruptRemapTable>,
completion_store: Option<redox_driver_sys::dma::DmaBuffer>,
command_tail: usize,
event_head: usize,
initialized: bool,
@@ -59,7 +55,6 @@ impl AmdViUnit {
command_buffer: None,
event_log: None,
interrupt_table: None,
completion_store: None,
command_tail: 0,
event_head: 0,
initialized: false,
@@ -86,7 +81,7 @@ impl AmdViUnit {
let region = MmioRegion::map(
self.info.mmio_base,
AMD_VI_MMIO_BYTES,
CacheType::DeviceMemory,
CacheType::Uncacheable,
MmioProt::READ_WRITE,
)
.map_err(|err| {
@@ -95,8 +90,16 @@ impl AmdViUnit {
self.info.mmio_base
)
})?;
let base = region.as_ptr() as *mut AmdViMmio;
self.mmio = Some(MmioMapping { region, base });
self.mmio = Some(MmioMapping { region });
let control_initial = self.mmio_read32(offsets::CONTROL)?;
let status_initial = self.mmio_read32(offsets::STATUS)?;
info!(
"amd-vi: unit {} initial control={:#x} status={:#x}",
self.info.unit_id(),
control_initial,
status_initial
);
self.disable_unit()?;
@@ -123,15 +126,15 @@ impl AmdViUnit {
if ext & ext_feature::NX_SUP != 0 {
control_value |= control::NX_EN;
}
unsafe {
AmdViMmio::write_control(self.mmio_base()?, control_value);
}
let control_before = self.mmio_read32(offsets::CONTROL)?;
info!(
"amd-vi: unit {} control register before enable write = {:#x}",
self.info.unit_id(),
control_before
);
self.mmio_write32(offsets::CONTROL, control_value)?;
self.flush_configuration()?;
unsafe {
AmdViMmio::write_control(self.mmio_base()?, control_value | control::IOMMU_ENABLE);
}
self.mmio_write32(offsets::CONTROL, control_value | control::IOMMU_ENABLE)?;
self.wait_for_running(true)?;
self.initialized = true;
Ok(())
@@ -183,12 +186,11 @@ impl AmdViUnit {
return Ok(drained);
}
let base = self.mmio_base()?;
let event_log = self
.event_log
.as_ref()
.ok_or_else(|| "event log not initialized".to_string())?;
let tail = unsafe { AmdViMmio::read_evt_log_tail(base) as usize % event_log.capacity() };
let tail = (self.mmio_read64(offsets::EVT_LOG_TAIL)? as usize) % event_log.capacity();
while self.event_head != tail {
let event = event_log.read_entry(self.event_head);
@@ -196,9 +198,7 @@ impl AmdViUnit {
self.event_head = (self.event_head + 1) % event_log.capacity();
}
unsafe {
AmdViMmio::write_evt_log_head(base, self.event_head as u64);
}
self.mmio_write64(offsets::EVT_LOG_HEAD, self.event_head as u64)?;
Ok(drained)
}
@@ -213,17 +213,13 @@ impl AmdViUnit {
}
/// Clear the control register (dropping IOMMU_ENABLE along with every other
/// control bit) and block until the unit reports it has stopped running.
///
/// The interleaved pre-refactor `AmdViMmio::write_control` path is removed:
/// the text carried both the old unsafe write and the new `mmio_write32`
/// wrapper, clearing control twice and leaving `base` unused.
fn disable_unit(&mut self) -> Result<(), String> {
    self.mmio_write32(offsets::CONTROL, 0)?;
    self.wait_for_running(false)
}
fn wait_for_running(&self, expected: bool) -> Result<(), String> {
let base = self.mmio_base()?;
for _ in 0..100_000 {
let running = unsafe { AmdViMmio::read_status(base) } & status::IOMMU_RUNNING != 0;
let running = self.mmio_read32(offsets::STATUS)? & status::IOMMU_RUNNING != 0;
if running == expected {
return Ok(());
}
@@ -242,122 +238,125 @@ impl AmdViUnit {
command_buffer: &CommandBuffer,
event_log: &EventLog,
) -> Result<(), String> {
let base = self.mmio_base()?;
unsafe {
AmdViMmio::write_dev_table_bar(
base,
(device_table.physical_address() as u64 & !0xFFF) | DEV_TABLE_SIZE_ENCODING,
);
AmdViMmio::write_cmd_buf_bar(
base,
(command_buffer.physical_address() as u64 & !0xFFF) | CMD_BUF_LEN_ENCODING,
);
AmdViMmio::write_evt_log_bar(
base,
(event_log.physical_address() as u64 & !0xFFF) | EVT_LOG_LEN_ENCODING,
);
AmdViMmio::write_exclusion_base(base, 0);
AmdViMmio::write_exclusion_limit(base, 0);
}
self.mmio_write64(
offsets::DEV_TABLE_BAR,
(device_table.physical_address() as u64 & !0xFFF) | DEV_TABLE_SIZE_ENCODING,
)?;
self.mmio_write64(
offsets::CMD_BUF_BAR,
(command_buffer.physical_address() as u64 & !0xFFF) | CMD_BUF_LEN_ENCODING,
)?;
self.mmio_write64(
offsets::EVT_LOG_BAR,
(event_log.physical_address() as u64 & !0xFFF) | EVT_LOG_LEN_ENCODING,
)?;
self.mmio_write64(offsets::EXCLUSION_BASE, 0)?;
self.mmio_write64(offsets::EXCLUSION_LIMIT, 0)?;
Ok(())
}
/// Reset the command-buffer and event-log ring pointers.
///
/// The command head/tail registers and the software shadow `command_tail`
/// start at `CommandBuffer::FIRST_COMMAND_INDEX` because slot 0 of the
/// command ring is reserved as the completion-wait store; the event log
/// head restarts at 0. The stale pre-refactor zero-resets that were
/// interleaved here (via the removed `AmdViMmio` raw-pointer API, plus
/// `command_tail = 0` immediately overwritten) are removed.
fn reset_ring_pointers(&mut self) -> Result<(), String> {
    self.mmio_write64(
        offsets::CMD_BUF_HEAD,
        CommandBuffer::FIRST_COMMAND_INDEX as u64,
    )?;
    self.mmio_write64(
        offsets::CMD_BUF_TAIL,
        CommandBuffer::FIRST_COMMAND_INDEX as u64,
    )?;
    self.mmio_write64(offsets::EVT_LOG_HEAD, 0)?;
    self.command_tail = CommandBuffer::FIRST_COMMAND_INDEX;
    self.event_head = 0;
    Ok(())
}
// Invalidate cached translations after (re)programming the device table.
// Fast path: if the hardware advertises INVALIDATE_ALL support (IA_SUP),
// one command suffices; otherwise each valid device-table entry is
// invalidated individually. Either way, a completion wait flushes the queue.
fn flush_configuration(&mut self) -> Result<(), String> {
let ext = self.mmio_read_extended_feature()?;
if ext & ext_feature::IA_SUP != 0 {
self.submit_command(CommandEntry::invalidate_all())?;
} else if let Some(table) = self.device_table.as_ref() {
// Collect the device ids first: `table` borrows `self.device_table`
// immutably, while submit_command needs `&mut self`, so the two
// cannot be interleaved in a single loop.
let mut pending_invalidations = Vec::new();
for device_id in 0..DEVICE_TABLE_ENTRIES {
let entry = table.get_entry(device_id as u16);
if entry.valid() {
pending_invalidations.push(device_id as u16);
}
}
for device_id in pending_invalidations {
self.submit_command(CommandEntry::invalidate_devtab_entry(device_id))?;
}
} else {
// Nothing to invalidate yet; best-effort warning rather than an error.
warn!("amd-vi: device table not yet allocated while flushing configuration");
}
self.wait_for_completion()
}
/// Enqueue one command into the AMD-Vi command ring and advance the
/// hardware tail pointer.
///
/// Slot 0 of the ring is reserved as the completion-wait store, so the tail
/// wraps back to `CommandBuffer::FIRST_COMMAND_INDEX` rather than 0.
/// Errors if the MMIO region / command buffer is uninitialized or if
/// advancing the tail would collide with the hardware head (ring full).
///
/// The interleaved pre-refactor lines (second unsafe head read shadowing
/// `head`, modulo-based `next_tail`, and the duplicate unsafe tail write)
/// are removed in favor of the wrapper-based new path.
fn submit_command(&mut self, command: CommandEntry) -> Result<(), String> {
    // Read the hardware head before taking the mutable command-buffer borrow.
    let head_raw = self.mmio_read64(offsets::CMD_BUF_HEAD)? as usize;
    let command_buffer = self
        .command_buffer
        .as_mut()
        .ok_or_else(|| "command buffer not initialized".to_string())?;
    let head = head_raw % command_buffer.capacity();
    // Wrap past the reserved slot 0.
    let next_tail = if self.command_tail + 1 >= command_buffer.capacity() {
        CommandBuffer::FIRST_COMMAND_INDEX
    } else {
        self.command_tail + 1
    };
    if next_tail == head {
        return Err("AMD-Vi command buffer is full".to_string());
    }
    command_buffer.write_command(self.command_tail, &command);
    self.command_tail = next_tail;
    self.mmio_write64(offsets::CMD_BUF_TAIL, self.command_tail as u64)?;
    Ok(())
}
fn wait_for_completion(&mut self) -> Result<(), String> {
let completion_store = match self.completion_store.take() {
Some(buffer) => buffer,
None => redox_driver_sys::dma::DmaBuffer::allocate(8, 8)
.map_err(|err| format!("failed to allocate completion wait store: {err}"))?,
let completion_dma = {
let command_buffer = self
.command_buffer
.as_mut()
.ok_or_else(|| "command buffer not initialized".to_string())?;
info!(
"amd-vi: unit {} completion store cpu={:#x} dma={:#x} (command-slot-0)",
self.info.unit_id(),
command_buffer.completion_store_cpu_ptr() as usize,
command_buffer.completion_store_dma_addr(),
);
command_buffer.clear_completion_store();
command_buffer.completion_store_dma_addr()
};
let completion_ptr = completion_store.as_ptr() as *const u32;
let completion_mut = completion_store.as_ptr() as *mut u32;
unsafe {
write_volatile(completion_mut, 0);
}
let completion_phys = completion_store.physical_address() as u64;
self.submit_command(CommandEntry::completion_wait(
completion_phys,
completion_dma,
COMPLETION_TOKEN,
))?;
for _ in 0..100_000 {
if unsafe { read_volatile(completion_ptr) } == COMPLETION_TOKEN {
self.completion_store = Some(completion_store);
if self
.command_buffer
.as_ref()
.ok_or_else(|| "command buffer not initialized".to_string())?
.read_completion_store()
== COMPLETION_TOKEN
{
return Ok(());
}
std::hint::spin_loop();
}
self.completion_store = Some(completion_store);
Err("timed out waiting for AMD-Vi command completion".to_string())
}
/// Read the 64-bit extended feature register through the typed MMIO
/// accessor layer (the stale unsafe raw-pointer read path is removed).
fn mmio_read_extended_feature(&self) -> Result<u64, String> {
    self.mmio_read64(offsets::EXTENDED_FEATURE)
}
fn mmio_base(&self) -> Result<*mut AmdViMmio, String> {
fn mmio_region(&self) -> Result<&MmioRegion, String> {
self.mmio
.as_ref()
.map(|mapping| mapping.base)
.map(|mapping| &mapping.region)
.ok_or_else(|| "AMD-Vi MMIO is not mapped".to_string())
}
// Thin typed accessors over the mapped MMIO region. Each fails with a
// descriptive error (via mmio_region) when MMIO has not been mapped yet.
fn mmio_read32(&self, offset: usize) -> Result<u32, String> {
Ok(self.mmio_region()?.read32(offset))
}
fn mmio_write32(&self, offset: usize, value: u32) -> Result<(), String> {
self.mmio_region()?.write32(offset, value);
Ok(())
}
fn mmio_read64(&self, offset: usize) -> Result<u64, String> {
Ok(self.mmio_region()?.read64(offset))
}
fn mmio_write64(&self, offset: usize, value: u64) -> Result<(), String> {
self.mmio_region()?.write64(offset, value);
Ok(())
}
}
impl Drop for AmdViUnit {
@@ -143,9 +143,12 @@ pub struct CommandBuffer {
}
impl CommandBuffer {
pub const RESERVED_COMPLETION_INDEX: usize = 0;
pub const FIRST_COMMAND_INDEX: usize = 1;
pub fn new(entry_count: usize) -> Result<Self, &'static str> {
if entry_count == 0 {
return Err("IOMMU command buffer entry count must be non-zero");
if entry_count <= Self::FIRST_COMMAND_INDEX {
return Err("IOMMU command buffer entry count must leave room for command entries");
}
let byte_len = entry_count
@@ -187,6 +190,22 @@ impl CommandBuffer {
self.capacity
}
// Slot 0 of the command ring doubles as the completion-wait store (see
// RESERVED_COMPLETION_INDEX), so its DMA address is simply the buffer's
// base physical address.
pub fn completion_store_dma_addr(&self) -> u64 {
self.buffer.physical_address() as u64
}
// Zero the reserved slot before issuing a new COMPLETION_WAIT command.
pub fn clear_completion_store(&mut self) {
self.commands_mut()[0] = CommandEntry::default();
}
// Volatile read of the token the IOMMU DMA-writes on completion.
// SAFETY relies on the buffer base being live, owned DMA memory; 4-byte
// alignment is presumed from the DMA allocation — TODO confirm.
pub fn read_completion_store(&self) -> u32 {
unsafe { core::ptr::read_volatile(self.buffer.as_ptr() as *const u32) }
}
// CPU-side pointer to the store; currently used for diagnostics logging.
pub fn completion_store_cpu_ptr(&self) -> *mut u32 {
self.buffer.as_ptr() as *mut u32
}
fn commands_mut(&mut self) -> &mut [CommandEntry] {
unsafe {
slice::from_raw_parts_mut(self.buffer.as_mut_ptr() as *mut CommandEntry, self.capacity)
@@ -282,6 +301,31 @@ impl EventLog {
self.capacity
}
// EventLog variant of the completion-wait store, placed in the log's LAST
// entry slot (unlike CommandBuffer, which uses command slot 0).
// NOTE(review): the amd_vi unit code shown here uses the CommandBuffer
// store; whether this EventLog store has a live caller is not visible in
// this chunk — confirm before relying on it.
pub fn completion_store_dma_addr(&self) -> u64 {
let offset = (self.capacity - 1) * EVENT_LOG_ENTRY_SIZE;
(self.buffer.physical_address() + offset) as u64
}
// CPU-side pointer to the reserved final slot.
pub fn completion_store_cpu_ptr(&self) -> *mut u32 {
let offset = (self.capacity - 1) * EVENT_LOG_ENTRY_SIZE;
unsafe { self.buffer.as_ptr().add(offset) as *mut u32 }
}
// Zero the whole reserved slot before issuing a COMPLETION_WAIT.
pub fn clear_completion_store(&mut self) {
let offset = (self.capacity - 1) * EVENT_LOG_ENTRY_SIZE;
unsafe {
core::ptr::write_bytes(
self.buffer.as_mut_ptr().add(offset),
0,
EVENT_LOG_ENTRY_SIZE,
)
};
}
// Volatile read of the token the IOMMU DMA-writes on completion.
pub fn read_completion_store(&self) -> u32 {
unsafe { core::ptr::read_volatile(self.completion_store_cpu_ptr() as *const u32) }
}
fn entries(&self) -> &[EventLogEntry] {
unsafe {
slice::from_raw_parts(self.buffer.as_ptr() as *const EventLogEntry, self.capacity)
@@ -24,6 +24,7 @@ pub mod opcode {
pub const QUERY: u16 = 0x0000;
pub const CREATE_DOMAIN: u16 = 0x0001;
pub const DESTROY_DOMAIN: u16 = 0x0002;
pub const INIT_UNITS: u16 = 0x0003;
pub const MAP: u16 = 0x0010;
pub const UNMAP: u16 = 0x0011;
pub const ASSIGN_DEVICE: u16 = 0x0020;
@@ -213,6 +214,26 @@ impl IommuScheme {
(1..u16::MAX).find(|domain_id| !self.domains.contains_key(domain_id))
}
/// Lazily initialize the AMD-Vi unit at `unit_index`.
///
/// Returns `ENODEV` when the index is out of range and `EIO` (after logging)
/// when hardware initialization fails; an already-initialized unit is a
/// no-op success.
fn ensure_unit_initialized(&mut self, unit_index: usize) -> core::result::Result<(), i32> {
    match self.units.get_mut(unit_index) {
        None => Err(ENODEV as i32),
        Some(unit) if unit.initialized() => Ok(()),
        Some(unit) => unit.init().map_err(|err| {
            log::error!(
                "iommu: failed to initialize unit {} at MMIO {:#x}: {}",
                unit_index,
                unit.info().mmio_base,
                err
            );
            EIO as i32
        }),
    }
}
fn root_listing(&self) -> Vec<u8> {
let mut listing = String::from("control\n");
for (index, unit) in self.units.iter().enumerate() {
@@ -310,6 +331,49 @@ impl IommuScheme {
self.device_assignments.len() as u64,
self.units.iter().filter(|unit| unit.initialized()).count() as u64,
),
opcode::INIT_UNITS => {
let requested_index = if request.arg0 == u32::MAX {
None
} else {
Some(request.arg0 as usize)
};
let mut initialized_now = 0u32;
let mut attempted = 0u64;
for index in 0..self.units.len() {
if requested_index.is_some() && requested_index != Some(index) {
continue;
}
attempted += 1;
let was_initialized = self
.units
.get(index)
.map(|unit| unit.initialized())
.unwrap_or(false);
if let Err(errno) = self.ensure_unit_initialized(index) {
return IommuResponse::error(request.opcode, errno);
}
if !was_initialized {
initialized_now = initialized_now.saturating_add(1);
}
}
let initialized_total =
self.units.iter().filter(|unit| unit.initialized()).count() as u64;
IommuResponse::success(
request.opcode,
initialized_now,
attempted,
initialized_total,
requested_index
.map(|index| index as u64)
.unwrap_or(u64::MAX),
)
}
opcode::CREATE_DOMAIN => {
let domain_id = if request.arg0 == 0 {
match self.next_domain_id() {
@@ -364,6 +428,9 @@ impl IommuScheme {
if requested_index.is_some() && requested_index != Some(index) {
continue;
}
if !unit.initialized() {
continue;
}
match unit.drain_events() {
Ok(events) => {
if let Some(event) = events.first() {
@@ -485,9 +552,14 @@ impl IommuScheme {
Err(errno) => return IommuResponse::error(request.opcode, errno),
};
if let Err(errno) = self.ensure_unit_initialized(unit_index) {
return IommuResponse::error(request.opcode, errno);
}
let Some(domain) = self.domains.get(&domain_id) else {
return IommuResponse::error(request.opcode, ENOENT as i32);
};
let Some(unit) = self.units.get_mut(unit_index) else {
return IommuResponse::error(request.opcode, ENODEV as i32);
};
@@ -824,6 +896,26 @@ mod tests {
assert_eq!(query_response.arg1, 1);
}
// INIT_UNITS with arg0 == u32::MAX means "initialize all units"; with no
// units present it must succeed reporting zero initialized, zero attempted.
#[test]
fn init_units_on_empty_scheme_is_a_noop_success() {
let mut scheme = IommuScheme::new();
// Open the control node to obtain a handle for request/response traffic.
let control = scheme
.open("control", 0, 0, 0)
.unwrap_or_else(|err| panic!("open control failed: {err}"))
.unwrap_or_else(|| panic!("control open returned no handle"));
let request = IommuRequest::new(opcode::INIT_UNITS, u32::MAX, 0, 0, 0);
scheme
.write(control, &request.to_bytes())
.unwrap_or_else(|err| panic!("init units write failed: {err}"));
let response = read_response(&mut scheme, control);
// status 0 = success; arg0/arg1/arg2 = initialized-now/attempted/total.
assert_eq!(response.status, 0);
assert_eq!(response.arg0, 0);
assert_eq!(response.arg1, 0);
assert_eq!(response.arg2, 0);
}
#[test]
fn domain_handle_can_map_pages() {
let mut scheme = IommuScheme::new();
+208 -27
View File
@@ -2,6 +2,7 @@
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process;
use iommu::amd_vi::AmdViUnit;
@@ -9,7 +10,13 @@ use iommu::amd_vi::AmdViUnit;
use iommu::IommuScheme;
use log::{error, info, LevelFilter, Metadata, Record};
#[cfg(target_os = "redox")]
use redox_driver_sys::memory::{CacheType, MmioProt, MmioRegion};
#[cfg(target_os = "redox")]
use redox_scheme::{SignalBehavior, Socket};
#[cfg(target_os = "redox")]
use syscall::EBADF;
#[cfg(target_os = "redox")]
use syscall::PAGE_SIZE;
struct StderrLogger {
level: LevelFilter,
@@ -37,39 +44,123 @@ fn init_logging(level: LevelFilter) {
log::set_max_level(level);
}
fn detect_units_from_env() -> Result<Vec<AmdViUnit>, String> {
let Some(path) = env::var_os("IOMMU_IVRS_PATH") else {
/// Well-known filesystem locations where an IVRS ACPI table may be exposed,
/// in priority order (first existing path wins during discovery).
fn candidate_ivrs_paths() -> Vec<PathBuf> {
    [
        "/sys/firmware/acpi/tables/IVRS",
        "/sys/firmware/acpi/tables/data/IVRS",
        "/boot/acpi/IVRS",
        "/acpi/tables/IVRS",
    ]
    .iter()
    .map(|location| PathBuf::from(*location))
    .collect()
}
/// Resolve the IVRS table path: an explicit `IOMMU_IVRS_PATH` environment
/// override always wins; otherwise the first candidate that exists on disk
/// is chosen, and `None` if none exist.
fn discover_ivrs_path_from_candidates(candidates: &[PathBuf]) -> Option<PathBuf> {
    match env::var_os("IOMMU_IVRS_PATH") {
        Some(explicit) => Some(PathBuf::from(explicit)),
        None => candidates
            .iter()
            .find(|candidate| candidate.exists())
            .cloned(),
    }
}
// Convenience wrapper over discover_ivrs_path_from_candidates with the
// built-in candidate list; the split exists so tests can inject candidates.
fn discover_ivrs_path() -> Option<PathBuf> {
discover_ivrs_path_from_candidates(&candidate_ivrs_paths())
}
/// Locate an IVRS table on the filesystem (env override or well-known paths)
/// and parse it into AMD-Vi unit descriptors.
///
/// Returns an empty list when no IVRS source is found; errors when a
/// discovered table cannot be read or parsed. The interleaved pre-refactor
/// multi-line `to_string_lossy` read is removed in favor of the
/// `path.display()` form.
fn detect_units() -> Result<Vec<AmdViUnit>, String> {
    let Some(path) = discover_ivrs_path() else {
        return Ok(Vec::new());
    };
    let bytes = fs::read(&path)
        .map_err(|err| format!("failed to read IVRS table from {}: {err}", path.display()))?;
    let units = AmdViUnit::detect(&bytes).map_err(|err| format!("failed to parse IVRS: {err}"))?;
    Ok(units)
}
#[cfg(target_os = "redox")]
fn run() -> Result<(), String> {
let mut units = detect_units_from_env()?;
info!("iommu: detected {} AMD-Vi unit(s)", units.len());
for (index, unit) in units.iter_mut().enumerate() {
match unit.init() {
Ok(()) => info!(
"iommu: initialized unit {} at MMIO {:#x}",
index,
unit.info().mmio_base
),
Err(err) => error!(
"iommu: failed to initialize unit {} at MMIO {:#x}: {}",
index,
unit.info().mmio_base,
err
),
const ACPI_HEADER_LEN: usize = 36;
#[cfg(target_os = "redox")]
/// Map and copy out one ACPI System Description Table at `phys_addr`.
///
/// First maps enough pages to read the fixed 36-byte SDT header, extracts the
/// total table length from header bytes 4..8, then remaps enough pages to
/// cover the full table and copies it into a Vec.
///
/// Fix: the header mapping previously covered exactly one page, so a table
/// starting within `ACPI_HEADER_LEN - 1` bytes of a page end would read past
/// the mapped region; the header map length now rounds up like the full map.
fn read_sdt_from_physical(phys_addr: u64) -> Result<Vec<u8>, String> {
    // Mapping is page-granular: round down to a page boundary and keep the
    // in-page offset of the table.
    let page_base = phys_addr / PAGE_SIZE as u64 * PAGE_SIZE as u64;
    let page_offset = (phys_addr - page_base) as usize;
    // Cover the whole header even when it crosses the first page boundary.
    let header_map_len = (page_offset + ACPI_HEADER_LEN).next_multiple_of(PAGE_SIZE);
    let header_map =
        MmioRegion::map(page_base, header_map_len, CacheType::WriteBack, MmioProt::READ)
            .map_err(|err| format!("failed to map ACPI header page at {page_base:#x}: {err}"))?;
    let mut header = vec![0_u8; ACPI_HEADER_LEN];
    for (i, byte) in header.iter_mut().enumerate() {
        *byte = header_map.read8(page_offset + i);
    }
    // Every ACPI SDT stores its total length (header included) at bytes 4..8.
    let length = u32::from_le_bytes([header[4], header[5], header[6], header[7]]) as usize;
    if length < ACPI_HEADER_LEN {
        return Err(format!(
            "invalid ACPI SDT length {length} at {phys_addr:#x}"
        ));
    }
    // Remap with enough pages to cover the entire table, then copy it out.
    let map_len = (page_offset + length).next_multiple_of(PAGE_SIZE);
    let full_map = MmioRegion::map(page_base, map_len, CacheType::WriteBack, MmioProt::READ)
        .map_err(|err| format!("failed to map ACPI table at {page_base:#x}: {err}"))?;
    let mut bytes = vec![0_u8; length];
    for (i, byte) in bytes.iter_mut().enumerate() {
        *byte = full_map.read8(page_offset + i);
    }
    Ok(bytes)
}
/// Walk the kernel-exposed RSDT/XSDT root table and parse the first IVRS
/// table it references.
///
/// Returns an empty list when the root table is too short or has an
/// unrecognized signature; errors when the kernel ACPI scheme cannot be
/// read or a referenced table cannot be mapped/parsed.
#[cfg(target_os = "redox")]
fn detect_units_from_kernel_acpi() -> Result<Vec<AmdViUnit>, String> {
    let rxsdt = fs::read("/scheme/kernel.acpi/rxsdt")
        .map_err(|err| format!("failed to read /scheme/kernel.acpi/rxsdt: {err}"))?;
    if rxsdt.len() < ACPI_HEADER_LEN {
        return Ok(Vec::new());
    }
    // RSDT entries are 32-bit physical pointers; XSDT entries are 64-bit.
    let entry_size = match &rxsdt[0..4] {
        b"RSDT" => 4,
        b"XSDT" => 8,
        _ => return Ok(Vec::new()),
    };
    let mut offset = ACPI_HEADER_LEN;
    while offset + entry_size <= rxsdt.len() {
        let phys_addr = if entry_size == 4 {
            u32::from_le_bytes(rxsdt[offset..offset + 4].try_into().unwrap()) as u64
        } else {
            u64::from_le_bytes(rxsdt[offset..offset + 8].try_into().unwrap())
        };
        let table = read_sdt_from_physical(phys_addr)?;
        if table.len() >= 4 && &table[0..4] == b"IVRS" {
            return AmdViUnit::detect(&table).map_err(|err| format!("failed to parse IVRS: {err}"));
        }
        offset += entry_size;
    }
    Ok(Vec::new())
}
#[cfg(target_os = "redox")]
fn run() -> Result<(), String> {
let units = detect_units_from_kernel_acpi().or_else(|err| {
info!("iommu: kernel ACPI discovery unavailable: {err}");
detect_units()
})?;
info!("iommu: detected {} AMD-Vi unit(s)", units.len());
for (index, unit) in units.iter().enumerate() {
info!(
"iommu: discovered unit {} at MMIO {:#x}; initialization is deferred until first use",
index,
unit.info().mmio_base
);
}
let socket =
@@ -86,6 +177,10 @@ fn run() -> Result<(), String> {
break;
}
Err(e) => {
if e.errno == EBADF {
info!("iommu: scheme fd closed, exiting");
break;
}
error!("iommu: failed to read scheme request: {e}");
continue;
}
@@ -107,16 +202,71 @@ fn run() -> Result<(), String> {
Ok(())
}
// Hardware self-test entry point (invoked via --self-test-init): discovers,
// force-initializes, and drains events from every AMD-Vi unit, printing
// machine-readable key=value results to stdout for the test harness.
#[cfg(target_os = "redox")]
fn run_self_test() -> Result<(), String> {
// Prefer kernel ACPI discovery; fall back to filesystem IVRS paths.
let mut units = detect_units_from_kernel_acpi().or_else(|err| {
info!("iommu: kernel ACPI discovery unavailable: {err}");
detect_units()
})?;
println!("units_detected={}", units.len());
// Unlike normal operation (deferred init), the self-test requires at
// least one unit and treats any init/drain failure as fatal.
if units.is_empty() {
return Err("iommu self-test detected zero AMD-Vi unit(s)".to_string());
}
let mut initialized_now = 0u32;
let mut events_drained = 0u32;
for (index, unit) in units.iter_mut().enumerate() {
let was_initialized = unit.initialized();
unit.init().map_err(|err| {
format!(
"iommu self-test failed to initialize unit {} at MMIO {:#x}: {}",
index,
unit.info().mmio_base,
err
)
})?;
if !was_initialized {
initialized_now = initialized_now.saturating_add(1);
}
// Draining exercises the event-log ring pointers end to end.
let drained = unit.drain_events().map_err(|err| {
format!(
"iommu self-test failed to drain events for unit {} at MMIO {:#x}: {}",
index,
unit.info().mmio_base,
err
)
})?;
events_drained = events_drained.saturating_add(drained.len() as u32);
}
let initialized_after = units.iter().filter(|unit| unit.initialized()).count() as u64;
println!("units_initialized_now={}", initialized_now);
println!("units_attempted={}", units.len());
println!("units_initialized_after={}", initialized_after);
println!("events_drained={}", events_drained);
Ok(())
}
#[cfg(not(target_os = "redox"))]
/// Host-build stub: parse any discovered IVRS source so CI can exercise the
/// detection path, but drive no hardware. The interleaved pre-refactor
/// `detect_units_from_env()` call and its old log string are removed.
fn run() -> Result<(), String> {
    let units = detect_units()?;
    info!(
        "iommu: host build stub active; parsed {} AMD-Vi unit(s) from discovered IVRS source",
        units.len()
    );
    Ok(())
}
/// The self-test drives real AMD-Vi hardware, so it is only meaningful on
/// Redox; host builds always fail with an explanatory message.
#[cfg(not(target_os = "redox"))]
fn run_self_test() -> Result<(), String> {
    Err(String::from("iommu self-test requires target_os=redox"))
}
fn main() {
let log_level = match env::var("IOMMU_LOG").as_deref() {
Ok("debug") => LevelFilter::Debug,
@@ -128,8 +278,39 @@ fn main() {
init_logging(log_level);
if let Err(e) = run() {
let result = if env::args().any(|arg| arg == "--self-test-init") {
run_self_test()
} else {
run()
};
if let Err(e) = result {
error!("iommu: fatal error: {e}");
process::exit(1);
}
}
#[cfg(test)]
mod tests {
use super::{candidate_ivrs_paths, discover_ivrs_path_from_candidates};
use std::path::PathBuf;
// The candidate list is part of the discovery contract; pin its contents.
#[test]
fn candidate_paths_include_standard_ivrs_locations() {
let candidates = candidate_ivrs_paths();
assert!(candidates.contains(&PathBuf::from("/sys/firmware/acpi/tables/IVRS")));
assert!(candidates.contains(&PathBuf::from("/sys/firmware/acpi/tables/data/IVRS")));
assert!(candidates.contains(&PathBuf::from("/boot/acpi/IVRS")));
assert!(candidates.contains(&PathBuf::from("/acpi/tables/IVRS")));
}
// NOTE(review): this test assumes /tmp exists and IOMMU_IVRS_PATH is unset
// in the test environment — confirm both hold in CI, or it will flake.
#[test]
fn discovery_chooses_first_existing_candidate() {
let candidates = vec![
PathBuf::from("/definitely/missing/ivrs"),
PathBuf::from("/tmp"),
];
let discovered = discover_ivrs_path_from_candidates(&candidates);
assert_eq!(discovered, Some(PathBuf::from("/tmp")));
}
}