milestone: desktop path Phases 1-5

Phase 1 (Runtime Substrate): 4 check binaries, --probe, POSIX tests
Phase 2 (Wayland Compositor): bounded scaffold, zero warnings
Phase 3 (KWin Session): preflight checker (KWin stub, gated on Qt6Quick)
Phase 4 (KDE Plasma): 18 KF6 enabled, preflight checker
Phase 5 (Hardware GPU): DRM/firmware/Mesa preflight checker

Build: zero warnings, all scripts syntax-clean. Oracle-verified.
This commit is contained in:
2026-04-29 09:54:06 +01:00
parent b23714f542
commit 8acc73d774
508 changed files with 76526 additions and 396 deletions
+873
View File
@@ -0,0 +1,873 @@
use acpi::aml::object::{Object, WrappedObject};
use acpi::aml::op_region::{RegionHandler, RegionSpace};
use rustc_hash::FxHashMap;
use std::convert::{TryFrom, TryInto};
use std::error::Error;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::{Arc, Mutex};
use std::{fmt, mem};
use syscall::PAGE_SIZE;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use common::io::{Io, Pio};
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use thiserror::Error;
use acpi::{
aml::{namespace::AmlName, AmlError, Interpreter},
platform::AcpiPlatform,
AcpiTables,
};
use amlserde::aml_serde_name::aml_to_symbol;
use amlserde::{AmlSerde, AmlSerdeValue};
#[cfg(target_arch = "x86_64")]
pub mod dmar;
use crate::aml_physmem::{AmlPageCache, AmlPhysMemHandler};
/// The raw SDT header struct, as defined by the ACPI specification.
///
/// Every ACPI system description table begins with this 36-byte header.
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct SdtHeader {
// Four-character table signature, e.g. b"FACP" or b"SSDT".
pub signature: [u8; 4],
// Total table length in bytes, including this header.
pub length: u32,
pub revision: u8,
// Entire table (including this byte) must sum to 0 modulo 256.
pub checksum: u8,
pub oem_id: [u8; 6],
pub oem_table_id: [u8; 8],
pub oem_revision: u32,
pub creator_id: u32,
pub creator_revision: u32,
}
// SAFETY: SdtHeader is #[repr(C, packed)] and contains only plain integers,
// so any byte pattern is a valid value.
unsafe impl plain::Plain for SdtHeader {}
impl SdtHeader {
/// Returns the (signature, OEM id, OEM table id) triple that uniquely
/// identifies this particular table instance.
pub fn signature(&self) -> SdtSignature {
SdtSignature {
signature: self.signature,
oem_id: self.oem_id,
oem_table_id: self.oem_table_id,
}
}
/// Total table length in bytes, as declared by the header itself.
pub fn length(&self) -> usize {
self.length
.try_into()
.expect("expected usize to be at least 32 bits")
}
}
/// A composite key identifying one table instance: signature plus the OEM
/// id/table-id pair (several tables can share the same 4-byte signature).
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct SdtSignature {
pub signature: [u8; 4],
pub oem_id: [u8; 6],
pub oem_table_id: [u8; 8],
}
impl fmt::Display for SdtSignature {
    /// Renders the signature as `SIG-OEMID-OEMTABLEID`, converting each
    /// field lossily from (possibly non-UTF-8) firmware bytes.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let sig = String::from_utf8_lossy(&self.signature);
        let oem = String::from_utf8_lossy(&self.oem_id);
        let table = String::from_utf8_lossy(&self.oem_table_id);
        write!(f, "{sig}-{oem}-{table}")
    }
}
/// Errors that can occur while loading an SDT from physical memory.
#[derive(Debug, Error)]
pub enum TablePhysLoadError {
// TODO: Make syscall::Error implement std::error::Error, when enabling a Cargo feature.
#[error("i/o error: {0}")]
Io(#[from] std::io::Error),
#[error("invalid SDT: {0}")]
Validity(#[from] InvalidSdtError),
}
/// Validation failures for a candidate SDT byte slice.
#[derive(Debug, Error)]
pub enum InvalidSdtError {
// Slice shorter than the header, or header length != slice length.
#[error("invalid size")]
InvalidSize,
// The table bytes do not sum to zero modulo 256.
#[error("invalid checksum")]
BadChecksum,
}
/// RAII guard over a read-only mapping of physical memory; the mapping is
/// released when the guard is dropped.
struct PhysmapGuard {
// Start of the virtual mapping.
virt: *const u8,
// Size of the mapping in bytes (a whole number of pages).
size: usize,
}
impl PhysmapGuard {
/// Maps `page_count` pages starting at the page-aligned physical address
/// `page` as read-only memory.
fn map(page: usize, page_count: usize) -> std::io::Result<Self> {
let size = page_count * PAGE_SIZE;
let virt = unsafe {
common::physmap(page, size, common::Prot::RO, common::MemoryType::default())
.map_err(|error| std::io::Error::from_raw_os_error(error.errno()))?
};
Ok(Self {
virt: virt as *const u8,
size,
})
}
}
impl Deref for PhysmapGuard {
type Target = [u8];
fn deref(&self) -> &Self::Target {
// SAFETY: `virt` points to a live mapping of exactly `size` bytes which
// remains valid until this guard is dropped.
unsafe { std::slice::from_raw_parts(self.virt as *const u8, self.size) }
}
}
impl Drop for PhysmapGuard {
fn drop(&mut self) {
unsafe {
// Best-effort unmap; nothing useful can be done if it fails.
let _ = libredox::call::munmap(self.virt as *mut (), self.size);
}
}
}
/// An owned, validated ACPI system description table (header + payload).
/// Cheap to clone: the bytes are shared via `Arc`.
#[derive(Clone)]
pub struct Sdt(Arc<[u8]>);
impl Sdt {
/// Validates a byte slice as an SDT: the header must parse, the header's
/// declared length must equal the slice length, and the whole table must
/// checksum to zero (mod 256).
pub fn new(slice: Arc<[u8]>) -> Result<Self, InvalidSdtError> {
let header = match plain::from_bytes::<SdtHeader>(&slice) {
Ok(header) => header,
Err(plain::Error::TooShort) => return Err(InvalidSdtError::InvalidSize),
Err(plain::Error::BadAlignment) => panic!(
"plain::from_bytes failed due to alignment, but SdtHeader is #[repr(packed)]!"
),
};
if header.length() != slice.len() {
return Err(InvalidSdtError::InvalidSize);
}
// Per the ACPI spec, all table bytes (checksum field included) must
// sum to zero modulo 256.
let checksum = slice
.iter()
.copied()
.fold(0_u8, |current_sum, item| current_sum.wrapping_add(item));
if checksum != 0 {
return Err(InvalidSdtError::BadChecksum);
}
Ok(Self(slice))
}
/// Loads and validates an SDT located at the given physical address.
///
/// The header is mapped first (it may straddle a page boundary), the
/// declared total length is read from it, and any remaining bytes are
/// then copied in chunks of up to `SIMULTANEOUS_PAGE_COUNT` pages.
pub fn load_from_physical(physaddr: usize) -> Result<Self, TablePhysLoadError> {
let physaddr_start_page = physaddr / PAGE_SIZE * PAGE_SIZE;
let physaddr_page_offset = physaddr % PAGE_SIZE;
// Begin by reading and validating the header first. The SDT header is always 36 bytes
// long, and can thus span either one or two page table frames.
let needs_extra_page = (PAGE_SIZE - physaddr_page_offset)
.checked_sub(mem::size_of::<SdtHeader>())
.is_none();
let page_table_count = 1 + if needs_extra_page { 1 } else { 0 };
let pages = PhysmapGuard::map(physaddr_start_page, page_table_count)?;
assert!(pages.len() >= mem::size_of::<SdtHeader>());
let sdt_mem = &pages[physaddr_page_offset..];
let sdt = plain::from_bytes::<SdtHeader>(&sdt_mem[..mem::size_of::<SdtHeader>()])
.expect("either alignment is wrong, or the length is too short, both of which are already checked for");
let total_length = sdt.length();
// Bytes of the table already covered by the initial mapping.
let base_length = std::cmp::min(total_length, sdt_mem.len());
// Bytes that still live on subsequent pages.
let extended_length = total_length - base_length;
let mut loaded = sdt_mem[..base_length].to_owned();
loaded.reserve(extended_length);
const SIMULTANEOUS_PAGE_COUNT: usize = 4;
let mut left = extended_length;
let mut offset = physaddr_start_page + page_table_count * PAGE_SIZE;
let length_per_iteration = PAGE_SIZE * SIMULTANEOUS_PAGE_COUNT;
while left > 0 {
let to_copy = std::cmp::min(left, length_per_iteration);
let additional_pages = PhysmapGuard::map(offset, to_copy.div_ceil(PAGE_SIZE))?;
loaded.extend(&additional_pages[..to_copy]);
left -= to_copy;
offset += to_copy;
}
assert_eq!(left, 0);
Self::new(loaded.into()).map_err(Into::into)
}
/// The full table bytes, header included.
pub fn as_slice(&self) -> &[u8] {
&self.0
}
}
impl Deref for Sdt {
type Target = SdtHeader;
// An Sdt transparently dereferences to its header; `Sdt::new` already
// guaranteed the bytes are at least one header long.
fn deref(&self) -> &Self::Target {
plain::from_bytes::<SdtHeader>(&self.0)
.expect("expected already validated Sdt to be able to get its header")
}
}
impl Sdt {
/// The table payload: everything after the 36-byte header.
pub fn data(&self) -> &[u8] {
&self.0[mem::size_of::<SdtHeader>()..]
}
}
impl fmt::Debug for Sdt {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Sdt")
.field("header", &*self as &SdtHeader)
.field("extra_len", &self.data().len())
.finish()
}
}
/// The Differentiated System Description Table (AML bytecode payload).
pub struct Dsdt(Sdt);
/// A Secondary System Description Table (additional AML bytecode).
pub struct Ssdt(Sdt);
// Current AML implementation builds the aml_context.namespace at startup,
// but the cache for symbols is lazy-loaded when someone
// reads from the acpi:/symbols scheme.
// If you dynamically add an SDT, you can add to the namespace, but you
// must empty the cache so it is rebuilt.
// If you modify an SDT, you must discard the aml_context and rebuild it.
pub struct AmlSymbols {
// Lazily-built AML interpreter; `None` until `init` succeeds.
aml_context: Option<Interpreter<AmlPhysMemHandler>>,
// k = name, v = description
symbol_cache: FxHashMap<String, String>,
// Physical-page cache shared with the interpreter's memory handler.
page_cache: Arc<Mutex<AmlPageCache>>,
// Handlers queued for installation; drained into the interpreter on init.
aml_region_handlers: Vec<(RegionSpace, Box<dyn RegionHandler>)>,
}
impl AmlSymbols {
/// Creates an empty symbol store; the AML interpreter itself is built
/// lazily by `init` / `aml_context_mut`.
pub fn new(aml_region_handlers: Vec<(RegionSpace, Box<dyn RegionHandler>)>) -> Self {
Self {
aml_context: None,
symbol_cache: FxHashMap::default(),
page_cache: Arc::new(Mutex::new(AmlPageCache::default())),
aml_region_handlers,
}
}
/// Builds the AML interpreter from the RSDP (physical address taken as
/// hex from the `RSDP_ADDR` environment variable) and installs all
/// queued region handlers. Fails if already initialized.
pub fn init(&mut self, pci_fd: Option<&libredox::Fd>) -> Result<(), Box<dyn Error>> {
if self.aml_context.is_some() {
return Err("AML interpreter already initialized".into());
}
let format_err = |err| format!("{:?}", err);
let handler = AmlPhysMemHandler::new(pci_fd, Arc::clone(&self.page_cache));
//TODO: use these parsed tables for the rest of acpid
let rsdp_address = usize::from_str_radix(&std::env::var("RSDP_ADDR")?, 16)?;
let tables =
unsafe { AcpiTables::from_rsdp(handler.clone(), rsdp_address).map_err(format_err)? };
let platform = AcpiPlatform::new(tables, handler).map_err(format_err)?;
let interpreter = Interpreter::new_from_platform(&platform).map_err(format_err)?;
// Drain so the handlers cannot be installed twice on a re-init attempt.
for (region, handler) in self.aml_region_handlers.drain(..) {
interpreter.install_region_handler(region, handler);
}
self.aml_context = Some(interpreter);
Ok(())
}
/// Returns the interpreter, initializing it on first use. An init
/// failure is logged and surfaced as `AmlEvalError::NotInitialized`.
pub fn aml_context_mut(
&mut self,
pci_fd: Option<&libredox::Fd>,
) -> Result<&mut Interpreter<AmlPhysMemHandler>, AmlEvalError> {
if self.aml_context.is_none() {
match self.init(pci_fd) {
Ok(()) => (),
Err(err) => {
log::error!("failed to initialize AML context: {}", err);
}
}
}
self.aml_context
.as_mut()
.ok_or(AmlEvalError::NotInitialized)
}
/// Read-only view of the symbol-name -> serialized-description cache.
pub fn symbols_cache(&self) -> &FxHashMap<String, String> {
&self.symbol_cache
}
/// Looks a symbol up in the cache only; never triggers a cache build.
pub fn lookup(&self, symbol: &str) -> Option<String> {
if let Some(description) = self.symbol_cache.get(symbol) {
log::trace!("Found symbol in cache, {}, {}", symbol, description);
return Some(description.to_owned());
}
None
}
/// Walks the entire AML namespace and rebuilds the symbol cache, RON-
/// serializing every object. The previous cache is replaced wholesale.
pub fn build_cache(&mut self, pci_fd: Option<&libredox::Fd>) {
let Ok(aml_context) = self.aml_context_mut(pci_fd) else {
return;
};
// Pre-size generously; real namespaces typically hold thousands of names.
let mut symbol_list: Vec<(AmlName, String)> = Vec::with_capacity(5000);
if aml_context
.namespace
.lock()
.traverse(|level_aml_name, level| {
for (child_seg, handle) in level.values.iter() {
if let Ok(aml_name) =
AmlName::from_name_seg(child_seg.to_owned()).resolve(level_aml_name)
{
let name = aml_to_symbol(&aml_name);
symbol_list.push((aml_name, name));
} else {
log::error!(
"AmlName resolve failed, {:?}:{:?}",
level_aml_name,
child_seg
);
}
}
Ok(true)
})
.is_err()
{
log::error!("Namespace traverse failed");
return;
}
let mut symbol_cache: FxHashMap<String, String> = FxHashMap::default();
for (aml_name, name) in &symbol_list {
// create an empty entry, in case something goes wrong with serialization
symbol_cache.insert(name.to_owned(), "".to_owned());
if let Some(ser_value) = AmlSerde::from_aml(aml_context, aml_name) {
if let Ok(ser_string) = ron::ser::to_string_pretty(&ser_value, Default::default()) {
// replace the empty entry
symbol_cache.insert(name.to_owned(), ser_string);
}
}
}
// Cache the new list
log::trace!("Updating symbols list");
self.symbol_cache = symbol_cache;
}
}
/// Errors surfaced by AML evaluation through `AcpiContext::aml_eval`.
#[derive(Debug, Error)]
pub enum AmlEvalError {
// Error propagated from the AML interpreter itself.
#[error("AML error")]
AmlError(AmlError),
// The evaluation result could not be converted to an AmlSerdeValue.
#[error("Failed to serialize argument")]
SerializationError,
// An input argument could not be converted to an AML object.
#[error("Failed to deserialize")]
DeserializationError,
// The interpreter was never (successfully) initialized.
#[error("AML not initialized")]
NotInitialized,
}
impl From<AmlError> for AmlEvalError {
fn from(value: AmlError) -> Self {
AmlEvalError::AmlError(value)
}
}
/// Central state for acpid: the loaded tables, the FADT/DSDT shortcuts,
/// and the (lazily initialized) AML symbol store.
pub struct AcpiContext {
// All loaded SDTs, in discovery order; the DSDT is appended after load.
tables: Vec<Sdt>,
dsdt: Option<Dsdt>,
fadt: Option<Fadt>,
aml_symbols: RwLock<AmlSymbols>,
// TODO: The kernel ACPI code seemed to use load_table quite ubiquitously, however ACPI 5.1
// states that DDBHandles can only be obtained when loading XSDT-pointed tables. So, we'll
// generate an index only for those.
sdt_order: RwLock<Vec<Option<SdtSignature>>>,
pub next_ctx: RwLock<u64>,
}
impl AcpiContext {
/// Evaluates the AML object `symbol` with the given serialized arguments
/// and returns the serialized result.
///
/// Takes the ACPI global lock around the evaluation. Returns
/// `AmlEvalError` if the interpreter is unavailable, an argument cannot
/// be converted, evaluation fails, or the result cannot be serialized.
pub fn aml_eval(
    &self,
    symbol: AmlName,
    args: Vec<AmlSerdeValue>,
) -> Result<AmlSerdeValue, AmlEvalError> {
    let mut symbols = self.aml_symbols.write();
    let interpreter = symbols.aml_context_mut(None)?;
    // Deserialize the arguments BEFORE acquiring the global lock: the
    // previous ordering leaked the lock when `?` returned early on a
    // bad argument, since release_global_lock was never reached.
    let args = args
        .into_iter()
        .map(|aml_serde_value| {
            aml_serde_value
                .to_aml_object()
                .map(Object::wrap)
                .ok_or(AmlEvalError::DeserializationError)
        })
        .collect::<Result<Vec<WrappedObject>, AmlEvalError>>()?;
    interpreter.acquire_global_lock(16)?;
    let result = interpreter.evaluate(symbol, args);
    // Release unconditionally before inspecting the evaluation result, so
    // an evaluation error cannot leave the global lock held.
    interpreter
        .release_global_lock()
        .expect("Failed to release GIL!"); //TODO: check if this should panic
    result
        .map_err(AmlEvalError::from)
        .and_then(|object| {
            AmlSerdeValue::from_aml_value(object.deref())
                .ok_or(AmlEvalError::SerializationError)
        })
}
/// Builds the context from the RSDT/XSDT table pointers handed over by
/// the kernel, indexing each table and then locating the FADT/DSDT.
///
/// `ec` carries region handlers (e.g. embedded controller) to be
/// installed into the AML interpreter once it is initialized.
pub fn init(
rxsdt_physaddrs: impl Iterator<Item = u64>,
ec: Vec<(RegionSpace, Box<dyn RegionHandler>)>,
) -> Self {
let tables = rxsdt_physaddrs
.map(|physaddr| {
let physaddr: usize = physaddr
.try_into()
.expect("expected ACPI addresses to be compatible with the current word size");
log::trace!("TABLE AT {:#>08X}", physaddr);
Sdt::load_from_physical(physaddr).expect("failed to load physical SDT")
})
.collect::<Vec<Sdt>>();
let mut this = Self {
tables,
dsdt: None,
fadt: None,
// Temporary values
aml_symbols: RwLock::new(AmlSymbols::new(ec)),
next_ctx: RwLock::new(0),
sdt_order: RwLock::new(Vec::new()),
};
// Record the load order so tables can later be addressed by index.
for table in &this.tables {
this.new_index(&table.signature());
}
Fadt::init(&mut this);
//TODO (hangs on real hardware): Dmar::init(&this);
this
}
/// The DSDT, if it was successfully located via the FADT.
pub fn dsdt(&self) -> Option<&Dsdt> {
self.dsdt.as_ref()
}
/// Iterates over all loaded SSDTs (there may be zero or many).
pub fn ssdts(&self) -> impl Iterator<Item = Ssdt> + '_ {
self.find_multiple_sdts(*b"SSDT")
.map(|sdt| Ssdt(sdt.clone()))
}
/// Finds the index of the (expected-to-be-unique) table with the given
/// signature, warning if more than one such table exists. Returns the
/// first match, or `None` if the signature is absent.
fn find_single_sdt_pos(&self, signature: [u8; 4]) -> Option<usize> {
    let mut matching = self
        .tables
        .iter()
        .enumerate()
        .filter(|(_, sdt)| sdt.signature == signature);
    let first = matching.next().map(|(pos, _)| pos);
    // Any further matches indicate a (tolerated) firmware anomaly.
    let extra = matching.count();
    if extra > 0 {
        log::warn!(
            "Expected only a single SDT of signature `{}` ({:?}), but there were {}",
            String::from_utf8_lossy(&signature),
            signature,
            extra + 1
        );
    }
    first
}
/// Iterates over every table whose 4-byte signature matches.
pub fn find_multiple_sdts<'a>(&'a self, signature: [u8; 4]) -> impl Iterator<Item = &'a Sdt> {
self.tables
.iter()
.filter(move |sdt| sdt.signature == signature)
}
/// Clones out the unique table with this signature, if present.
/// (The clone is cheap: Sdt shares its bytes via Arc.)
pub fn take_single_sdt(&self, signature: [u8; 4]) -> Option<Sdt> {
self.find_single_sdt_pos(signature)
.map(|pos| self.tables[pos].clone())
}
/// The FADT, if one was found during `Fadt::init`.
pub fn fadt(&self) -> Option<&Fadt> {
self.fadt.as_ref()
}
/// Finds the table matching the full (signature, OEM id, OEM table id)
/// triple.
pub fn sdt_from_signature(&self, signature: &SdtSignature) -> Option<&Sdt> {
self.tables.iter().find(|sdt| {
sdt.signature == signature.signature
&& sdt.oem_id == signature.oem_id
&& sdt.oem_table_id == signature.oem_table_id
})
}
/// Maps a table index back to its signature, if still present.
pub fn get_signature_from_index(&self, index: usize) -> Option<SdtSignature> {
self.sdt_order.read().get(index).copied().flatten()
}
/// Maps a signature to its most recently assigned index (hence the
/// reverse search with `rposition`).
pub fn get_index_from_signature(&self, signature: &SdtSignature) -> Option<usize> {
self.sdt_order
.read()
.iter()
.rposition(|sig| sig.map_or(false, |sig| &sig == signature))
}
/// All loaded tables, in load order.
pub fn tables(&self) -> &[Sdt] {
&self.tables
}
/// Appends a signature to the index order (see the `sdt_order` TODO).
pub fn new_index(&self, signature: &SdtSignature) {
self.sdt_order.write().push(Some(*signature));
}
/// Looks up a symbol's serialized description, building the symbol cache
/// on first use. Any failure to obtain the symbols yields `None`.
pub fn aml_lookup(&self, symbol: &str) -> Option<String> {
    self.aml_symbols(None)
        .ok()
        .and_then(|aml_symbols| aml_symbols.lookup(symbol))
}
/// Returns a read guard over the AML symbols, building the symbol cache
/// first if it is empty.
///
/// Uses a read-then-write locking scheme: the common case only takes the
/// read lock; the build path upgrades to a write lock and downgrades it
/// back before returning.
pub fn aml_symbols(
    &self,
    pci_fd: Option<&libredox::Fd>,
) -> Result<RwLockReadGuard<'_, AmlSymbols>, AmlError> {
    // return the cached value if it exists
    let symbols = self.aml_symbols.read();
    if !symbols.symbols_cache().is_empty() {
        return Ok(symbols);
    }
    // free the read lock
    drop(symbols);
    // List has not been initialized, we have to build it
    log::trace!("Creating symbols list");
    let mut aml_symbols = self.aml_symbols.write();
    // Re-check under the write lock: another thread may have built the
    // cache between our read-lock release and write-lock acquisition.
    // Without this check, concurrent callers rebuilt the cache redundantly.
    if aml_symbols.symbols_cache().is_empty() {
        aml_symbols.build_cache(pci_fd);
    }
    // return the cached value
    Ok(RwLockWriteGuard::downgrade(aml_symbols))
}
/// Discard any cached symbols list. To be called if the AML namespace changes.
///
/// Dropping the map forces `aml_symbols` to rebuild the cache on the
/// next lookup.
pub fn aml_symbols_reset(&self) {
    self.aml_symbols.write().symbol_cache = FxHashMap::default();
}
/// Set Power State
/// See https://uefi.org/sites/default/files/resources/ACPI_6_1.pdf
/// - search for PM1a
/// See https://forum.osdev.org/viewtopic.php?t=16990 for practical details
pub fn set_global_s_state(&self, state: u8) {
if state != 5 {
return;
}
let fadt = match self.fadt() {
Some(fadt) => fadt,
None => {
log::error!("Cannot set global S-state due to missing FADT.");
return;
}
};
let port = fadt.pm1a_control_block as u16;
let mut val = 1 << 13;
let aml_symbols = self.aml_symbols.read();
let s5_aml_name = match acpi::aml::namespace::AmlName::from_str("\\_S5") {
Ok(aml_name) => aml_name,
Err(error) => {
log::error!("Could not build AmlName for \\_S5, {:?}", error);
return;
}
};
let s5 = match &aml_symbols.aml_context {
Some(aml_context) => match aml_context.namespace.lock().get(s5_aml_name) {
Ok(s5) => s5,
Err(error) => {
log::error!("Cannot set S-state, missing \\_S5, {:?}", error);
return;
}
},
None => {
log::error!("Cannot set S-state, AML context not initialized");
return;
}
};
let package = match s5.deref() {
acpi::aml::object::Object::Package(package) => package,
_ => {
log::error!("Cannot set S-state, \\_S5 is not a package");
return;
}
};
let slp_typa = match package[0].deref() {
acpi::aml::object::Object::Integer(i) => i.to_owned(),
_ => {
log::error!("typa is not an Integer");
return;
}
};
let slp_typb = match package[1].deref() {
acpi::aml::object::Object::Integer(i) => i.to_owned(),
_ => {
log::error!("typb is not an Integer");
return;
}
};
log::trace!("Shutdown SLP_TYPa {:X}, SLP_TYPb {:X}", slp_typa, slp_typb);
val |= slp_typa as u16;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
log::warn!("Shutdown with ACPI outw(0x{:X}, 0x{:X})", port, val);
Pio::<u16>::new(port).write(val);
}
// TODO: Handle SLP_TYPb
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
{
log::error!(
"Cannot shutdown with ACPI outw(0x{:X}, 0x{:X}) on this architecture",
port,
val
);
}
loop {
core::hint::spin_loop();
}
}
}
/// The ACPI 1.0 fixed part of the FADT ("FACP") table.
///
/// NOTE: the field names `preferred_power_managament` ("management") and
/// `gpe0_ength` ("length") are misspelled, but they are public and kept
/// as-is for API compatibility.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct FadtStruct {
pub header: SdtHeader,
pub firmware_ctrl: u32,
// 32-bit physical pointer to the DSDT (superseded by X_DSDT in 2.0+).
pub dsdt: u32,
// field used in ACPI 1.0; no longer in use, for compatibility only
reserved: u8,
pub preferred_power_managament: u8,
pub sci_interrupt: u16,
pub smi_command_port: u32,
pub acpi_enable: u8,
pub acpi_disable: u8,
pub s4_bios_req: u8,
pub pstate_control: u8,
pub pm1a_event_block: u32,
pub pm1b_event_block: u32,
// I/O port of the PM1a control register (SLP_TYP/SLP_EN live here).
pub pm1a_control_block: u32,
pub pm1b_control_block: u32,
pub pm2_control_block: u32,
pub pm_timer_block: u32,
pub gpe0_block: u32,
pub gpe1_block: u32,
pub pm1_event_length: u8,
pub pm1_control_length: u8,
pub pm2_control_length: u8,
pub pm_timer_length: u8,
pub gpe0_ength: u8,
pub gpe1_length: u8,
pub gpe1_base: u8,
pub c_state_control: u8,
pub worst_c2_latency: u16,
pub worst_c3_latency: u16,
pub flush_size: u16,
pub flush_stride: u16,
pub duty_offset: u8,
pub duty_width: u8,
pub day_alarm: u8,
pub month_alarm: u8,
pub century: u8,
// reserved in ACPI 1.0; used since ACPI 2.0+
pub boot_architecture_flags: u16,
reserved2: u8,
pub flags: u32,
}
// SAFETY: #[repr(C, packed)] with only integer fields; any bytes are valid.
unsafe impl plain::Plain for FadtStruct {}
/// ACPI Generic Address Structure (GAS): a 12-byte descriptor locating a
/// register in some address space (memory, I/O, PCI config, ...).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
pub struct GenericAddressStructure {
address_space: u8,
bit_width: u8,
bit_offset: u8,
access_size: u8,
address: u64,
}
/// The ACPI 2.0+ extension of the FADT, located directly after
/// `FadtStruct` in the table bytes (see `Fadt::acpi_2_struct`).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct FadtAcpi2Struct {
// 12 byte structure; see below for details
pub reset_reg: GenericAddressStructure,
pub reset_value: u8,
reserved3: [u8; 3],
// 64bit pointers - Available on ACPI 2.0+
pub x_firmware_control: u64,
pub x_dsdt: u64,
pub x_pm1a_event_block: GenericAddressStructure,
pub x_pm1b_event_block: GenericAddressStructure,
pub x_pm1a_control_block: GenericAddressStructure,
pub x_pm1b_control_block: GenericAddressStructure,
pub x_pm2_control_block: GenericAddressStructure,
pub x_pm_timer_block: GenericAddressStructure,
pub x_gpe0_block: GenericAddressStructure,
pub x_gpe1_block: GenericAddressStructure,
}
// SAFETY: #[repr(C, packed)] with only plain fields; any bytes are valid.
unsafe impl plain::Plain for FadtAcpi2Struct {}
/// A validated FADT table (cheaply cloneable wrapper around its Sdt).
#[derive(Clone)]
pub struct Fadt(Sdt);
impl Fadt {
/// The ACPI 2.0+ tail of the FADT, if the table is long enough to
/// contain it (pre-2.0 firmware tables end after `FadtStruct`).
pub fn acpi_2_struct(&self) -> Option<&FadtAcpi2Struct> {
let bytes = &self.0 .0[mem::size_of::<FadtStruct>()..];
match plain::from_bytes::<FadtAcpi2Struct>(bytes) {
Ok(fadt2) => Some(fadt2),
Err(plain::Error::TooShort) => None,
Err(plain::Error::BadAlignment) => unreachable!(
"plain::from_bytes reported bad alignment, but FadtAcpi2Struct is #[repr(packed)]"
),
}
}
}
impl Deref for Fadt {
type Target = FadtStruct;
// Length was validated in `Fadt::new`, so the header parse cannot fail.
fn deref(&self) -> &Self::Target {
plain::from_bytes::<FadtStruct>(&self.0 .0)
.expect("expected FADT struct to already be validated in Deref impl")
}
}
impl Fadt {
pub fn new(sdt: Sdt) -> Option<Fadt> {
if sdt.signature != *b"FACP" || sdt.length() < mem::size_of::<Fadt>() {
return None;
}
Some(Fadt(sdt))
}
pub fn init(context: &mut AcpiContext) {
let fadt_sdt = context
.take_single_sdt(*b"FACP")
.expect("expected ACPI to always have a FADT");
let fadt = match Fadt::new(fadt_sdt) {
Some(fadt) => fadt,
None => {
log::error!("Failed to find FADT");
return;
}
};
let dsdt_ptr = match fadt.acpi_2_struct() {
Some(fadt2) => usize::try_from(fadt2.x_dsdt).unwrap_or_else(|_| {
usize::try_from(fadt.dsdt).expect("expected any given u32 to fit within usize")
}),
None => usize::try_from(fadt.dsdt).expect("expected any given u32 to fit within usize"),
};
log::debug!("FACP at {:X}", { dsdt_ptr });
let dsdt_sdt = match Sdt::load_from_physical(fadt.dsdt as usize) {
Ok(dsdt) => dsdt,
Err(error) => {
log::error!("Failed to load DSDT: {}", error);
return;
}
};
context.fadt = Some(fadt.clone());
context.dsdt = Some(Dsdt(dsdt_sdt.clone()));
context.tables.push(dsdt_sdt);
}
}
/// A table that is known to carry AML bytecode (DSDT or SSDT).
pub enum PossibleAmlTables {
Dsdt(Dsdt),
Ssdt(Ssdt),
}
impl PossibleAmlTables {
/// Classifies an SDT by signature; `None` for non-AML tables.
pub fn try_new(inner: Sdt) -> Option<Self> {
match &inner.signature {
b"DSDT" => Some(Self::Dsdt(Dsdt(inner))),
b"SSDT" => Some(Self::Ssdt(Ssdt(inner))),
_ => None,
}
}
}
impl AmlContainingTable for PossibleAmlTables {
fn aml(&self) -> &[u8] {
match self {
Self::Dsdt(dsdt) => dsdt.aml(),
Self::Ssdt(ssdt) => ssdt.aml(),
}
}
fn header(&self) -> &SdtHeader {
match self {
Self::Dsdt(dsdt) => dsdt.header(),
Self::Ssdt(ssdt) => ssdt.header(),
}
}
}
/// Common interface for tables whose payload is AML bytecode.
pub trait AmlContainingTable {
/// The raw AML bytecode (table payload after the SDT header).
fn aml(&self) -> &[u8];
/// The table's SDT header.
fn header(&self) -> &SdtHeader;
}
// Blanket impl so `&T` can be used wherever an AmlContainingTable is expected.
impl<T> AmlContainingTable for &T
where
T: AmlContainingTable,
{
fn aml(&self) -> &[u8] {
T::aml(*self)
}
fn header(&self) -> &SdtHeader {
T::header(*self)
}
}
impl AmlContainingTable for Dsdt {
fn aml(&self) -> &[u8] {
self.0.data()
}
fn header(&self) -> &SdtHeader {
&*self.0
}
}
impl AmlContainingTable for Ssdt {
fn aml(&self) -> &[u8] {
self.0.data()
}
fn header(&self) -> &SdtHeader {
&*self.0
}
}
@@ -0,0 +1,128 @@
use std::ops::{Deref, DerefMut};
use common::io::Mmio;
// TODO: Only wrap with Mmio where there are hardware-registers. (Some of these structs seem to be
// ring buffer entries, which are not to be treated the same way).
/// RAII mapping of one page of DRHD (DMA remapping hardware unit) MMIO
/// registers; unmapped on drop.
pub struct DrhdPage {
virt: *mut Drhd,
}
impl DrhdPage {
/// Maps the page-aligned physical register base read-only.
///
/// NOTE(review): the mapping is `Prot::RO`, yet `DerefMut` below hands
/// out `&mut Drhd` — writes through it would fault. Confirm whether the
/// mapping should be read-write or `DerefMut` should be dropped.
pub fn map(base_phys: usize) -> syscall::Result<Self> {
assert_eq!(
base_phys % crate::acpi::PAGE_SIZE,
0,
"DRHD registers must be page-aligned"
);
// TODO: Uncachable? Can reads have side-effects?
let virt = unsafe {
common::physmap(
base_phys,
crate::acpi::PAGE_SIZE,
common::Prot::RO,
common::MemoryType::default(),
)?
} as *mut Drhd;
Ok(Self { virt })
}
}
impl Deref for DrhdPage {
type Target = Drhd;
fn deref(&self) -> &Self::Target {
// SAFETY: `virt` points to a live page-sized mapping for the lifetime
// of this guard.
unsafe { &*self.virt }
}
}
impl DerefMut for DrhdPage {
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: exclusive access is guaranteed by &mut self; see the
// NOTE(review) about the read-only mapping above.
unsafe { &mut *self.virt }
}
}
impl Drop for DrhdPage {
fn drop(&mut self) {
unsafe {
// Best-effort unmap; nothing useful can be done on failure.
let _ = libredox::call::munmap(self.virt.cast(), crate::acpi::PAGE_SIZE);
}
}
}
// Register groups within the DRHD MMIO page. Field order mirrors the
// hardware register layout — presumably per the VT-d specification's
// register map; TODO confirm offsets against the spec revision in use.
/// Fault-recording and fault-event registers.
#[repr(C, packed)]
pub struct DrhdFault {
pub sts: Mmio<u32>,
pub ctrl: Mmio<u32>,
pub data: Mmio<u32>,
pub addr: [Mmio<u32>; 2],
_rsv: [Mmio<u64>; 2],
pub log: Mmio<u64>,
}
/// Protected low/high memory region registers.
#[repr(C, packed)]
pub struct DrhdProtectedMemory {
pub en: Mmio<u32>,
pub low_base: Mmio<u32>,
pub low_limit: Mmio<u32>,
pub high_base: Mmio<u64>,
pub high_limit: Mmio<u64>,
}
/// Invalidation queue and completion registers.
#[repr(C, packed)]
pub struct DrhdInvalidation {
pub queue_head: Mmio<u64>,
pub queue_tail: Mmio<u64>,
pub queue_addr: Mmio<u64>,
_rsv: Mmio<u32>,
pub cmpl_sts: Mmio<u32>,
pub cmpl_ctrl: Mmio<u32>,
pub cmpl_data: Mmio<u32>,
pub cmpl_addr: [Mmio<u32>; 2],
}
/// Page-request queue registers (for devices using PRI).
#[repr(C, packed)]
pub struct DrhdPageRequest {
pub queue_head: Mmio<u64>,
pub queue_tail: Mmio<u64>,
pub queue_addr: Mmio<u64>,
_rsv: Mmio<u32>,
pub sts: Mmio<u32>,
pub ctrl: Mmio<u32>,
pub data: Mmio<u32>,
pub addr: [Mmio<u32>; 2],
}
/// One variable-range MTRR pair (base + mask).
#[repr(C, packed)]
pub struct DrhdMtrrVariable {
pub base: Mmio<u64>,
pub mask: Mmio<u64>,
}
/// Memory-type-range registers exposed by the remapping unit.
#[repr(C, packed)]
pub struct DrhdMtrr {
pub cap: Mmio<u64>,
pub def_type: Mmio<u64>,
pub fixed: [Mmio<u64>; 11],
pub variable: [DrhdMtrrVariable; 10],
}
/// The top-level DRHD MMIO register block.
///
/// NOTE(review): field order encodes assumed hardware register offsets —
/// verify against the VT-d specification's register map.
#[repr(C, packed)]
pub struct Drhd {
pub version: Mmio<u32>,
_rsv: Mmio<u32>,
pub cap: Mmio<u64>,
pub ext_cap: Mmio<u64>,
pub gl_cmd: Mmio<u32>,
pub gl_sts: Mmio<u32>,
pub root_table: Mmio<u64>,
pub ctx_cmd: Mmio<u64>,
_rsv1: Mmio<u32>,
pub fault: DrhdFault,
_rsv2: Mmio<u32>,
pub pm: DrhdProtectedMemory,
pub invl: DrhdInvalidation,
_rsv3: Mmio<u64>,
pub intr_table: Mmio<u64>,
pub page_req: DrhdPageRequest,
pub mtrr: DrhdMtrr,
}
@@ -0,0 +1,528 @@
//! DMA Remapping Table -- `DMAR`. This is Intel's implementation of IOMMU functionality, known as
//! VT-d.
//!
//! To understand what all of these structs mean, refer to the "Intel(R) Virtualization
//! Technology for Directed I/O" specification.
// TODO: Move this code to a separate driver as well?
use std::convert::TryFrom;
use std::ops::Deref;
use std::{fmt, mem};
use common::io::Io as _;
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
use self::drhd::DrhdPage;
use crate::acpi::{AcpiContext, Sdt, SdtHeader};
pub mod drhd;
/// The fixed part of the DMAR table: SDT header plus DMAR-specific fields.
#[repr(C, packed)]
pub struct DmarStruct {
pub sdt_header: SdtHeader,
// Maximum DMA physical addressability (bits - 1) supported by the platform.
pub host_addr_width: u8,
pub flags: u8,
pub _rsvd: [u8; 10],
// This header is followed by N remapping structures.
}
// SAFETY: #[repr(C, packed)] with only plain integer fields.
unsafe impl plain::Plain for DmarStruct {}
/// The DMA Remapping Table
#[derive(Debug)]
pub struct Dmar(Sdt);
impl Dmar {
// The variable-length remapping structures following the fixed header.
// (Name has a typo — "remmapping" — kept because it is referenced below.)
fn remmapping_structs_area(&self) -> &[u8] {
&self.0.as_slice()[mem::size_of::<DmarStruct>()..]
}
}
impl Deref for Dmar {
type Target = DmarStruct;
// `Dmar::new` already verified the table is at least DmarStruct-sized.
fn deref(&self) -> &Self::Target {
plain::from_bytes(self.0.as_slice())
.expect("expected Dmar struct to already have checked the length, and alignment issues should be impossible due to #[repr(packed)]")
}
}
impl Dmar {
// TODO: Again, perhaps put this code into a different driver, and read the table the regular
// way via the acpi scheme?
/// Locates the DMAR table, parses it, and dumps each remapping structure
/// (mapping and logging DRHD register contents) for diagnostics.
pub fn init(acpi_ctx: &AcpiContext) {
let dmar_sdt = match acpi_ctx.take_single_sdt(*b"DMAR") {
Some(dmar_sdt) => dmar_sdt,
None => {
// Not an error: the platform simply has no VT-d support exposed.
log::warn!("Unable to find `DMAR` ACPI table.");
return;
}
};
let dmar = match Dmar::new(dmar_sdt) {
Some(dmar) => dmar,
None => {
log::error!("Failed to parse DMAR table, possibly malformed.");
return;
}
};
log::info!("Found DMAR: {}: {}", dmar.host_addr_width, dmar.flags);
log::debug!("DMAR: {:?}", dmar);
for dmar_entry in dmar.iter() {
log::debug!("DMAR entry: {:?}", dmar_entry);
match dmar_entry {
DmarEntry::Drhd(dmar_drhd) => {
// Map the hardware unit's registers and log a snapshot.
let drhd = dmar_drhd.map();
log::debug!("VER: {:X}", drhd.version.read());
log::debug!("CAP: {:X}", drhd.cap.read());
log::debug!("EXT_CAP: {:X}", drhd.ext_cap.read());
log::debug!("GCMD: {:X}", drhd.gl_cmd.read());
log::debug!("GSTS: {:X}", drhd.gl_sts.read());
log::debug!("RT: {:X}", drhd.root_table.read());
}
_ => (),
}
}
}
/// Validates an SDT (already signature-checked) as a DMAR table: it must
/// be at least as long as the fixed `DmarStruct` part.
fn new(sdt: Sdt) -> Option<Dmar> {
    assert_eq!(
        sdt.signature, *b"DMAR",
        "signature already checked against `DMAR`"
    );
    if sdt.length() < mem::size_of::<DmarStruct>() {
        // Log the same bound the check uses; this previously printed
        // size_of::<Dmar>() — the size of the wrapper type, not the
        // on-disk layout — making the message misleading.
        log::error!(
            "The DMAR table was too small ({} B < {} B).",
            sdt.length(),
            mem::size_of::<DmarStruct>()
        );
        return None;
    }
    // No need to check alignment for #[repr(packed)] structs.
    Some(Dmar(sdt))
}
/// Iterates over the remapping structures that follow the fixed header.
pub fn iter(&self) -> DmarIter<'_> {
    let bytes = self.remmapping_structs_area();
    DmarIter(DmarRawIter { bytes })
}
}
/// DMAR DMA Remapping Hardware Unit Definition
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct DmarDrhdHeader {
// Remapping structure type (0 for DRHD).
pub kind: u16,
// Total structure length, header + device scopes.
pub length: u16,
pub flags: u8,
pub _rsv: u8,
pub segment: u16,
// Page-aligned physical base of the unit's MMIO registers.
pub base: u64,
}
// SAFETY: #[repr(C, packed)] with only plain integer fields.
unsafe impl plain::Plain for DmarDrhdHeader {}
/// Fixed header of a Device Scope entry (shared by several structures).
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct DeviceScopeHeader {
pub ty: u8,
// Total entry length, header + variable-sized path.
pub len: u8,
pub _rsvd: u16,
pub enumeration_id: u8,
pub start_bus_num: u8,
// The variable-sized path comes after.
}
// SAFETY: #[repr(C, packed)] with only plain integer fields.
unsafe impl plain::Plain for DeviceScopeHeader {}
/// An owned Device Scope entry: header followed by its path bytes.
pub struct DeviceScope(Box<[u8]>);
impl DeviceScope {
    /// Parses one Device Scope entry from the start of `raw`, which may
    /// extend past this entry into following structures.
    ///
    /// Returns `None` (with a warning) if the header does not fit or the
    /// `len` field is inconsistent with the available bytes.
    pub fn try_new(raw: &[u8]) -> Option<Self> {
        // TODO: Check ty.
        let header_bytes = raw.get(..mem::size_of::<DeviceScopeHeader>())?;
        let header = plain::from_bytes::<DeviceScopeHeader>(header_bytes)
            .expect("length already checked, and alignment 1 (#[repr(packed)] should suffice");
        let len = usize::from(header.len);
        // `len` covers the header plus the path; a smaller value would
        // make the Deref impl (which re-parses the header) panic.
        if len < mem::size_of::<DeviceScopeHeader>() {
            log::warn!("Device scope len field smaller than its header.");
            return None;
        }
        if len > raw.len() {
            log::warn!("Device scope smaller than len field.");
            return None;
        }
        // Keep only this entry's bytes; storing all of `raw` (as before)
        // made `path()` spill into the following structures.
        Some(Self(raw[..len].into()))
    }
}
impl fmt::Debug for DeviceScope {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DeviceScope")
.field("header", &*self as &DeviceScopeHeader)
.field("path", &self.path())
.finish()
}
}
impl Deref for DeviceScope {
type Target = DeviceScopeHeader;
// try_new guaranteed the stored bytes are at least one header long.
fn deref(&self) -> &Self::Target {
plain::from_bytes(&self.0)
.expect("expected length to be sufficient, and alignment (due to #[repr(packed)]")
}
}
impl DeviceScope {
/// The variable-sized path bytes following the fixed header.
pub fn path(&self) -> &[u8] {
&self.0[mem::size_of::<DeviceScopeHeader>()..]
}
}
/// An owned DRHD remapping structure: header followed by device scopes.
pub struct DmarDrhd(Box<[u8]>);
impl DmarDrhd {
/// Wraps the raw bytes if they are at least one header long.
pub fn try_new(raw: &[u8]) -> Option<Self> {
if raw.len() < mem::size_of::<DmarDrhdHeader>() {
return None;
}
Some(Self(raw.into()))
}
/// The device-scope entries following the fixed header.
pub fn device_scope_area(&self) -> &[u8] {
&self.0[mem::size_of::<DmarDrhdHeader>()..]
}
/// Maps this unit's register page (panics if the mapping fails).
pub fn map(&self) -> DrhdPage {
let base = usize::try_from(self.base).expect("expected u64 to fit within usize");
DrhdPage::map(base).expect("failed to map DRHD registers")
}
}
impl Deref for DmarDrhd {
type Target = DmarDrhdHeader;
// try_new guaranteed at least one header's worth of bytes.
fn deref(&self) -> &Self::Target {
plain::from_bytes::<DmarDrhdHeader>(&self.0[..mem::size_of::<DmarDrhdHeader>()])
.expect("length is already checked, and alignment 1 (#[repr(packed)] should suffice")
}
}
impl fmt::Debug for DmarDrhd {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DmarDrhd")
.field("header", &*self as &DmarDrhd)
// TODO: print out device scopes
.finish()
}
}
/// DMAR Reserved Memory Region Reporting
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct DmarRmrrHeader {
pub kind: u16,
pub length: u16,
pub _rsv: u16,
pub segment: u16,
// Physical [base, limit] of the reserved memory region.
pub base: u64,
pub limit: u64,
// The device scopes come after.
}
// SAFETY: #[repr(C, packed)] with only plain integer fields.
unsafe impl plain::Plain for DmarRmrrHeader {}
/// Owned RMRR structure: header followed by device scopes.
pub struct DmarRmrr(Box<[u8]>);
impl DmarRmrr {
/// Wraps the raw bytes if they are at least one header long.
pub fn try_new(raw: &[u8]) -> Option<Self> {
if raw.len() < mem::size_of::<DmarRmrrHeader>() {
return None;
}
Some(Self(raw.into()))
}
}
impl Deref for DmarRmrr {
type Target = DmarRmrrHeader;
fn deref(&self) -> &Self::Target {
plain::from_bytes(&self.0[..mem::size_of::<DmarRmrrHeader>()])
.expect("length already checked, and with #[repr(packed)] alignment should be okay")
}
}
impl fmt::Debug for DmarRmrr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DmarRmrr")
.field("header", &*self as &DmarRmrrHeader)
// TODO: print out device scopes
.finish()
}
}
/// DMAR Root Port ATS Capability Reporting
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct DmarAtsrHeader {
kind: u16,
length: u16,
flags: u8,
_rsv: u8,
segment: u16,
// The device scopes come after.
}
// SAFETY: #[repr(C, packed)] with only plain integer fields.
unsafe impl plain::Plain for DmarAtsrHeader {}
/// Owned ATSR structure: header followed by device scopes.
pub struct DmarAtsr(Box<[u8]>);
impl DmarAtsr {
/// Wraps the raw bytes if they are at least one header long.
pub fn try_new(raw: &[u8]) -> Option<Self> {
if raw.len() < mem::size_of::<DmarAtsrHeader>() {
return None;
}
Some(Self(raw.into()))
}
}
impl Deref for DmarAtsr {
type Target = DmarAtsrHeader;
fn deref(&self) -> &Self::Target {
plain::from_bytes(&self.0[..mem::size_of::<DmarAtsrHeader>()])
.expect("length already checked, and with #[repr(packed)] alignment should be okay")
}
}
impl fmt::Debug for DmarAtsr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DmarAtsr")
.field("header", &*self as &DmarAtsrHeader)
// TODO: print out device scopes
.finish()
}
}
/// DMAR Remapping Hardware Static Affinity
///
/// Fixed-size structure (no trailing device scopes), so it is parsed by
/// value rather than wrapped around a byte buffer.
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct DmarRhsa {
pub kind: u16,
pub length: u16,
pub _rsv: u32,
// Register base of the associated remapping unit.
pub base: u64,
// Proximity (NUMA) domain of that unit.
pub domain: u32,
}
// SAFETY: #[repr(C, packed)] with only plain integer fields.
unsafe impl plain::Plain for DmarRhsa {}
impl DmarRhsa {
/// Copies the structure out of `raw` if enough bytes are present.
pub fn try_new(raw: &[u8]) -> Option<Self> {
let bytes = raw.get(..mem::size_of::<DmarRhsa>())?;
let this = plain::from_bytes(bytes)
.expect("length is already checked, and alignment 1 should suffice (#[repr(packed)])");
Some(*this)
}
}
/// DMAR ACPI Name-space Device Declaration
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct DmarAnddHeader {
pub kind: u16,
pub length: u16,
pub _rsv: [u8; 3],
pub acpi_dev: u8,
// The device scopes come after.
}
// SAFETY: #[repr(C, packed)] with only plain integer fields.
unsafe impl plain::Plain for DmarAnddHeader {}
/// Owned ANDD structure: header followed by device scopes.
pub struct DmarAndd(Box<[u8]>);
impl DmarAndd {
/// Wraps the raw bytes if they are at least one header long.
pub fn try_new(raw: &[u8]) -> Option<Self> {
if raw.len() < mem::size_of::<DmarAnddHeader>() {
return None;
}
Some(Self(raw.into()))
}
}
impl Deref for DmarAndd {
type Target = DmarAnddHeader;
fn deref(&self) -> &Self::Target {
plain::from_bytes(&self.0[..mem::size_of::<DmarAnddHeader>()])
.expect("length already checked, and with #[repr(packed)] alignment should be okay")
}
}
impl fmt::Debug for DmarAndd {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DmarAndd")
.field("header", &*self as &DmarAnddHeader)
// TODO: print out device scopes
.finish()
}
}
/// DMAR SoC Integrated Address Translation Cache Reporting
///
/// NOTE: the previous doc comment ("ACPI Name-space Device Declaration") was
/// copy-pasted from ANDD; this is the SATC structure (type 5).
/// Fixed-size header; device scopes follow it in the raw table bytes.
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct DmarSatcHeader {
    // Remapping structure type (5 for SATC, see `EntryType`).
    pub kind: u16,
    // Total structure length in bytes, device scopes included.
    pub length: u16,
    pub flags: u8,
    pub _rsvd: u8,
    // PCI segment number this structure applies to.
    pub seg_num: u16,
    // The device scopes come after.
}
// SAFETY: #[repr(C, packed)] struct of plain integer fields — every byte
// pattern is a valid value, so it may be reinterpreted from raw bytes.
unsafe impl plain::Plain for DmarSatcHeader {}
/// A SATC entry, kept as the raw bytes of the entire structure
/// (header plus device scopes).
pub struct DmarSatc(Box<[u8]>);
impl DmarSatc {
    /// Copies `raw` into an owned buffer, provided it is at least large
    /// enough to contain the fixed-size header.
    pub fn try_new(raw: &[u8]) -> Option<Self> {
        (raw.len() >= mem::size_of::<DmarSatcHeader>()).then(|| Self(raw.into()))
    }
}
impl Deref for DmarSatc {
    type Target = DmarSatcHeader;
    fn deref(&self) -> &Self::Target {
        let header_bytes = &self.0[..mem::size_of::<DmarSatcHeader>()];
        plain::from_bytes(header_bytes)
            .expect("length already checked, and with #[repr(packed)] alignment should be okay")
    }
}
impl fmt::Debug for DmarSatc {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let header: &DmarSatcHeader = self;
        // TODO: print out device scopes
        f.debug_struct("DmarSatc").field("header", header).finish()
    }
}
/// The list of different "Remapping Structure Types".
///
/// Refer to section 8.2 in the Intel VT-d spec (as of revision 3.2).
/// (The previous comment said "VTIO"; the DMAR table is defined by VT-d.)
#[derive(Clone, Copy, Debug, FromPrimitive)]
#[repr(u16)]
pub enum EntryType {
    Drhd = 0,
    Rmrr = 1,
    Atsr = 2,
    Rhsa = 3,
    Andd = 4,
    Satc = 5,
}
/// DMAR Entries
///
/// A parsed remapping structure, or a marker for an entry that was shorter
/// than its fixed-size header (`TooShort`) or of a type this parser does not
/// know (`Unknown`, carrying the raw type value).
#[derive(Debug)]
pub enum DmarEntry {
    Drhd(DmarDrhd),
    Rmrr(DmarRmrr),
    Atsr(DmarAtsr),
    Rhsa(DmarRhsa),
    Andd(DmarAndd),
    // TODO: "SoC Integrated Address Translation Cache Reporting Structure".
    Satc(DmarSatc),
    TooShort(EntryType),
    Unknown(u16),
}
/// Iterates over the raw remapping structures of a DMAR table.
///
/// Yields `(type, bytes)` pairs where `bytes` covers the whole structure,
/// including its 4-byte `(type: u16, length: u16)` header.
struct DmarRawIter<'sdt> {
    bytes: &'sdt [u8],
}
impl<'sdt> Iterator for DmarRawIter<'sdt> {
    type Item = (u16, &'sdt [u8]);
    fn next(&mut self) -> Option<Self::Item> {
        // Every remapping structure starts with a 2-byte type followed by a
        // 2-byte length; the length counts the entire structure, header
        // included.
        let type_bytes = match self.bytes.get(..2) {
            Some(bytes) => bytes,
            None => {
                if !self.bytes.is_empty() {
                    log::warn!("DMAR table ended between two entries.");
                }
                return None;
            }
        };
        let len_bytes = match self.bytes.get(2..4) {
            Some(bytes) => bytes,
            None => {
                log::warn!("DMAR table ended between two entries.");
                return None;
            }
        };
        let type_bytes = <[u8; 2]>::try_from(type_bytes)
            .expect("expected a 2-byte slice to be convertible to [u8; 2]");
        // BUGFIX: the length was previously (mistakenly) parsed out of
        // `type_bytes`, yielding a bogus length for every entry.
        let len_bytes = <[u8; 2]>::try_from(len_bytes)
            .expect("expected a 2-byte slice to be convertible to [u8; 2]");
        let ty = u16::from_ne_bytes(type_bytes);
        let len = usize::from(u16::from_ne_bytes(len_bytes));
        // A structure must at least cover its own 4-byte header; a smaller
        // (or zero) length would make this iterator re-read bytes or loop
        // forever.
        if len < 4 {
            log::warn!("DMAR remapping structure length was impossibly small.");
            return None;
        }
        // The length includes the header, so compare against the whole
        // remaining table (not the bytes after the header, which wrongly
        // rejected a valid final entry).
        if len > self.bytes.len() {
            log::warn!("DMAR remapping structure length was larger than the remaining length of the table.");
            return None;
        }
        let (current, residue) = self.bytes.split_at(len);
        self.bytes = residue;
        Some((ty, current))
    }
}
/// Iterator over parsed [`DmarEntry`] values of a DMAR table.
pub struct DmarIter<'sdt>(DmarRawIter<'sdt>);
impl Iterator for DmarIter<'_> {
    type Item = DmarEntry;
    fn next(&mut self) -> Option<Self::Item> {
        let (raw_type, raw) = self.0.next()?;
        // An unrecognized or truncated entry is surfaced to the caller as
        // `Unknown`/`TooShort` (with a warning) rather than ending iteration,
        // so one bad entry does not hide the rest of the table.
        let Some(entry_type) = EntryType::from_u16(raw_type) else {
            log::warn!(
                "Encountered invalid entry type {} (length {})",
                raw_type,
                raw.len()
            );
            return Some(DmarEntry::Unknown(raw_type));
        };
        let parsed = match entry_type {
            EntryType::Drhd => DmarDrhd::try_new(raw).map(DmarEntry::Drhd),
            EntryType::Rmrr => DmarRmrr::try_new(raw).map(DmarEntry::Rmrr),
            EntryType::Atsr => DmarAtsr::try_new(raw).map(DmarEntry::Atsr),
            EntryType::Rhsa => DmarRhsa::try_new(raw).map(DmarEntry::Rhsa),
            EntryType::Andd => DmarAndd::try_new(raw).map(DmarEntry::Andd),
            EntryType::Satc => DmarSatc::try_new(raw).map(DmarEntry::Satc),
        };
        Some(parsed.unwrap_or(DmarEntry::TooShort(entry_type)))
    }
}
@@ -0,0 +1,430 @@
use acpi::{aml::AmlError, Handle, PciAddress, PhysicalMapping};
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use common::io::{Io, Pio};
use num_traits::PrimInt;
use rustc_hash::FxHashMap;
use std::fmt::LowerHex;
use std::mem::size_of;
use std::ptr::NonNull;
use std::sync::{Arc, Mutex};
use syscall::PAGE_SIZE;
// Rounds an address down to its page base.
const PAGE_MASK: usize = !(PAGE_SIZE - 1);
// Extracts an address's offset within its page.
const OFFSET_MASK: usize = PAGE_SIZE - 1;
/// One physical page mapped read-write into this process's address space.
struct MappedPage {
    // Page-aligned physical address of the mapped page.
    phys_page: usize,
    // Virtual address the page is mapped at.
    virt_page: usize,
}
impl MappedPage {
    /// Maps the physical page at `phys_page` (expected to be page-aligned)
    /// read-write, translating the syscall error into `std::io::Error`.
    fn new(phys_page: usize) -> std::io::Result<Self> {
        // SAFETY: maps device/physical memory; callers only dereference the
        // returned mapping through the cache's sized accessors. Assumes
        // `phys_page` refers to memory acpid is allowed to map — TODO confirm.
        let virt_page = unsafe {
            common::physmap(
                phys_page,
                PAGE_SIZE,
                common::Prot::RW,
                common::MemoryType::default(),
            )
            .map_err(|error| std::io::Error::from_raw_os_error(error.errno()))?
        } as usize;
        Ok(Self {
            phys_page,
            virt_page,
        })
    }
}
impl Drop for MappedPage {
    /// Unmaps the page when it falls out of the cache. Failures are only
    /// logged: panicking inside `drop` could abort the process.
    fn drop(&mut self) {
        log::trace!("Drop page {:#x}", self.phys_page);
        // SAFETY: `virt_page` is the base of a mapping of exactly PAGE_SIZE
        // bytes created in `MappedPage::new`, and is unmapped only here.
        let result = unsafe { libredox::call::munmap(self.virt_page as *mut (), PAGE_SIZE) };
        if let Err(e) = result {
            log::error!("funmap (phys): {:?}", e);
        }
    }
}
/// Cache of physical-page mappings used while interpreting AML, so repeated
/// accesses to the same page reuse a single mapping.
#[derive(Default)]
pub struct AmlPageCache {
    // Keyed by page-aligned physical address.
    page_cache: FxHashMap<usize, MappedPage>,
}
impl AmlPageCache {
    /// Returns the mapping for the page containing `phys_target`, mapping
    /// the page on first use and reusing the cached mapping afterwards.
    fn get_page(&mut self, phys_target: usize) -> std::io::Result<&MappedPage> {
        use std::collections::hash_map::Entry;

        let phys_page = phys_target & PAGE_MASK;
        // Single hash lookup via the entry API, instead of the previous
        // `contains_key` followed by `get` (or `insert` + `get`).
        match self.page_cache.entry(phys_page) {
            Entry::Occupied(entry) => {
                log::trace!("re-using cached page {:#x}", phys_page);
                Ok(entry.into_mut())
            }
            Entry::Vacant(slot) => {
                let mapped_page = MappedPage::new(phys_page)?;
                log::trace!("adding page {:#x} to cache", mapped_page.phys_page);
                Ok(slot.insert(mapped_page))
            }
        }
    }
    /// The offset into the virtual slice of T that matches the physical target.
    ///
    /// Panics if `phys_target` is not aligned to `size_of::<T>()`.
    fn sized_index<T>(phys_target: usize) -> usize {
        assert_eq!(
            phys_target & !(size_of::<T>() - 1),
            phys_target,
            "address {} is not aligned",
            phys_target
        );
        (phys_target & OFFSET_MASK) / size_of::<T>()
    }
    /// Read a `T` from the given physical address (must be `T`-aligned).
    fn read_from_phys<T: PrimInt + LowerHex>(&mut self, phys_target: usize) -> std::io::Result<T> {
        let mapped_page = self.get_page(phys_target)?;
        // SAFETY: `virt_page` is the base of one mapped page and the slice
        // covers exactly that page; `T` is a primitive integer.
        let page_as_slice = unsafe {
            std::slice::from_raw_parts(
                mapped_page.virt_page as *const T,
                PAGE_SIZE / size_of::<T>(),
            )
        };
        // for debugging only
        let _virt_ptr = page_as_slice[Self::sized_index::<T>(phys_target)..].as_ptr() as usize;
        let val = page_as_slice[Self::sized_index::<T>(phys_target)];
        log::trace!(
            "read {:#x}, virt {:#x}, val {:#x}",
            phys_target,
            _virt_ptr,
            val
        );
        Ok(val)
    }
    /// Write a `T` to the given physical address (must be `T`-aligned).
    fn write_to_phys<T: PrimInt + LowerHex>(
        &mut self,
        phys_target: usize,
        val: T,
    ) -> std::io::Result<()> {
        let mapped_page = self.get_page(phys_target)?;
        // SAFETY: as in `read_from_phys`; the mapping was created read-write.
        let page_as_slice = unsafe {
            std::slice::from_raw_parts_mut(
                mapped_page.virt_page as *mut T,
                PAGE_SIZE / size_of::<T>(),
            )
        };
        // for debugging only
        let _virt_ptr = page_as_slice[Self::sized_index::<T>(phys_target)..].as_ptr() as usize;
        page_as_slice[Self::sized_index::<T>(phys_target)] = val;
        log::trace!(
            "write {:#x}, virt {:#x}, val {:#x}",
            phys_target,
            _virt_ptr,
            val
        );
        Ok(())
    }
    /// Drops every cached mapping; each page is unmapped by its `Drop` impl.
    pub fn clear(&mut self) {
        log::trace!("Clear page cache");
        self.page_cache.clear();
    }
}
/// Cloneable handler giving the AML interpreter access to physical memory,
/// port I/O and PCI configuration space.
#[derive(Clone)]
pub struct AmlPhysMemHandler {
    // Page-mapping cache shared with the rest of acpid.
    page_cache: Arc<Mutex<AmlPageCache>>,
    // Fd of the registered pci scheme, if any; `None` disables PCI access.
    pci_fd: Arc<Option<libredox::Fd>>,
}
/// Construction and PCI-scheme plumbing for [`AmlPhysMemHandler`].
impl AmlPhysMemHandler {
    /// Creates a handler sharing `page_cache`; logs and disables PCI access
    /// when no pci fd has been registered.
    pub fn new(pci_fd_opt: Option<&libredox::Fd>, page_cache: Arc<Mutex<AmlPageCache>>) -> Self {
        let pci_fd = match pci_fd_opt {
            Some(fd) => Some(libredox::Fd::new(fd.raw())),
            None => {
                log::error!("pci_fd is not registered");
                None
            }
        };
        Self {
            page_cache,
            pci_fd: Arc::new(pci_fd),
        }
    }
    /// Packs a PCI request (`kind`: 1 = read, 2 = write) into the two-word
    /// metadata layout of the pci scheme's `call` interface.
    fn pci_call_metadata(kind: u8, addr: PciAddress, off: u16) -> [u64; 2] {
        // Bit layout of the address word:
        //   segment:  u16          at bit 28
        //   bus:      u8  (8 bits) at bit 20
        //   device:   u8  (5 bits) at bit 15
        //   function: u8  (3 bits) at bit 12
        //   offset:   u16 (12 bits) at bit 0
        let address = (u64::from(addr.segment()) << 28)
            | (u64::from(addr.bus()) << 20)
            | (u64::from(addr.device()) << 15)
            | (u64::from(addr.function()) << 12)
            | u64::from(off);
        [u64::from(kind), address]
    }
    /// Reads `value.len()` bytes of configuration space; failures are logged
    /// and leave `value` untouched.
    fn read_pci(&self, addr: PciAddress, off: u16, value: &mut [u8]) {
        let metadata = Self::pci_call_metadata(1, addr, off);
        let Some(pci_fd) = &*self.pci_fd else {
            log::error!(
                "read pci {addr}@{off:04X}:{:02X}: pci access not available",
                value.len()
            );
            return;
        };
        if let Err(err) = pci_fd.call_ro(value, syscall::CallFlags::empty(), &metadata) {
            log::error!("read pci {addr}@{off:04X}:{:02X}: {}", value.len(), err);
        }
    }
    /// Writes `value` to configuration space; failures are only logged.
    fn write_pci(&self, addr: PciAddress, off: u16, value: &[u8]) {
        let metadata = Self::pci_call_metadata(2, addr, off);
        let Some(pci_fd) = &*self.pci_fd else {
            log::error!("write pci {addr}@{off:04X}={value:02X?}: pci access not available");
            return;
        };
        if let Err(err) = pci_fd.call_wo(value, syscall::CallFlags::empty(), &metadata) {
            log::error!("write pci {addr}@{off:04X}={value:02X?}: {}", err);
        }
    }
}
impl acpi::Handler for AmlPhysMemHandler {
unsafe fn map_physical_region<T>(&self, phys: usize, size: usize) -> PhysicalMapping<Self, T> {
let phys_page = phys & PAGE_MASK;
let offset = phys & OFFSET_MASK;
let pages = (offset + size + PAGE_SIZE - 1) / PAGE_SIZE;
let map_size = pages * PAGE_SIZE;
let virt_page = common::physmap(
phys_page,
map_size,
common::Prot::RW,
common::MemoryType::default(),
)
.expect("failed to map physical region") as usize;
PhysicalMapping {
physical_start: phys,
virtual_start: NonNull::new((virt_page + offset) as *mut T).unwrap(),
region_length: size,
mapped_length: map_size,
handler: self.clone(),
}
}
fn unmap_physical_region<T>(region: &PhysicalMapping<Self, T>) {
let virt_page = region.virtual_start.addr().get() & PAGE_MASK;
unsafe {
libredox::call::munmap(virt_page as *mut (), region.mapped_length)
.expect("failed to unmap physical region")
}
}
fn read_u8(&self, address: usize) -> u8 {
log::trace!("read u8 {:X}", address);
if let Ok(mut page_cache) = self.page_cache.lock() {
if let Ok(value) = page_cache.read_from_phys::<u8>(address) {
return value;
}
}
log::error!("failed to read u8 {:#x}", address);
0
}
fn read_u16(&self, address: usize) -> u16 {
log::trace!("read u16 {:X}", address);
if let Ok(mut page_cache) = self.page_cache.lock() {
if let Ok(value) = page_cache.read_from_phys::<u16>(address) {
return value;
}
}
log::error!("failed to read u16 {:#x}", address);
0
}
fn read_u32(&self, address: usize) -> u32 {
log::trace!("read u32 {:X}", address);
if let Ok(mut page_cache) = self.page_cache.lock() {
if let Ok(value) = page_cache.read_from_phys::<u32>(address) {
return value;
}
}
log::error!("failed to read u32 {:#x}", address);
0
}
fn read_u64(&self, address: usize) -> u64 {
log::trace!("read u64 {:X}", address);
if let Ok(mut page_cache) = self.page_cache.lock() {
if let Ok(value) = page_cache.read_from_phys::<u64>(address) {
return value;
}
}
log::error!("failed to read u64 {:#x}", address);
0
}
fn write_u8(&self, address: usize, value: u8) {
log::trace!("write u8 {:X} = {:X}", address, value);
if let Ok(mut page_cache) = self.page_cache.lock() {
if page_cache.write_to_phys::<u8>(address, value).is_ok() {
return;
}
}
log::error!("failed to write u8 {:#x}", address);
}
fn write_u16(&self, address: usize, value: u16) {
log::trace!("write u16 {:X} = {:X}", address, value);
if let Ok(mut page_cache) = self.page_cache.lock() {
if page_cache.write_to_phys::<u16>(address, value).is_ok() {
return;
}
}
log::error!("failed to write u16 {:#x}", address);
}
fn write_u32(&self, address: usize, value: u32) {
log::trace!("write u32 {:X} = {:X}", address, value);
if let Ok(mut page_cache) = self.page_cache.lock() {
if page_cache.write_to_phys::<u32>(address, value).is_ok() {
return;
}
}
log::error!("failed to write u32 {:#x}", address);
}
fn write_u64(&self, address: usize, value: u64) {
log::trace!("write u64 {:X} = {:X}", address, value);
if let Ok(mut page_cache) = self.page_cache.lock() {
if page_cache.write_to_phys::<u64>(address, value).is_ok() {
return;
}
}
log::error!("failed to write u64 {:#x}", address);
}
// Pio must be enabled via syscall::iopl
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn read_io_u8(&self, port: u16) -> u8 {
Pio::<u8>::new(port).read()
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn read_io_u16(&self, port: u16) -> u16 {
Pio::<u16>::new(port).read()
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn read_io_u32(&self, port: u16) -> u32 {
Pio::<u32>::new(port).read()
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn write_io_u8(&self, port: u16, value: u8) {
Pio::<u8>::new(port).write(value)
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn write_io_u16(&self, port: u16, value: u16) {
Pio::<u16>::new(port).write(value)
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn write_io_u32(&self, port: u16, value: u32) {
Pio::<u32>::new(port).write(value)
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn read_io_u8(&self, port: u16) -> u8 {
log::error!("cannot read u8 from port 0x{port:04X}");
0
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn read_io_u16(&self, port: u16) -> u16 {
log::error!("cannot read u16 from port 0x{port:04X}");
0
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn read_io_u32(&self, port: u16) -> u32 {
log::error!("cannot read u32 from port 0x{port:04X}");
0
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn write_io_u8(&self, port: u16, value: u8) {
log::error!("cannot write 0x{value:02X} to port 0x{port:04X}");
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn write_io_u16(&self, port: u16, value: u16) {
log::error!("cannot write 0x{value:04X} to port 0x{port:04X}");
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn write_io_u32(&self, port: u16, value: u32) {
log::error!("cannot write 0x{value:08X} to port 0x{port:04X}");
}
fn read_pci_u8(&self, addr: PciAddress, off: u16) -> u8 {
let mut value = [0u8];
self.read_pci(addr, off, &mut value);
value[0]
}
fn read_pci_u16(&self, addr: PciAddress, off: u16) -> u16 {
let mut value = [0u8; 2];
self.read_pci(addr, off, &mut value);
u16::from_le_bytes(value)
}
fn read_pci_u32(&self, addr: PciAddress, off: u16) -> u32 {
let mut value = [0u8; 4];
self.read_pci(addr, off, &mut value);
u32::from_le_bytes(value)
}
fn write_pci_u8(&self, addr: PciAddress, off: u16, value: u8) {
self.write_pci(addr, off, &[value]);
}
fn write_pci_u16(&self, addr: PciAddress, off: u16, value: u16) {
self.write_pci(addr, off, &value.to_le_bytes());
}
fn write_pci_u32(&self, addr: PciAddress, off: u16, value: u32) {
self.write_pci(addr, off, &value.to_le_bytes());
}
fn nanos_since_boot(&self) -> u64 {
let ts = libredox::call::clock_gettime(libredox::flag::CLOCK_MONOTONIC)
.expect("failed to get time");
(ts.tv_sec as u64) * 1_000_000_000 + (ts.tv_nsec as u64)
}
fn stall(&self, microseconds: u64) {
let start = std::time::Instant::now();
while start.elapsed().as_micros() < microseconds.into() {
std::hint::spin_loop();
}
}
fn sleep(&self, milliseconds: u64) {
std::thread::sleep(std::time::Duration::from_millis(milliseconds));
}
fn create_mutex(&self) -> Handle {
log::debug!("TODO: Handler::create_mutex");
Handle(0)
}
fn acquire(&self, mutex: Handle, timeout: u16) -> Result<(), AmlError> {
log::debug!("TODO: Handler::acquire");
Ok(())
}
fn release(&self, mutex: Handle) {
log::debug!("TODO: Handler::release");
}
}
+256
View File
@@ -0,0 +1,256 @@
use std::time::Duration;
use acpi::aml::{
op_region::{OpRegion, RegionHandler, RegionSpace},
AmlError,
};
use common::{
io::{Io, Pio},
timeout::Timeout,
};
use log::*;
// Standard EC port addresses.
const EC_DATA: u16 = 0x62;
const EC_SC: u16 = 0x66;
// EC_SC status bits.
const OBF: u8 = 1 << 0; // output full / data ready for host <> empty
const IBF: u8 = 1 << 1; // input full / data ready for ec <> empty
const CMD: u8 = 1 << 3; // byte in data reg is command <> data
const BURST: u8 = 1 << 4; // burst mode <> normal mode
const SCI_EVT: u8 = 1 << 5; // sci event pending <> not
const SMI_EVT: u8 = 1 << 6; // smi event pending <> not
// EC command bytes (ACPI spec, "Embedded Controller Command Set").
const RD_EC: u8 = 0x80; // read EC RAM
const WR_EC: u8 = 0x81; // write EC RAM
const BE_EC: u8 = 0x82; // burst enable
const BD_EC: u8 = 0x83; // burst disable
const QR_EC: u8 = 0x84; // query pending event
const BURST_ACK: u8 = 0x90;
pub const DEFAULT_EC_TIMEOUT: Duration = Duration::from_millis(10);
/// Typed view over a byte read from the EC status register (EC_SC).
#[repr(transparent)]
pub struct ScBits(u8);
#[allow(dead_code)]
impl ScBits {
    /// True when `mask` is set in the status byte.
    const fn flag(&self, mask: u8) -> bool {
        self.0 & mask != 0
    }
    const fn obf(&self) -> bool {
        self.flag(OBF)
    }
    const fn ibf(&self) -> bool {
        self.flag(IBF)
    }
    const fn cmd(&self) -> bool {
        self.flag(CMD)
    }
    const fn burst(&self) -> bool {
        self.flag(BURST)
    }
    const fn sci_evt(&self) -> bool {
        self.flag(SCI_EVT)
    }
    const fn smi_evt(&self) -> bool {
        self.flag(SMI_EVT)
    }
}
/// Driver for the ACPI Embedded Controller, driven through a status/command
/// port (`sc`) and a data port (`data`).
#[derive(Debug, Clone, Copy)]
pub struct Ec {
    // Status/command register port (EC_SC).
    sc: u16,
    // Data register port (EC_DATA).
    data: u16,
    // How long to wait for IBF/OBF transitions before giving up.
    timeout: Duration,
}
impl Ec {
    /// EC at the standard ports (data 0x62, status/command 0x66) with the
    /// default timeout.
    pub fn new() -> Self {
        Self {
            sc: EC_SC,
            data: EC_DATA,
            timeout: DEFAULT_EC_TIMEOUT,
        }
    }
    /// EC at non-standard ports with a custom timeout.
    #[allow(dead_code)]
    pub fn with_address(sc: u16, data: u16, timeout: Duration) -> Self {
        Self { sc, data, timeout }
    }
    #[inline]
    fn read_reg_sc(&self) -> ScBits {
        ScBits(Pio::<u8>::new(self.sc).read())
    }
    #[inline]
    fn read_reg_data(&self) -> u8 {
        Pio::<u8>::new(self.data).read()
    }
    #[inline]
    fn write_reg_sc(&self, value: u8) {
        Pio::<u8>::new(self.sc).write(value);
    }
    #[inline]
    fn write_reg_data(&self, value: u8) {
        Pio::<u8>::new(self.data).write(value);
    }
    /// Waits until the EC's input buffer is empty (ready to accept a byte);
    /// `None` on timeout.
    #[inline]
    fn wait_for_write_ready(&self) -> Option<()> {
        let timeout = Timeout::new(self.timeout);
        loop {
            if !self.read_reg_sc().ibf() {
                return Some(());
            }
            timeout.run().ok()?;
        }
    }
    /// Waits until the EC's output buffer is full (a byte can be read);
    /// `None` on timeout.
    #[inline]
    fn wait_for_read_ready(&self) -> Option<()> {
        let timeout = Timeout::new(self.timeout);
        loop {
            if self.read_reg_sc().obf() {
                return Some(());
            }
            timeout.run().ok()?;
        }
    }
    //https://uefi.org/htmlspecs/ACPI_Spec_6_4_html/12_ACPI_Embedded_Controller_Interface_Specification/embedded-controller-command-set.html
    /// Reads the EC RAM byte at `address` (RD_EC); `None` on timeout.
    pub fn read(&self, address: u8) -> Option<u8> {
        trace!("ec read addr: {:x}", address);
        self.wait_for_write_ready()?;
        self.write_reg_sc(RD_EC);
        self.wait_for_write_ready()?;
        self.write_reg_data(address);
        self.wait_for_read_ready()?;
        let val = self.read_reg_data();
        trace!("got: {:x}", val);
        Some(val)
    }
    /// Writes `value` to the EC RAM byte at `address` (WR_EC); `None` on
    /// timeout.
    pub fn write(&self, address: u8, value: u8) -> Option<()> {
        trace!("ec write addr: {:x}, with: {:x}", address, value);
        self.wait_for_write_ready()?;
        self.write_reg_sc(WR_EC);
        self.wait_for_write_ready()?;
        self.write_reg_data(address);
        self.wait_for_write_ready()?;
        self.write_reg_data(value);
        trace!("done");
        Some(())
    }
    // disabled if not met
    // First Access - 400 microseconds
    // Subsequent Accesses - 50 microseconds each
    // Total Burst Time - 1 millisecond
    //Accesses should be responded to within 50 microseconds.
    /// Requests burst mode (BE_EC); true iff the EC acknowledged.
    /// BUGFIX: the ready waits' `Option` results were previously discarded,
    /// so a timeout went on to read a stale data byte; it now reports false.
    #[allow(dead_code)]
    fn enable_burst(&self) -> bool {
        trace!("ec burst enable");
        if self.wait_for_write_ready().is_none() {
            return false;
        }
        self.write_reg_sc(BE_EC);
        if self.wait_for_read_ready().is_none() {
            return false;
        }
        let res = self.read_reg_data() == BURST_ACK;
        trace!("success: {}", res);
        res
    }
    /// Leaves burst mode (BD_EC). On timeout the command is skipped; the EC
    /// drops out of burst mode by itself when its limits are exceeded.
    #[allow(dead_code)]
    fn disable_burst(&self) {
        trace!("ec burst disable");
        if self.wait_for_write_ready().is_none() {
            return;
        }
        self.write_reg_sc(BD_EC);
        trace!("done");
    }
    //OSPM driver sends this command when the SCI_EVT flag in the EC_SC register is set.
    /// Queries the pending event number (QR_EC). On timeout this returns 0,
    /// which the EC command set reserves for "no event pending".
    #[allow(dead_code)]
    fn queue_query(&mut self) -> u8 {
        trace!("ec query");
        if self.wait_for_write_ready().is_none() {
            return 0;
        }
        self.write_reg_sc(QR_EC);
        if self.wait_for_read_ready().is_none() {
            return 0;
        }
        let val = self.read_reg_data();
        trace!("got: {}", val);
        val
    }
}
/// Exposes the EC to AML as the handler for `EmbeddedControl` operation
/// regions. Only byte-granular accesses are defined for EC regions, so the
/// wider widths are rejected.
impl RegionHandler for Ec {
    /// Reads the EC RAM byte at `offset` within the region.
    fn read_u8(
        &self,
        region: &acpi::aml::op_region::OpRegion,
        offset: usize,
    ) -> Result<u8, acpi::aml::AmlError> {
        assert_eq!(region.space, RegionSpace::EmbeddedControl);
        self.read(offset as u8).ok_or(AmlError::MutexAcquireTimeout) // TODO proper error type
    }
    /// Writes `value` to the EC RAM byte at `offset` within the region.
    fn write_u8(
        &self,
        region: &OpRegion,
        offset: usize,
        value: u8,
    ) -> Result<(), acpi::aml::AmlError> {
        assert_eq!(region.space, RegionSpace::EmbeddedControl);
        self.write(offset as u8, value)
            .ok_or(AmlError::MutexAcquireTimeout) // TODO proper error type
    }
    // Wider accesses are not part of the EC interface; warn and fail.
    fn read_u16(&self, _region: &OpRegion, _offset: usize) -> Result<u16, acpi::aml::AmlError> {
        warn!("Got u16 EC read from AML!");
        Err(acpi::aml::AmlError::NoHandlerForRegionAccess(
            RegionSpace::EmbeddedControl,
        )) // TODO proper error type
    }
    fn read_u32(&self, _region: &OpRegion, _offset: usize) -> Result<u32, acpi::aml::AmlError> {
        warn!("Got u32 EC read from AML!");
        Err(acpi::aml::AmlError::NoHandlerForRegionAccess(
            RegionSpace::EmbeddedControl,
        )) // TODO proper error type
    }
    fn read_u64(&self, _region: &OpRegion, _offset: usize) -> Result<u64, acpi::aml::AmlError> {
        warn!("Got u64 EC read from AML!");
        Err(acpi::aml::AmlError::NoHandlerForRegionAccess(
            RegionSpace::EmbeddedControl,
        )) // TODO proper error type
    }
    fn write_u16(
        &self,
        _region: &OpRegion,
        _offset: usize,
        _value: u16,
    ) -> Result<(), acpi::aml::AmlError> {
        warn!("Got u16 EC write from AML!");
        Err(acpi::aml::AmlError::NoHandlerForRegionAccess(
            RegionSpace::EmbeddedControl,
        )) // TODO proper error type
    }
    fn write_u32(
        &self,
        _region: &OpRegion,
        _offset: usize,
        _value: u32,
    ) -> Result<(), acpi::aml::AmlError> {
        warn!("Got u32 EC write from AML!");
        Err(acpi::aml::AmlError::NoHandlerForRegionAccess(
            RegionSpace::EmbeddedControl,
        )) // TODO proper error type
    }
    fn write_u64(
        &self,
        _region: &OpRegion,
        _offset: usize,
        _value: u64,
    ) -> Result<(), acpi::aml::AmlError> {
        warn!("Got u64 EC write from AML!");
        Err(acpi::aml::AmlError::NoHandlerForRegionAccess(
            RegionSpace::EmbeddedControl,
        )) // TODO proper error type
    }
}
+143
View File
@@ -0,0 +1,143 @@
use std::convert::TryFrom;
use std::fs::File;
use std::mem;
use std::ops::ControlFlow;
use std::os::unix::io::AsRawFd;
use std::sync::Arc;
use ::acpi::aml::op_region::{RegionHandler, RegionSpace};
use event::{EventFlags, RawEventQueue};
use redox_scheme::{scheme::register_sync_scheme, Socket};
use scheme_utils::Blocking;
mod acpi;
mod aml_physmem;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod ec;
mod scheme;
/// Daemon body: parses the kernel-provided [RX]SDT, initializes the ACPI
/// context, registers the `acpi` scheme and then services scheme requests
/// until the kernel signals shutdown, at which point it enters S5.
///
/// The ordering below is significant: the scheme must be registered before
/// `daemon.ready()` so clients never see a half-initialized scheme, and
/// `setrens(0, 0)` drops into the null namespace only after readiness.
fn daemon(daemon: daemon::Daemon) -> ! {
    common::setup_logging(
        "misc",
        "acpi",
        "acpid",
        common::output_level(),
        common::file_level(),
    );
    log::info!("acpid start");
    let rxsdt_raw_data: Arc<[u8]> = std::fs::read("/scheme/kernel.acpi/rxsdt")
        .expect("acpid: failed to read `/scheme/kernel.acpi/rxsdt`")
        .into();
    // An empty blob means the machine has no ACPI; report ready and exit.
    if rxsdt_raw_data.is_empty() {
        log::info!("System doesn't use ACPI");
        daemon.ready();
        std::process::exit(0);
    }
    let sdt = self::acpi::Sdt::new(rxsdt_raw_data).expect("acpid: failed to parse [RX]SDT");
    let mut thirty_two_bit;
    let mut sixty_four_bit;
    // The RSDT holds 32-bit physical table addresses, the XSDT 64-bit ones;
    // both are widened to u64 behind a single dyn iterator.
    let physaddrs_iter = match &sdt.signature {
        b"RSDT" => {
            thirty_two_bit = sdt
                .data()
                .chunks(mem::size_of::<u32>())
                // TODO: With const generics, the compiler has some way of doing this for static sizes.
                .map(|chunk| <[u8; mem::size_of::<u32>()]>::try_from(chunk).unwrap())
                .map(|chunk| u32::from_le_bytes(chunk))
                .map(u64::from);
            &mut thirty_two_bit as &mut dyn Iterator<Item = u64>
        }
        b"XSDT" => {
            sixty_four_bit = sdt
                .data()
                .chunks(mem::size_of::<u64>())
                .map(|chunk| <[u8; mem::size_of::<u64>()]>::try_from(chunk).unwrap())
                .map(|chunk| u64::from_le_bytes(chunk));
            &mut sixty_four_bit as &mut dyn Iterator<Item = u64>
        }
        _ => panic!("acpid: expected [RX]SDT from kernel to be either of those"),
    };
    // The EC operation-region handler only exists on x86 (port I/O).
    let region_handlers: Vec<(RegionSpace, Box<dyn RegionHandler + 'static>)> = vec![
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        (RegionSpace::EmbeddedControl, Box::new(ec::Ec::new())),
    ];
    let acpi_context = self::acpi::AcpiContext::init(physaddrs_iter, region_handlers);
    // TODO: I/O permission bitmap?
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    common::acquire_port_io_rights().expect("acpid: failed to set I/O privilege level to Ring 3");
    // Reading this pipe unblocks when the kernel wants us to shut down.
    let shutdown_pipe = File::open("/scheme/kernel.acpi/kstop")
        .expect("acpid: failed to open `/scheme/kernel.acpi/kstop`");
    let mut event_queue = RawEventQueue::new().expect("acpid: failed to create event queue");
    let socket = Socket::nonblock().expect("acpid: failed to create disk scheme");
    let mut scheme = self::scheme::AcpiScheme::new(&acpi_context, &socket);
    let mut handler = Blocking::new(&socket, 16);
    event_queue
        .subscribe(shutdown_pipe.as_raw_fd() as usize, 0, EventFlags::READ)
        .expect("acpid: failed to register shutdown pipe for event queue");
    event_queue
        .subscribe(socket.inner().raw(), 1, EventFlags::READ)
        .expect("acpid: failed to register scheme socket for event queue");
    register_sync_scheme(&socket, "acpi", &mut scheme)
        .expect("acpid: failed to register acpi scheme to namespace");
    daemon.ready();
    // Drop all namespace access now that every needed fd is already open.
    libredox::call::setrens(0, 0).expect("acpid: failed to enter null namespace");
    let mut mounted = true;
    while mounted {
        let Some(event) = event_queue
            .next()
            .transpose()
            .expect("acpid: failed to read event file")
        else {
            break;
        };
        if event.fd == socket.inner().raw() {
            // Drain every pending request before going back to sleep.
            loop {
                match handler
                    .process_requests_nonblocking(&mut scheme)
                    .expect("acpid: failed to process requests")
                {
                    ControlFlow::Continue(()) => {}
                    ControlFlow::Break(()) => break,
                }
            }
        } else if event.fd == shutdown_pipe.as_raw_fd() as usize {
            log::info!("Received shutdown request from kernel.");
            mounted = false;
        } else {
            log::debug!("Received request to unknown fd: {}", event.fd);
            continue;
        }
    }
    drop(shutdown_pipe);
    drop(event_queue);
    // S5 = soft-off; this powers the machine down and never returns.
    acpi_context.set_global_s_state(5);
    unreachable!("System should have shut down before this is entered");
}
/// Entry point: initializes common process state, then runs `daemon` as a
/// readiness-signalling daemon.
fn main() {
    common::init();
    daemon::Daemon::new(daemon);
}
@@ -0,0 +1,485 @@
use acpi::aml::namespace::AmlName;
use amlserde::aml_serde_name::to_aml_format;
use amlserde::AmlSerdeValue;
use core::str;
use libredox::Fd;
use parking_lot::RwLockReadGuard;
use redox_scheme::scheme::SchemeSync;
use redox_scheme::{CallerCtx, OpenResult, SendFdRequest, Socket};
use ron::de::SpannedError;
use scheme_utils::HandleMap;
use std::convert::{TryFrom, TryInto};
use std::str::FromStr;
use syscall::dirent::{DirEntry, DirentBuf, DirentKind};
use syscall::schemev2::NewFdFlags;
use syscall::FobtainFdFlags;
use syscall::data::Stat;
use syscall::error::{Error, Result};
use syscall::error::{EACCES, EBADF, EBADFD, EINVAL, EIO, EISDIR, ENOENT, ENOTDIR};
use syscall::flag::{MODE_DIR, MODE_FILE};
use syscall::flag::{O_ACCMODE, O_DIRECTORY, O_RDONLY, O_STAT, O_SYMLINK};
use syscall::{EOVERFLOW, EPERM};
use crate::acpi::{AcpiContext, AmlSymbols, SdtSignature};
/// The `acpi:` scheme, exposing parsed tables and AML symbols as a small
/// read-only filesystem plus a `register_pci` control handle.
pub struct AcpiScheme<'acpi, 'sock> {
    // Shared ACPI state owned by the daemon.
    ctx: &'acpi AcpiContext,
    // Open handles, keyed by the fd number handed to the kernel.
    handles: HandleMap<Handle<'acpi>>,
    // Fd of the pci scheme once a pcid registers itself; None until then.
    pci_fd: Option<Fd>,
    socket: &'sock Socket,
}
/// State kept per open fd.
struct Handle<'a> {
    kind: HandleKind<'a>,
    // Opened with O_STAT (metadata only; reads are rejected).
    stat: bool,
    // Opened writable by root, which permits AML evaluation requests.
    allowed_to_eval: bool,
}
/// What a handle points at within the scheme's namespace.
enum HandleKind<'a> {
    // The scheme root directory listing "tables" and "symbols".
    TopLevel,
    // The "tables" directory.
    Tables,
    // One SDT, addressed by signature + OEM ids.
    Table(SdtSignature),
    // The "symbols" directory; holds the AML symbols read lock open.
    Symbols(RwLockReadGuard<'a, AmlSymbols>),
    // One AML symbol and its rendered description.
    Symbol { name: String, description: String },
    SchemeRoot,
    // Control handle through which pcid passes its fd.
    RegisterPci,
}
impl HandleKind<'_> {
fn is_dir(&self) -> bool {
match self {
Self::TopLevel => true,
Self::Tables => true,
Self::Table(_) => false,
Self::Symbols(_) => true,
Self::Symbol { .. } => false,
Self::SchemeRoot => false,
Self::RegisterPci => false,
}
}
fn len(&self, acpi_ctx: &AcpiContext) -> Result<usize> {
Ok(match self {
// Files
Self::Table(signature) => acpi_ctx
.sdt_from_signature(signature)
.ok_or(Error::new(EBADFD))?
.length(),
Self::Symbol { description, .. } => description.len(),
// Directories
Self::TopLevel | Self::Symbols(_) | Self::Tables => 0,
Self::SchemeRoot | Self::RegisterPci => return Err(Error::new(EBADF)),
})
}
}
impl<'acpi, 'sock> AcpiScheme<'acpi, 'sock> {
    /// Creates a scheme with no open handles and no registered PCI fd.
    pub fn new(ctx: &'acpi AcpiContext, socket: &'sock Socket) -> Self {
        let handles = HandleMap::new();
        Self {
            ctx,
            handles,
            pci_fd: None,
            socket,
        }
    }
}
/// Converts one ASCII hex digit (either case) to its value, `None` otherwise.
fn parse_hex_digit(hex: u8) -> Option<u8> {
    match hex.to_ascii_lowercase() {
        digit @ b'0'..=b'9' => Some(digit - b'0'),
        digit @ b'a'..=b'f' => Some(digit - b'a' + 10),
        _ => None,
    }
}
/// Parses two ASCII hex digits (`hex[0]` high nibble, `hex[1]` low nibble)
/// into one byte. Returns `None` for non-hex characters — and now also for
/// slices shorter than two bytes, which previously panicked on indexing.
fn parse_hex_2digit(hex: &[u8]) -> Option<u8> {
    let most_significant = parse_hex_digit(*hex.first()?)?;
    let least_significant = parse_hex_digit(*hex.get(1)?)?;
    Some((most_significant << 4) | least_significant)
}
fn parse_oem_id(hex: [u8; 12]) -> Option<[u8; 6]> {
Some([
parse_hex_2digit(&hex[0..2])?,
parse_hex_2digit(&hex[2..4])?,
parse_hex_2digit(&hex[4..6])?,
parse_hex_2digit(&hex[6..8])?,
parse_hex_2digit(&hex[8..10])?,
parse_hex_2digit(&hex[10..12])?,
])
}
fn parse_oem_table_id(hex: [u8; 16]) -> Option<[u8; 8]> {
Some([
parse_hex_2digit(&hex[0..2])?,
parse_hex_2digit(&hex[2..4])?,
parse_hex_2digit(&hex[4..6])?,
parse_hex_2digit(&hex[6..8])?,
parse_hex_2digit(&hex[8..10])?,
parse_hex_2digit(&hex[10..12])?,
parse_hex_2digit(&hex[12..14])?,
parse_hex_2digit(&hex[14..16])?,
])
}
/// Parses a table file name of the exact form
/// `SSSS-OOOOOOOOOOOO-TTTTTTTTTTTTTTTT` (34 bytes: 4-byte signature, then
/// the hex-encoded 6-byte OEM id and 8-byte OEM table id).
fn parse_table(table: &[u8]) -> Option<SdtSignature> {
    // The original accepted only len >= 34 (via the range gets) and
    // len <= 34, i.e. exactly 34 bytes; check that up front.
    if table.len() != 34 || table[4] != b'-' || table[17] != b'-' {
        return None;
    }
    let signature = <[u8; 4]>::try_from(&table[..4])
        .expect("expected 4-byte slice to be convertible into [u8; 4]");
    let oem_hex = <[u8; 12]>::try_from(&table[5..17])
        .expect("expected 12-byte slice to be convertible into [u8; 12]");
    let oem_table_hex = <[u8; 16]>::try_from(&table[18..34])
        .expect("expected 16-byte slice to be convertible into [u8; 16]");
    Some(SdtSignature {
        signature,
        oem_id: parse_oem_id(oem_hex)?,
        oem_table_id: parse_oem_table_id(oem_table_hex)?,
    })
}
impl SchemeSync for AcpiScheme<'_, '_> {
fn scheme_root(&mut self) -> Result<usize> {
Ok(self.handles.insert(Handle {
stat: false,
kind: HandleKind::SchemeRoot,
allowed_to_eval: false,
}))
}
fn openat(
&mut self,
dirfd: usize,
path: &str,
flags: usize,
_fcntl_flags: u32,
ctx: &CallerCtx,
) -> Result<OpenResult> {
let handle = self.handles.get(dirfd)?;
let path = path.trim_start_matches('/');
let flag_stat = flags & O_STAT == O_STAT;
let flag_dir = flags & O_DIRECTORY == O_DIRECTORY;
let kind = match handle.kind {
HandleKind::SchemeRoot => {
// TODO: arrayvec
let components = {
let mut v = arrayvec::ArrayVec::<&str, 3>::new();
let it = path.split('/');
for component in it.take(3) {
v.push(component);
}
v
};
match &*components {
[""] => HandleKind::TopLevel,
["register_pci"] => HandleKind::RegisterPci,
["tables"] => HandleKind::Tables,
["tables", table] => {
let signature = parse_table(table.as_bytes()).ok_or(Error::new(ENOENT))?;
HandleKind::Table(signature)
}
["symbols"] => {
if let Ok(aml_symbols) = self.ctx.aml_symbols(self.pci_fd.as_ref()) {
HandleKind::Symbols(aml_symbols)
} else {
return Err(Error::new(EIO));
}
}
["symbols", symbol] => {
if let Some(description) = self.ctx.aml_lookup(symbol) {
HandleKind::Symbol {
name: (*symbol).to_owned(),
description,
}
} else {
return Err(Error::new(ENOENT));
}
}
_ => return Err(Error::new(ENOENT)),
}
}
HandleKind::Symbols(ref aml_symbols) => {
if let Some(description) = aml_symbols.lookup(path) {
HandleKind::Symbol {
name: (*path).to_owned(),
description,
}
} else {
return Err(Error::new(ENOENT));
}
}
_ => return Err(Error::new(EACCES)),
};
if kind.is_dir() && !flag_dir && !flag_stat {
return Err(Error::new(EISDIR));
} else if !kind.is_dir() && flag_dir && !flag_stat {
return Err(Error::new(ENOTDIR));
}
let allowed_to_eval = if flags & O_ACCMODE == O_RDONLY || flag_stat {
false
} else if ctx.uid == 0 {
true
} else {
return Err(Error::new(EINVAL));
};
if flags & O_SYMLINK == O_SYMLINK && !flag_stat {
return Err(Error::new(EINVAL));
}
let fd = self.handles.insert(Handle {
stat: flag_stat,
kind,
allowed_to_eval,
});
Ok(OpenResult::ThisScheme {
number: fd,
flags: NewFdFlags::POSITIONED,
})
}
fn fstat(&mut self, id: usize, stat: &mut Stat, _ctx: &CallerCtx) -> Result<()> {
let handle = self.handles.get(id)?;
stat.st_size = handle
.kind
.len(self.ctx)?
.try_into()
.unwrap_or(u64::max_value());
if handle.kind.is_dir() {
stat.st_mode = MODE_DIR;
} else {
stat.st_mode = MODE_FILE;
}
Ok(())
}
fn read(
&mut self,
id: usize,
buf: &mut [u8],
offset: u64,
_fcntl: u32,
_ctx: &CallerCtx,
) -> Result<usize> {
let offset: usize = offset.try_into().map_err(|_| Error::new(EINVAL))?;
let handle = self.handles.get_mut(id)?;
if handle.stat {
return Err(Error::new(EBADF));
}
let src_buf = match &handle.kind {
HandleKind::Table(ref signature) => self
.ctx
.sdt_from_signature(signature)
.ok_or(Error::new(EBADFD))?
.as_slice(),
HandleKind::Symbol { description, .. } => description.as_bytes(),
_ => return Err(Error::new(EINVAL)),
};
let offset = std::cmp::min(src_buf.len(), offset);
let src_buf = &src_buf[offset..];
let to_copy = std::cmp::min(src_buf.len(), buf.len());
buf[..to_copy].copy_from_slice(&src_buf[..to_copy]);
Ok(to_copy)
}
fn getdents<'buf>(
&mut self,
id: usize,
mut buf: DirentBuf<&'buf mut [u8]>,
opaque_offset: u64,
) -> Result<DirentBuf<&'buf mut [u8]>> {
let handle = self.handles.get_mut(id)?;
match &handle.kind {
HandleKind::TopLevel => {
const TOPLEVEL_ENTRIES: &[&str] = &["tables", "symbols"];
for (idx, name) in TOPLEVEL_ENTRIES
.iter()
.enumerate()
.skip(opaque_offset as usize)
{
buf.entry(DirEntry {
inode: 0,
next_opaque_id: idx as u64 + 1,
name,
kind: DirentKind::Directory,
})?;
}
}
HandleKind::Symbols(aml_symbols) => {
for (idx, (symbol_name, _value)) in aml_symbols
.symbols_cache()
.iter()
.enumerate()
.skip(opaque_offset as usize)
{
buf.entry(DirEntry {
inode: 0,
next_opaque_id: idx as u64 + 1,
name: symbol_name.as_str(),
kind: DirentKind::Regular,
})?;
}
}
HandleKind::Tables => {
for (idx, table) in self
.ctx
.tables()
.iter()
.enumerate()
.skip(opaque_offset as usize)
{
let utf8_or_eio = |bytes| str::from_utf8(bytes).map_err(|_| Error::new(EIO));
let mut name = String::new();
name.push_str(utf8_or_eio(&table.signature[..])?);
name.push('-');
for byte in table.oem_id.iter() {
std::fmt::write(&mut name, format_args!("{:>02X}", byte)).unwrap();
}
name.push('-');
for byte in table.oem_table_id.iter() {
std::fmt::write(&mut name, format_args!("{:>02X}", byte)).unwrap();
}
buf.entry(DirEntry {
inode: 0,
next_opaque_id: idx as u64 + 1,
name: &name,
kind: DirentKind::Regular,
})?;
}
}
_ => return Err(Error::new(EIO)),
}
Ok(buf)
}
fn call(
&mut self,
id: usize,
payload: &mut [u8],
_metadata: &[u64],
_ctx: &CallerCtx,
) -> Result<usize> {
let handle = self.handles.get_mut(id)?;
if !handle.allowed_to_eval {
return Err(Error::new(EPERM));
}
let Ok(args): Result<Vec<AmlSerdeValue>, SpannedError> = ron::de::from_bytes(payload)
else {
return Err(Error::new(EINVAL));
};
let HandleKind::Symbol { name, .. } = &handle.kind else {
return Err(Error::new(EBADF));
};
let Ok(aml_name) = AmlName::from_str(&to_aml_format(name)) else {
log::error!("Failed to convert symbol name: \"{name}\" to aml name!");
return Err(Error::new(EBADF));
};
let Ok(result) = self.ctx.aml_eval(aml_name, args) else {
return Err(Error::new(EINVAL));
};
let Ok(serialized_result) = ron::ser::to_string(&result) else {
log::error!("Failed to serialize aml result!");
return Err(Error::new(EINVAL));
};
let byte_result = serialized_result.as_bytes();
let result_len = byte_result.len();
if result_len > payload.len() {
return Err(Error::new(EOVERFLOW));
}
payload[..result_len].copy_from_slice(byte_result);
Ok(result_len)
}
fn on_sendfd(&mut self, sendfd_request: &SendFdRequest) -> Result<usize> {
let id = sendfd_request.id();
let num_fds = sendfd_request.num_fds();
let handle = self.handles.get(id)?;
if !matches!(handle.kind, HandleKind::RegisterPci) {
return Err(Error::new(EACCES));
}
if num_fds == 0 {
return Ok(0);
}
if num_fds > 1 {
return Err(Error::new(EINVAL));
}
let mut new_fd = usize::MAX;
if let Err(e) = sendfd_request.obtain_fd(
&self.socket,
FobtainFdFlags::UPPER_TBL,
std::slice::from_mut(&mut new_fd),
) {
return Err(e);
}
let new_fd = libredox::Fd::new(new_fd);
if self.pci_fd.is_some() {
return Err(Error::new(EINVAL));
} else {
self.pci_fd = Some(new_fd);
}
Ok(num_fds)
}
    /// Release an open handle. Dropping the entry frees any state it owns
    /// (cached symbol listings, table signatures, symbol descriptions).
    fn on_close(&mut self, id: usize) {
        self.handles.remove(id);
    }
}