milestone: desktop path Phases 1-5

Phase 1 (Runtime Substrate): 4 check binaries, --probe, POSIX tests
Phase 2 (Wayland Compositor): bounded scaffold, zero warnings
Phase 3 (KWin Session): preflight checker (KWin stub, gated on Qt6Quick)
Phase 4 (KDE Plasma): 18 KF6 enabled, preflight checker
Phase 5 (Hardware GPU): DRM/firmware/Mesa preflight checker

Build: zero warnings, all scripts syntax-clean. Oracle-verified.
This commit is contained in:
2026-04-29 09:54:06 +01:00
parent b23714f542
commit 8acc73d774
508 changed files with 76526 additions and 396 deletions
@@ -0,0 +1 @@
/target
+33
View File
@@ -0,0 +1,33 @@
# Cargo manifest for pcid, the Redox OS PCI / PCI Express bus driver.
[package]
name = "pcid"
description = "PCI and PCI Express driver"
version = "0.1.0"
edition = "2021"

# The pcid daemon binary.
[[bin]]
name = "pcid"
path = "src/main.rs"

# Client-side interface library linked by the PCI subdrivers.
[lib]
name = "pcid_interface"
path = "src/lib.rs"

[dependencies]
bincode = "1.2"
fdt.workspace = true
libc.workspace = true
log.workspace = true
pci_types = "0.10.1"
pico-args = { workspace = true, features = ["combined-flags"] }
plain.workspace = true
redox-scheme.workspace = true
scheme-utils = { path = "../../scheme-utils" }
redox_syscall.workspace = true
serde.workspace = true
common = { path = "../common" }
daemon = { path = "../../daemon" }
libredox.workspace = true

# Inherit lint configuration from the workspace.
[lints]
workspace = true
@@ -0,0 +1,96 @@
use std::cell::Cell;
use std::convert::TryFrom;
use std::sync::Mutex;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use common::io::{Io as _, Pio};
use log::info;
use pci_types::{ConfigRegionAccess, PciAddress};
/// Legacy PCI 3.0 configuration access implemented with x86 IO ports
/// (0xCF8 address port / 0xCFC data port; see the `ConfigRegionAccess`
/// impl below).
pub(crate) struct Pci {
    // Serializes the address-port write with the following data-port
    // access, since the two port operations must stay paired.
    lock: Mutex<()>,
}
impl Pci {
    pub(crate) fn new() -> Self {
        Self {
            lock: Mutex::new(()),
        }
    }
    /// Acquires IO port rights for the calling thread, at most once per
    /// thread.
    fn set_iopl() {
        // The IO privilege level is per-thread, so we need to do the initialization on every thread.
        thread_local! {
            static IOPL_ONCE: Cell<bool> = Cell::new(false);
        }
        IOPL_ONCE.with(|iopl_once| {
            if !iopl_once.replace(true) {
                // Port rights are requested only here, on the port-IO
                // fallback path, so pcid runs without them whenever the
                // PCIe memory-mapped configuration space is usable.
                info!(
                    "PCI: couldn't find or access PCIe extended configuration, \
                    and thus falling back to PCI 3.0 io ports"
                );
                common::acquire_port_io_rights().expect("pcid: failed to get IO port rights");
            }
        });
    }
    /// Encodes a configuration address for the 0xCF8 address port:
    /// enable bit | bus << 16 | device << 11 | function << 8 | offset.
    fn address(address: PciAddress, offset: u8) -> u32 {
        assert_eq!(
            address.segment(),
            0,
            "usage of multiple segments requires PCIe extended configuration"
        );
        // Offset must be dword-aligned (low two bits clear).
        assert_eq!(offset & 0xFC, offset, "pci offset is not aligned");
        0x80000000
            | (u32::from(address.bus()) << 16)
            | (u32::from(address.device()) << 11)
            | (u32::from(address.function()) << 8)
            | u32::from(offset)
    }
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
// Legacy port-based configuration access: write the encoded address to
// port 0xCF8, then transfer the dword through data port 0xCFC. `lock`
// keeps the two port operations paired across threads.
impl ConfigRegionAccess for Pci {
    unsafe fn read(&self, address: PciAddress, offset: u16) -> u32 {
        let _guard = self.lock.lock().unwrap();
        Self::set_iopl();
        // Port IO can only reach the 256-byte PCI 3.0 space.
        let offset =
            u8::try_from(offset).expect("offset too large for PCI 3.0 configuration space");
        let address = Self::address(address, offset);
        Pio::<u32>::new(0xCF8).write(address);
        Pio::<u32>::new(0xCFC).read()
    }
    unsafe fn write(&self, address: PciAddress, offset: u16, value: u32) {
        let _guard = self.lock.lock().unwrap();
        Self::set_iopl();
        let offset =
            u8::try_from(offset).expect("offset too large for PCI 3.0 configuration space");
        let address = Self::address(address, offset);
        Pio::<u32>::new(0xCF8).write(address);
        Pio::<u32>::new(0xCFC).write(value);
    }
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
// Port-IO configuration access is x86-specific; other architectures are
// expected to use the memory-mapped PCIe path, so this fallback is
// unimplemented. Parameters are underscore-prefixed to silence
// unused-variable warnings until the todo!()s are filled in.
impl ConfigRegionAccess for Pci {
    unsafe fn read(&self, _addr: PciAddress, _offset: u16) -> u32 {
        let _guard = self.lock.lock().unwrap();
        todo!("Pci::CfgAccess::read on this architecture")
    }
    unsafe fn write(&self, _addr: PciAddress, _offset: u16, _value: u32) {
        let _guard = self.lock.lock().unwrap();
        todo!("Pci::CfgAccess::write on this architecture")
    }
}
@@ -0,0 +1,372 @@
use std::sync::Mutex;
use std::{fs, io, mem};
use common::{MemoryType, PhysBorrowed, Prot};
use fdt::Fdt;
use pci_types::{ConfigRegionAccess, PciAddress};
use fallback::Pci;
mod fallback;
/// One parsed entry of a device tree `interrupt-map`: it maps a PCI child
/// address and interrupt pin to an interrupt specifier on the parent
/// interrupt controller.
pub struct InterruptMap {
    /// Child unit address (3 cells, per the PCI device tree binding).
    pub addr: [u32; 3],
    /// Child interrupt specifier (the INTx# pin cell).
    pub interrupt: u32,
    /// phandle of the parent interrupt controller.
    pub parent_phandle: u32,
    /// Parent interrupt specifier, zero-padded to 3 cells.
    pub parent_interrupt: [u32; 3],
    /// How many of the cells in `parent_interrupt` are meaningful.
    pub parent_interrupt_cells: usize,
}
// https://elinux.org/Device_Tree_Usage has a lot of useful information
/// Locates the `pci-host-ecam-generic` node in the kernel-provided device
/// tree and hands its ECAM window, interrupt map, and interrupt-map mask
/// to `f`.
fn locate_ecam_dtb<T>(
    f: impl FnOnce(PcieAllocs<'_>, Vec<InterruptMap>, [u32; 4]) -> io::Result<T>,
) -> io::Result<T> {
    let dtb = fs::read("/scheme/kernel.dtb")?;
    let dt = Fdt::new(&dtb).map_err(|err| {
        io::Error::new(
            io::ErrorKind::InvalidData,
            format!("invalid device tree: {err:?}"),
        )
    })?;
    let node = dt
        .find_compatible(&["pci-host-ecam-generic"])
        .ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::NotFound,
                "couldn't find pci-host-ecam-generic node in device tree",
            )
        })?;
    let address = node.reg().unwrap().next().unwrap().starting_address as u64;
    // `bus-range` is two big-endian u32 cells: first and last decoded bus.
    let bus_range = node.property("bus-range").unwrap();
    assert_eq!(bus_range.value.len(), 8);
    let start_bus = u32::from_be_bytes(<[u8; 4]>::try_from(&bus_range.value[0..4]).unwrap());
    let end_bus = u32::from_be_bytes(<[u8; 4]>::try_from(&bus_range.value[4..8]).unwrap());
    // address-cells == 3, size-cells == 2, interrupt-cells == 1
    let mut interrupt_map_data = node
        .property("interrupt-map")
        .unwrap()
        .value
        .chunks_exact(4)
        .map(|x| u32::from_be_bytes(<[u8; 4]>::try_from(x).unwrap()));
    let mut interrupt_map = Vec::<InterruptMap>::new();
    // Each map entry is: child address (3 cells), child interrupt (1 cell),
    // parent phandle, parent unit address (#address-cells of the parent,
    // asserted to be zeros here), then the parent interrupt specifier.
    while let Ok([addr1, addr2, addr3, int1, phandle]) = interrupt_map_data.next_chunk::<5>() {
        let parent = dt.find_phandle(phandle).unwrap();
        let parent_address_cells = u32::from_be_bytes(
            parent.property("#address-cells").unwrap().value[..4]
                .try_into()
                .unwrap(),
        );
        match parent_address_cells {
            0 => {}
            1 => {
                assert_eq!(interrupt_map_data.next().unwrap(), 0);
            }
            2 => {
                assert_eq!(interrupt_map_data.next_chunk::<2>().unwrap(), [0, 0]);
            }
            3 => {
                assert_eq!(interrupt_map_data.next_chunk::<3>().unwrap(), [0, 0, 0]);
            }
            // Unsupported cell count: stop parsing rather than misalign.
            _ => break,
        };
        let parent_interrupt_cells = parent.interrupt_cells().unwrap();
        // Parent specifiers are normalized to three cells, zero-padded.
        let parent_interrupt = match parent_interrupt_cells {
            1 if let Some(a) = interrupt_map_data.next() => [a, 0, 0],
            2 if let Ok([a, b]) = interrupt_map_data.next_chunk::<2>() => [a, b, 0],
            3 if let Ok([a, b, c]) = interrupt_map_data.next_chunk::<3>() => [a, b, c],
            _ => break,
        };
        interrupt_map.push(InterruptMap {
            addr: [addr1, addr2, addr3],
            interrupt: int1,
            parent_phandle: phandle,
            parent_interrupt,
            parent_interrupt_cells,
        });
    }
    // An absent `interrupt-map-mask` means "compare all bits".
    let interrupt_map_mask = if let Some(interrupt_mask_node) = node.property("interrupt-map-mask")
    {
        let mut cells = interrupt_mask_node
            .value
            .chunks_exact(4)
            .map(|x| u32::from_be_bytes(<[u8; 4]>::try_from(x).unwrap()));
        cells.next_chunk::<4>().unwrap().to_owned()
    } else {
        [u32::MAX, u32::MAX, u32::MAX, u32::MAX]
    };
    f(
        // The device tree describes a single window; segment 0 is assumed.
        PcieAllocs(&[PcieAlloc {
            base_addr: address,
            seg_group_num: 0,
            start_bus: start_bus.try_into().unwrap(),
            end_bus: end_bus.try_into().unwrap(),
            _rsvd: [0; 4],
        }]),
        interrupt_map,
        interrupt_map_mask,
    )
}
/// ACPI table signature identifying the MCFG table.
pub const MCFG_NAME: [u8; 4] = *b"MCFG";
/// In-memory layout of the ACPI MCFG table header, as read from acpid.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct Mcfg {
    // base sdt fields
    name: [u8; 4],
    // Total table length in bytes, including this header and the
    // trailing `PcieAlloc` entries (validated in `Mcfg::parse`).
    length: u32,
    revision: u8,
    checksum: u8,
    oem_id: [u8; 6],
    oem_table_id: [u8; 8],
    oem_revision: u32,
    creator_id: [u8; 4],
    creator_revision: u32,
    _rsvd: [u8; 8],
}
// SAFETY: `Mcfg` is `repr(C, packed)` and contains only plain integer/byte
// fields, so every byte pattern is a valid value.
unsafe impl plain::Plain for Mcfg {}
/// The "Memory Mapped Enhanced Configuration Space Base Address Allocation Structure" (yes, it's
/// called that).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct PcieAlloc {
    /// Physical base address of the ECAM window.
    pub base_addr: u64,
    /// PCI segment group this window belongs to.
    pub seg_group_num: u16,
    /// Inclusive range of bus numbers decoded through this window.
    pub start_bus: u8,
    pub end_bus: u8,
    _rsvd: [u8; 4],
}
// SAFETY: `repr(C, packed)` with only plain-old-data fields; any byte
// pattern is a valid value.
unsafe impl plain::Plain for PcieAlloc {}
/// Borrowed view of the allocation entries that follow an MCFG header.
#[derive(Debug)]
struct PcieAllocs<'a>(&'a [PcieAlloc]);
impl Mcfg {
    /// Locates the ACPI MCFG table via acpid's table directory and passes
    /// its PCIe allocations to `f`.
    ///
    /// ACPI carries no device-tree style interrupt map, so `f` receives an
    /// empty map and an all-ones mask.
    fn with<T>(
        f: impl FnOnce(PcieAllocs<'_>, Vec<InterruptMap>, [u32; 4]) -> io::Result<T>,
    ) -> io::Result<T> {
        let table_dir = fs::read_dir("/scheme/acpi/tables")?;
        // TODO: validate/print MCFG?
        for table_direntry in table_dir {
            let table_path = table_direntry?.path();
            // Every directory entry has to have a filename unless
            // the filesystem (or in this case acpid) misbehaves.
            // If it misbehaves we have worse problems than pcid
            // crashing. `as_encoded_bytes()` returns some superset
            // of ASCII, so directly comparing it with an ASCII name
            // is fine.
            let table_filename = table_path.file_name().unwrap().as_encoded_bytes();
            if table_filename.get(0..4) == Some(&MCFG_NAME) {
                let bytes = fs::read(table_path)?.into_boxed_slice();
                match Mcfg::parse(&*bytes) {
                    Some((_mcfg, allocs)) => {
                        log::debug!("MCFG ALLOCS {:?}", allocs.0);
                        return f(allocs, Vec::new(), [u32::MAX, u32::MAX, u32::MAX, u32::MAX]);
                    }
                    None => {
                        // The table WAS found but failed validation; report
                        // that, rather than the misleading "couldn't find"
                        // message this branch used to produce.
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidData,
                            "invalid mcfg table",
                        ));
                    }
                }
            }
        }
        Err(io::Error::new(
            io::ErrorKind::NotFound,
            "couldn't find mcfg table",
        ))
    }
    /// Splits raw MCFG bytes into the header and its trailing allocation
    /// entries, validating the header's self-reported length. Returns
    /// `None` when the buffer is too short or the length doesn't match.
    fn parse<'a>(bytes: &'a [u8]) -> Option<(&'a Mcfg, PcieAllocs<'a>)> {
        if bytes.len() < mem::size_of::<Mcfg>() {
            return None;
        }
        let (header_bytes, allocs_bytes) = bytes.split_at(mem::size_of::<Mcfg>());
        let mcfg =
            plain::from_bytes::<Mcfg>(header_bytes).expect("packed -> align 1, checked size");
        if mcfg.length as usize != bytes.len() {
            // Log both sizes so a truncated or padded table is diagnosable;
            // the braces copy the packed field to avoid an unaligned
            // reference.
            log::warn!(
                "MCFG {mcfg:?}: header claims {} bytes but {} were read",
                { mcfg.length },
                bytes.len()
            );
            return None;
        }
        // TODO: Allow invalid bytes not divisible by PcieAlloc?
        let allocs_len =
            allocs_bytes.len() / mem::size_of::<PcieAlloc>() * mem::size_of::<PcieAlloc>();
        let allocs = plain::slice_from_bytes::<PcieAlloc>(&allocs_bytes[..allocs_len])
            .expect("packed -> align 1, checked size");
        Some((mcfg, PcieAllocs(allocs)))
    }
}
/// PCIe configuration access: memory-mapped ECAM windows with a port-IO
/// fallback for addresses that no mapped window covers.
pub struct Pcie {
    // Serializes configuration accesses.
    lock: Mutex<()>,
    // Mapped ECAM windows, sorted by (segment, start bus) so `bus_addr`
    // can binary-search them.
    allocs: Vec<Alloc>,
    /// Device tree `interrupt-map` entries (empty when ACPI is used).
    pub interrupt_map: Vec<InterruptMap>,
    /// Mask applied when matching against `interrupt_map`.
    pub interrupt_map_mask: [u32; 4],
    // Legacy PCI 3.0 port-IO access used when MMIO is unavailable.
    fallback: Pci,
}
/// One mapped ECAM window covering buses `start_bus..=end_bus` of `seg`.
struct Alloc {
    seg: u16,
    start_bus: u8,
    end_bus: u8,
    mem: PhysBorrowed,
}
// SAFETY: NOTE(review): `Pcie` holds raw MMIO mappings via `PhysBorrowed`;
// all accesses are serialized through `lock`, which is presumably what
// makes cross-thread use sound — confirm `PhysBorrowed` itself carries no
// thread affinity.
unsafe impl Send for Pcie {}
unsafe impl Sync for Pcie {}
/// Each bus occupies 1 MiB of ECAM space (32 devices x 8 functions x 4 KiB).
const BYTES_PER_BUS: usize = 1 << 20;
impl Pcie {
    /// Builds the access object from ACPI's MCFG table, falling back to the
    /// device tree ECAM node, and finally to pure PCI 3.0 port IO.
    pub fn new() -> Self {
        match Mcfg::with(Self::from_allocs) {
            Ok(pcie) => pcie,
            Err(acpi_error) => match locate_ecam_dtb(Self::from_allocs) {
                Ok(pcie) => pcie,
                Err(fdt_error) => {
                    log::warn!(
                        "Couldn't retrieve PCIe info, perhaps the kernel is not compiled with \
                        acpi or device tree support? Using the PCI 3.0 configuration space \
                        instead. ACPI error: {:?} FDT error: {:?}",
                        acpi_error,
                        fdt_error
                    );
                    Self {
                        lock: Mutex::new(()),
                        allocs: Vec::new(),
                        fallback: Pci::new(),
                        interrupt_map: Vec::new(),
                        interrupt_map_mask: [u32::MAX, u32::MAX, u32::MAX, u32::MAX],
                    }
                }
            },
        }
    }
    /// Maps each described ECAM window and assembles the `Pcie` state.
    fn from_allocs(
        allocs: PcieAllocs<'_>,
        interrupt_map: Vec<InterruptMap>,
        interrupt_map_mask: [u32; 4],
    ) -> Result<Pcie, io::Error> {
        let mut allocs = allocs
            .0
            .iter()
            .filter_map(|desc| {
                Some(Alloc {
                    seg: desc.seg_group_num,
                    start_bus: desc.start_bus,
                    end_bus: desc.end_bus,
                    // Map the whole window uncached; windows that fail to
                    // map are skipped, leaving their buses to the port-IO
                    // fallback.
                    mem: PhysBorrowed::map(
                        desc.base_addr.try_into().ok()?,
                        BYTES_PER_BUS
                            * (usize::from(desc.end_bus) - usize::from(desc.start_bus) + 1),
                        Prot::RW,
                        MemoryType::Uncacheable,
                    )
                    .inspect_err(|err| {
                        log::error!(
                            "failed to map seg {} bus {}..={}: {}",
                            // Braces copy the packed fields to avoid
                            // unaligned references in the format args.
                            { desc.seg_group_num },
                            { desc.start_bus },
                            { desc.end_bus },
                            err
                        )
                    })
                    .ok()?,
                })
            })
            .collect::<Vec<_>>();
        // Sorted order is required by the binary search in `bus_addr`.
        allocs.sort_by_key(|alloc| (alloc.seg, alloc.start_bus));
        Ok(Self {
            lock: Mutex::new(()),
            allocs,
            interrupt_map,
            interrupt_map_mask,
            fallback: Pci::new(),
        })
    }
    /// Returns the base pointer of the 1 MiB ECAM window for `bus`, or
    /// `None` when no mapped allocation covers `(seg, bus)`.
    fn bus_addr(&self, seg: u16, bus: u8) -> Option<*mut u32> {
        let alloc = match self
            .allocs
            .binary_search_by_key(&(seg, bus), |alloc| (alloc.seg, alloc.start_bus))
        {
            // `bus` is exactly some window's start bus.
            Ok(present_idx) => &self.allocs[present_idx],
            // `bus` sorts before every window: nothing can cover it.
            Err(0) => return None,
            Err(above_idx) => {
                // Otherwise the only candidate is the preceding window;
                // check that its bus range still contains `bus`.
                let below_alloc = &self.allocs[above_idx - 1];
                if bus > below_alloc.end_bus {
                    return None;
                }
                below_alloc
            }
        };
        let bus_off = bus - alloc.start_bus;
        Some(unsafe {
            alloc
                .mem
                .as_ptr()
                .cast::<u8>()
                .add(usize::from(bus_off) * BYTES_PER_BUS)
                .cast::<u32>()
        })
    }
    /// ECAM offset of a configuration dword inside its bus window:
    /// device << 15 | function << 12 | offset, scaled down to u32 units.
    fn bus_addr_offset_in_dwords(address: PciAddress, offset: u16) -> usize {
        assert_eq!(offset & 0xFFFC, offset, "pcie offset not dword-aligned");
        assert_eq!(offset & 0x0FFF, offset, "pcie offset larger than 4095");
        (((address.device() as usize) << 15)
            | ((address.function() as usize) << 12)
            | (offset as usize))
            >> 2
    }
    // TODO: A safer interface, using e.g. a VolatileCell or Volatile<'a>. The PhysBorrowed wrapper
    // can possibly deref to or provide a Volatile<T>.
    /// Computes the MMIO pointer for one configuration dword, or `None`
    /// when the bus is not covered by any mapped ECAM window.
    fn mmio_addr(&self, address: PciAddress, offset: u16) -> Option<*mut u32> {
        assert_eq!(
            address.segment(),
            0,
            "multiple segments not yet implemented"
        );
        let bus_addr = self.bus_addr(address.segment(), address.bus())?;
        Some(unsafe { bus_addr.add(Self::bus_addr_offset_in_dwords(address, offset)) })
    }
}
impl ConfigRegionAccess for Pcie {
    /// Reads one configuration dword, preferring the ECAM MMIO mapping and
    /// falling back to legacy port IO when no window covers the address.
    unsafe fn read(&self, address: PciAddress, offset: u16) -> u32 {
        let _guard = self.lock.lock().unwrap();
        if let Some(mmio) = self.mmio_addr(address, offset) {
            mmio.read_volatile()
        } else {
            self.fallback.read(address, offset)
        }
    }
    /// Writes one configuration dword through the same MMIO-then-port-IO
    /// selection as `read`.
    unsafe fn write(&self, address: PciAddress, offset: u16, value: u32) {
        let _guard = self.lock.lock().unwrap();
        if let Some(mmio) = self.mmio_addr(address, offset) {
            mmio.write_volatile(value)
        } else {
            self.fallback.write(address, offset, value)
        }
    }
}
@@ -0,0 +1,284 @@
use pci_types::capability::{MultipleMessageSupport, PciCapability};
use pci_types::{ConfigRegionAccess, EndpointHeader};
use pcid_interface::PciFunction;
use crate::cfg_access::Pcie;
/// Per-function server state used to answer a single subdriver's requests.
pub struct DriverHandler<'a> {
    // Identity/config snapshot of the PCI function being served.
    func: PciFunction,
    // Mutable views into the function's parsed configuration space.
    endpoint_header: &'a mut EndpointHeader,
    capabilities: &'a mut [PciCapability],
    // Shared configuration-space accessor.
    pcie: &'a Pcie,
}
impl<'a> DriverHandler<'a> {
    /// Bundles the state needed to serve one PCI function to its subdriver.
    pub fn new(
        func: PciFunction,
        endpoint_header: &'a mut EndpointHeader,
        capabilities: &'a mut [PciCapability],
        pcie: &'a Pcie,
    ) -> Self {
        DriverHandler {
            func,
            endpoint_header,
            capabilities,
            pcie,
        }
    }
    /// Handles a single request from the subdriver and produces its
    /// response, mutating the device's capability registers as needed.
    pub fn respond(
        &mut self,
        request: pcid_interface::PcidClientRequest,
    ) -> pcid_interface::PcidClientResponse {
        use pcid_interface::*;
        // Fail compilation if a new request variant is not handled here.
        #[forbid(non_exhaustive_omitted_patterns)]
        match request {
            PcidClientRequest::EnableDevice => {
                self.func.legacy_interrupt_line = crate::enable_function(
                    &self.pcie,
                    &mut self.endpoint_header,
                    &mut self.capabilities,
                );
                PcidClientResponse::EnabledDevice
            }
            PcidClientRequest::RequestVendorCapabilities => PcidClientResponse::VendorCapabilities(
                self.capabilities
                    .iter()
                    .filter_map(|capability| match capability {
                        PciCapability::Vendor(addr) => unsafe {
                            Some(VendorSpecificCapability::parse(*addr, self.pcie))
                        },
                        _ => None,
                    })
                    .collect::<Vec<_>>(),
            ),
            PcidClientRequest::RequestConfig => {
                PcidClientResponse::Config(SubdriverArguments { func: self.func })
            }
            // Report which interrupt features the capability list exposes.
            PcidClientRequest::RequestFeatures => PcidClientResponse::AllFeatures(
                self.capabilities
                    .iter()
                    .filter_map(|capability| match capability {
                        PciCapability::Msi(_) => Some(PciFeature::Msi),
                        PciCapability::MsiX(_) => Some(PciFeature::MsiX),
                        _ => None,
                    })
                    .collect(),
            ),
            PcidClientRequest::EnableFeature(feature) => {
                match feature {
                    PciFeature::Msi => {
                        if let Some(msix_capability) =
                            self.capabilities
                                .iter_mut()
                                .find_map(|capability| match capability {
                                    PciCapability::MsiX(cap) => Some(cap),
                                    _ => None,
                                })
                        {
                            // If MSI-X is supported disable it before enabling MSI as they can't be
                            // active at the same time.
                            msix_capability.set_enabled(false, self.pcie);
                        }
                        let capability = match self.capabilities.iter_mut().find_map(|capability| {
                            match capability {
                                PciCapability::Msi(cap) => Some(cap),
                                _ => None,
                            }
                        }) {
                            Some(capability) => capability,
                            None => {
                                return PcidClientResponse::Error(
                                    PcidServerResponseError::NonexistentFeature(feature),
                                )
                            }
                        };
                        capability.set_enabled(true, self.pcie);
                        PcidClientResponse::FeatureEnabled(feature)
                    }
                    PciFeature::MsiX => {
                        if let Some(msi_capability) =
                            self.capabilities
                                .iter_mut()
                                .find_map(|capability| match capability {
                                    PciCapability::Msi(cap) => Some(cap),
                                    _ => None,
                                })
                        {
                            // If MSI is supported disable it before enabling MSI-X as they can't be
                            // active at the same time.
                            msi_capability.set_enabled(false, self.pcie);
                        }
                        let capability = match self.capabilities.iter_mut().find_map(|capability| {
                            match capability {
                                PciCapability::MsiX(cap) => Some(cap),
                                _ => None,
                            }
                        }) {
                            Some(capability) => capability,
                            None => {
                                return PcidClientResponse::Error(
                                    PcidServerResponseError::NonexistentFeature(feature),
                                )
                            }
                        };
                        capability.set_enabled(true, self.pcie);
                        PcidClientResponse::FeatureEnabled(feature)
                    }
                }
            }
            // Describe the MSI/MSI-X capability so the subdriver can size
            // its vector setup.
            PcidClientRequest::FeatureInfo(feature) => PcidClientResponse::FeatureInfo(
                feature,
                match feature {
                    PciFeature::Msi => {
                        if let Some(info) =
                            self.capabilities
                                .iter()
                                .find_map(|capability| match capability {
                                    PciCapability::Msi(cap) => Some(cap),
                                    _ => None,
                                })
                        {
                            PciFeatureInfo::Msi(msi::MsiInfo {
                                log2_multiple_message_capable: info.multiple_message_capable()
                                    as u8,
                                is_64bit: info.is_64bit(),
                                has_per_vector_masking: info.has_per_vector_masking(),
                            })
                        } else {
                            return PcidClientResponse::Error(
                                PcidServerResponseError::NonexistentFeature(feature),
                            );
                        }
                    }
                    PciFeature::MsiX => {
                        if let Some(info) =
                            self.capabilities
                                .iter()
                                .find_map(|capability| match capability {
                                    PciCapability::MsiX(cap) => Some(cap),
                                    _ => None,
                                })
                        {
                            PciFeatureInfo::MsiX(msi::MsixInfo {
                                table_bar: info.table_bar(),
                                table_offset: info.table_offset(),
                                table_size: info.table_size(),
                                pba_bar: info.pba_bar(),
                                pba_offset: info.pba_offset(),
                            })
                        } else {
                            return PcidClientResponse::Error(
                                PcidServerResponseError::NonexistentFeature(feature),
                            );
                        }
                    }
                },
            ),
            PcidClientRequest::SetFeatureInfo(info_to_set) => match info_to_set {
                SetFeatureInfo::Msi(info_to_set) => {
                    if let Some(info) =
                        self.capabilities
                            .iter_mut()
                            .find_map(|capability| match capability {
                                PciCapability::Msi(cap) => Some(cap),
                                _ => None,
                            })
                    {
                        if let Some(mme) = info_to_set.multi_message_enable {
                            // Reject a request for more vectors than the
                            // device advertises.
                            if (info.multiple_message_capable() as u8) < mme {
                                return PcidClientResponse::Error(
                                    PcidServerResponseError::InvalidBitPattern,
                                );
                            }
                            info.set_multiple_message_enable(
                                // Map the log2 vector count onto the enum.
                                match mme {
                                    0 => MultipleMessageSupport::Int1,
                                    1 => MultipleMessageSupport::Int2,
                                    2 => MultipleMessageSupport::Int4,
                                    3 => MultipleMessageSupport::Int8,
                                    4 => MultipleMessageSupport::Int16,
                                    5 => MultipleMessageSupport::Int32,
                                    _ => {
                                        return PcidClientResponse::Error(
                                            PcidServerResponseError::InvalidBitPattern,
                                        )
                                    }
                                },
                                self.pcie,
                            );
                        }
                        if let Some(message_addr_and_data) = info_to_set.message_address_and_data {
                            let message_addr = message_addr_and_data.addr;
                            // The MSI message address must be dword-aligned.
                            if message_addr & 0b11 != 0 {
                                return PcidClientResponse::Error(
                                    PcidServerResponseError::InvalidBitPattern,
                                );
                            }
                            // The low bits of the data are replaced by the
                            // vector number per enabled message, so they
                            // must be zero here.
                            if message_addr_and_data.data
                                & ((1 << info.multiple_message_enable(self.pcie) as u8) - 1)
                                != 0
                            {
                                return PcidClientResponse::Error(
                                    PcidServerResponseError::InvalidBitPattern,
                                );
                            }
                            info.set_message_info(
                                message_addr,
                                message_addr_and_data
                                    .data
                                    .try_into()
                                    .expect("pcid: MSI message data too big"),
                                self.pcie,
                            );
                        }
                        if let Some(mask_bits) = info_to_set.mask_bits {
                            info.set_message_mask(mask_bits, self.pcie);
                        }
                        PcidClientResponse::SetFeatureInfo(PciFeature::Msi)
                    } else {
                        return PcidClientResponse::Error(
                            PcidServerResponseError::NonexistentFeature(PciFeature::Msi),
                        );
                    }
                }
                SetFeatureInfo::MsiX { function_mask } => {
                    if let Some(info) =
                        self.capabilities
                            .iter_mut()
                            .find_map(|capability| match capability {
                                PciCapability::MsiX(cap) => Some(cap),
                                _ => None,
                            })
                    {
                        if let Some(mask) = function_mask {
                            info.set_function_mask(mask, self.pcie);
                        }
                        PcidClientResponse::SetFeatureInfo(PciFeature::MsiX)
                    } else {
                        return PcidClientResponse::Error(
                            PcidServerResponseError::NonexistentFeature(PciFeature::MsiX),
                        );
                    }
                }
                _ => unreachable!(),
            },
            // Raw configuration-space passthrough for the subdriver.
            PcidClientRequest::ReadConfig(offset) => {
                let value = unsafe { self.pcie.read(self.func.addr, offset) };
                return PcidClientResponse::ReadConfig(value);
            }
            PcidClientRequest::WriteConfig(offset, value) => {
                unsafe {
                    self.pcie.write(self.func.addr, offset, value);
                }
                return PcidClientResponse::WriteConfig;
            }
            _ => unreachable!(),
        }
    }
}
@@ -0,0 +1,55 @@
use std::convert::TryInto;
use serde::{Deserialize, Serialize};
// This type is used instead of [pci_types::Bar] in the driver interface as the
// latter can't be serialized and is missing the convenience functions of [PciBar].
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum PciBar {
    /// No BAR present in this slot.
    None,
    /// 32-bit memory BAR.
    Memory32 { addr: u32, size: u32 },
    /// 64-bit memory BAR.
    Memory64 { addr: u64, size: u64 },
    /// IO-port BAR.
    Port(u16),
}
impl PciBar {
    /// Formats the BAR target (hex address or port) for listing output.
    pub fn display(&self) -> String {
        match self {
            // `format!` with no interpolation was just an allocation;
            // `to_string` states the intent directly.
            PciBar::None => "<none>".to_string(),
            PciBar::Memory32 { addr, .. } => format!("{addr:08X}"),
            PciBar::Memory64 { addr, .. } => format!("{addr:016X}"),
            PciBar::Port(port) => format!("P{port:04X}"),
        }
    }
    /// Returns true if no BAR is present in this slot.
    pub fn is_none(&self) -> bool {
        matches!(self, PciBar::None)
    }
    /// Returns the IO port of a port BAR.
    ///
    /// # Panics
    /// Panics if the BAR is a memory BAR or absent.
    pub fn expect_port(&self) -> u16 {
        match *self {
            PciBar::Port(port) => port,
            PciBar::Memory32 { .. } | PciBar::Memory64 { .. } => {
                panic!("expected port BAR, found memory BAR");
            }
            PciBar::None => panic!("expected BAR to exist"),
        }
    }
    /// Returns `(addr, size)` of a memory BAR, converted to `usize`.
    ///
    /// # Panics
    /// Panics if the BAR is a port BAR, absent, or doesn't fit in `usize`.
    pub fn expect_mem(&self) -> (usize, usize) {
        match *self {
            PciBar::Memory32 { addr, size } => (addr as usize, size as usize),
            PciBar::Memory64 { addr, size } => (
                addr.try_into()
                    .expect("conversion from 64bit BAR to usize failed"),
                size.try_into()
                    .expect("conversion from 64bit BAR size to usize failed"),
            ),
            PciBar::Port(_) => panic!("expected memory BAR, found port BAR"),
            PciBar::None => panic!("expected BAR to exist"),
        }
    }
}
@@ -0,0 +1,38 @@
use pci_types::capability::PciCapabilityAddress;
use pci_types::ConfigRegionAccess;
use serde::{Deserialize, Serialize};
/// Raw payload of a PCI vendor-specific capability, with the 3-byte
/// capability header stripped off (see `parse`).
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct VendorSpecificCapability {
    pub data: Vec<u8>,
}
impl VendorSpecificCapability {
    /// Reads a vendor-specific capability out of configuration space.
    ///
    /// # Safety
    /// `addr` must identify a valid vendor-specific capability on the
    /// device reachable through `access`.
    pub unsafe fn parse(addr: PciCapabilityAddress, access: &dyn ConfigRegionAccess) -> Self {
        let dword = access.read(addr.address, addr.offset);
        // Byte 2 of the capability is treated as its total length in bytes
        // (header included); byte 0 is the cap id, byte 1 the next pointer.
        let length = ((dword >> 16) & 0xFF) as u16;
        // let next = (dword >> 8) & 0xFF;
        // log::trace!(
        //     "Vendor specific offset: {:#02x} next: {next:#02x} cap len: {length:#02x}",
        //     addr.offset
        // );
        let data = if length > 0 {
            assert!(
                length > 3 && length % 4 == 0,
                "invalid range length: {}",
                length
            );
            // Read the capability dword by dword, little-endian.
            let mut raw_data = {
                (addr.offset..addr.offset + length)
                    .step_by(4)
                    .flat_map(|offset| access.read(addr.address, offset).to_le_bytes())
                    .collect::<Vec<u8>>()
            };
            // `drain(3..)` yields everything after the 3-byte header, i.e.
            // the vendor payload.
            raw_data.drain(3..).collect()
        } else {
            log::warn!("Vendor specific capability is invalid");
            Vec::new()
        };
        VendorSpecificCapability { data }
    }
}
@@ -0,0 +1,88 @@
use std::collections::BTreeMap;
use std::ops::Range;
use serde::Deserialize;
use crate::driver_interface::FullDeviceId;
/// Top-level pcid configuration: the list of known subdriver entries.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct Config {
    pub drivers: Vec<DriverConfig>,
}
/// One driver entry from pcid's configuration: matching criteria plus the
/// command used to spawn the subdriver. Absent fields match anything
/// (see `match_function`).
#[derive(Clone, Debug, Default, Deserialize)]
pub struct DriverConfig {
    /// Human-readable driver name.
    pub name: Option<String>,
    /// Required PCI class, when present.
    pub class: Option<u8>,
    /// Required PCI subclass, when present.
    pub subclass: Option<u8>,
    /// Required programming interface, when present.
    pub interface: Option<u8>,
    /// Map of hex vendor-id strings (optionally "0x"-prefixed) to lists of
    /// accepted device ids; takes precedence over `vendor`/`device`.
    pub ids: Option<BTreeMap<String, Vec<u16>>>,
    /// Exact vendor id match (only consulted when `ids` is absent).
    pub vendor: Option<u16>,
    /// Exact device id match (only consulted when `ids` is absent).
    pub device: Option<u16>,
    /// Half-open range of accepted device ids.
    pub device_id_range: Option<Range<u16>>,
    // Command line used to launch the matching subdriver; presumably argv
    // with substitutions applied elsewhere — not interpreted in this module.
    pub command: Vec<String>,
}
impl DriverConfig {
    /// Decides whether this driver entry matches the given PCI function.
    ///
    /// Every constraint that is present (`class`, `subclass`, `interface`,
    /// `ids` or `vendor`/`device`, and `device_id_range`) must hold; absent
    /// fields match anything.
    pub fn match_function(&self, id: &FullDeviceId) -> bool {
        if let Some(class) = self.class {
            if class != id.class {
                return false;
            }
        }
        if let Some(subclass) = self.subclass {
            if subclass != id.subclass {
                return false;
            }
        }
        if let Some(interface) = self.interface {
            if interface != id.interface {
                return false;
            }
        }
        if let Some(ref ids) = self.ids {
            let mut device_found = false;
            for (vendor, devices) in ids {
                // Keys are hex vendor ids, optionally "0x"-prefixed.
                let vendor_without_prefix = vendor.trim_start_matches("0x");
                // A malformed or out-of-range vendor key can never match a
                // real 16-bit vendor id, so skip the entry. (Previously
                // this was `i64::from_str_radix(..).unwrap() as u16`, which
                // panicked pcid on junk keys and silently truncated values
                // wider than 16 bits.)
                let Ok(vendor) = u16::from_str_radix(vendor_without_prefix, 16) else {
                    continue;
                };
                if vendor != id.vendor_id {
                    continue;
                }
                if devices.iter().any(|device| *device == id.device_id) {
                    device_found = true;
                    break;
                }
            }
            if !device_found {
                return false;
            }
        } else {
            if let Some(vendor) = self.vendor {
                if vendor != id.vendor_id {
                    return false;
                }
            }
            if let Some(device) = self.device {
                if device != id.device_id {
                    return false;
                }
            }
        }
        if let Some(ref device_id_range) = self.device_id_range {
            // `Range` is half-open: start inclusive, end exclusive.
            if id.device_id < device_id_range.start || device_id_range.end <= id.device_id {
                return false;
            }
        }
        true
    }
}
@@ -0,0 +1,48 @@
use pci_types::device_type::DeviceType;
use serde::{Deserialize, Serialize};
/// All identifying information of a PCI function.
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub struct FullDeviceId {
    pub vendor_id: u16,
    pub device_id: u16,
    /// PCI class / subclass / programming interface triple.
    pub class: u8,
    pub subclass: u8,
    pub interface: u8,
    pub revision: u8,
}
impl FullDeviceId {
    /// Renders the id as `VVVV:DDDD CC.SS.II.RR <DeviceType>`, appending a
    /// short controller mnemonic for well-known device types.
    pub fn display(&self) -> String {
        let device_type = DeviceType::from((self.class, self.subclass));
        let mut string = format!(
            "{:>04X}:{:>04X} {:>02X}.{:>02X}.{:>02X}.{:>02X} {:?}",
            self.vendor_id,
            self.device_id,
            self.class,
            self.subclass,
            self.interface,
            self.revision,
            // The final argument previously repeated `self.class`, so the
            // raw class byte was printed twice; the decoded device type is
            // what the trailing debug placeholder was meant to show.
            device_type,
        );
        match device_type {
            DeviceType::LegacyVgaCompatible => string.push_str(" VGA CTL"),
            DeviceType::IdeController => string.push_str(" IDE"),
            DeviceType::SataController => match self.interface {
                0 => string.push_str(" SATA VND"),
                1 => string.push_str(" SATA AHCI"),
                _ => (),
            },
            DeviceType::UsbController => match self.interface {
                0x00 => string.push_str(" UHCI"),
                0x10 => string.push_str(" OHCI"),
                0x20 => string.push_str(" EHCI"),
                0x30 => string.push_str(" XHCI"),
                _ => (),
            },
            DeviceType::NvmeController => string.push_str(" NVME"),
            _ => (),
        }
        string
    }
}
@@ -0,0 +1,334 @@
//! IRQ helpers.
//!
//! This module allows easy handling of the `/scheme/irq` scheme, and allocating interrupt vectors
//! for use by INTx#, MSI, or MSI-X.
use std::convert::TryFrom;
use std::fs::{self, File};
use std::io::{self, prelude::*};
use std::num::NonZeroU8;
use crate::driver_interface::msi::{MsiAddrAndData, MsixTableEntry};
/// Read the local APIC ID of the bootstrap processor from
/// `/scheme/irq/bsp`.
pub fn read_bsp_apic_id() -> io::Result<usize> {
    let mut raw = [0u8; 8];
    let mut bsp_file = File::open("/scheme/irq/bsp")?;
    let len = bsp_file.read(&mut raw)?;
    // The scheme replies with either a little-endian u64 or u32.
    let converted = match len {
        8 => usize::try_from(u64::from_le_bytes(raw)),
        4 => usize::try_from(u32::from_le_bytes([raw[0], raw[1], raw[2], raw[3]])),
        _ => panic!(
            "`/scheme/irq` scheme responded with {} bytes, expected {}",
            len,
            std::mem::size_of::<usize>()
        ),
    };
    converted.or(Err(io::Error::new(
        io::ErrorKind::InvalidData,
        "bad BSP int size",
    )))
}
// TODO: Perhaps read the MADT instead?
/// Obtains an iterator over all of the visible CPU ids, for use in IRQ
/// allocation and MSI capability structs or MSI-X tables.
pub fn cpu_ids() -> io::Result<impl Iterator<Item = io::Result<usize>> + 'static> {
    let dir = fs::read_dir("/scheme/irq")?;
    Ok(dir.filter_map(|entry| -> Option<io::Result<_>> {
        let entry = match entry {
            Ok(entry) => entry,
            Err(err) => return Some(Err(err)),
        };
        let path = entry.path();
        let file_name = path.file_name()?.to_str()?;
        // Entries are named `cpu-<CPU ID>` with the id in hex; anything
        // else is skipped.
        let hex_id = file_name.strip_prefix("cpu-")?;
        u8::from_str_radix(hex_id, 16).map(usize::from).map(Ok).ok()
    }))
}
/// Allocate multiple interrupt vectors, from the IDT of the specified processor, returning the
/// start vector and the IRQ handles.
///
/// The alignment is a requirement for the allocation range. For example, with an alignment of 8,
/// only ranges that begin with a multiple of eight are accepted. The IRQ handles returned will
/// always correspond to the subsequent IRQ numbers beginning the first value in the return tuple.
///
/// This function is not actually guaranteed to allocate all of the IRQs specified in `count`,
/// since another process might already have requested one vector in the range. The caller must
/// check that the returned vector have the same length as `count`. In the future this function may
/// perhaps lock the entire directory to prevent this from happening, or maybe find the smallest free
/// range with the minimum alignment, to allow other drivers to obtain their necessary IRQs.
///
/// Note that this count/alignment restriction is only mandatory for MSI; MSI-X allows for
/// individually allocated vectors that might be spread out, even on multiple CPUs. Thus, multiple
/// invocations with alignment 1 and count 1 are totally acceptable, although allocating in bulk
/// minimizes the initialization overhead.
pub fn allocate_aligned_interrupt_vectors(
    cpu_id: usize,
    alignment: NonZeroU8,
    count: u8,
) -> io::Result<Option<(u8, Vec<File>)>> {
    let cpu_id = u8::try_from(cpu_id).expect("usize cpu ids not implemented yet");
    if count == 0 {
        return Ok(None);
    }
    let available_irqs = fs::read_dir(format!("/scheme/irq/cpu-{:02x}", cpu_id))?;
    // Directory entries whose names parse as a decimal u8 are the
    // allocatable IRQ numbers on this CPU.
    let mut available_irq_numbers = available_irqs.filter_map(|entry| -> Option<io::Result<_>> {
        let entry = match entry {
            Ok(e) => e,
            Err(err) => return Some(Err(err)),
        };
        let path = entry.path();
        let file_name = match path.file_name() {
            Some(f) => f,
            None => return None,
        };
        let path_str = match file_name.to_str() {
            Some(s) => s,
            None => return None,
        };
        match path_str.parse::<u8>() {
            Ok(p) => Some(Ok(p)),
            Err(_) => None,
        }
    });
    // TODO: fcntl F_SETLK on `/scheme/irq/`?
    let mut handles = Vec::with_capacity(usize::from(count));
    let mut index = 0;
    let mut first = None;
    while let Some(number) = available_irq_numbers.next() {
        let number = number?;
        // Skip until a suitable alignment is found.
        if number % u8::from(alignment) != 0 {
            continue;
        }
        let first = *first.get_or_insert(number);
        // Vectors are claimed contiguously from `first`; `File::create`
        // below fails with NotFound if `first + index` isn't available.
        let irq_number = first + index;
        // From the point where the range is aligned, we can start to advance until `count` IRQs
        // have been allocated.
        if index >= count {
            break;
        }
        // if found, reserve the irq
        let irq_handle =
            match File::create(format!("/scheme/irq/cpu-{:02x}/{}", cpu_id, irq_number)) {
                Ok(handle) => handle,
                // return early if the entire range couldn't be allocated
                Err(err) if err.kind() == io::ErrorKind::NotFound => break,
                Err(err) => return Err(err),
            };
        handles.push(irq_handle);
        index += 1;
    }
    if handles.is_empty() {
        return Ok(None);
    }
    let first = match first {
        Some(f) => f,
        None => return Ok(None),
    };
    // The returned vector number is the IRQ number offset by 32.
    // NOTE(review): this presumably maps IRQ numbers onto IDT vectors
    // (x86 reserves vectors 0-31 for exceptions) — confirm for other
    // architectures.
    Ok(Some((first + 32, handles)))
}
/// Allocate at most `count` interrupt vectors, which can start at any offset. Unless MSI is used
/// and an entire aligned range of vectors is needed, this function should be used.
pub fn allocate_interrupt_vectors(cpu_id: usize, count: u8) -> io::Result<Option<(u8, Vec<File>)>> {
    // An alignment of one places no restriction on where the range starts.
    let no_alignment = NonZeroU8::new(1).unwrap();
    allocate_aligned_interrupt_vectors(cpu_id, no_alignment, count)
}
/// Allocate a single interrupt vector, returning both the vector number (starting from 32 up to
/// 254), and its IRQ handle which is then reserved. Returns Ok(None) if allocation fails due to
/// no available IRQs.
pub fn allocate_single_interrupt_vector(cpu_id: usize) -> io::Result<Option<(u8, File)>> {
    // Propagate IO errors with `?`; a `None` allocation is passed through.
    let Some((base, mut files)) = allocate_interrupt_vectors(cpu_id, 1)? else {
        return Ok(None);
    };
    assert_eq!(files.len(), 1);
    Ok(Some((base, files.pop().unwrap())))
}
/// Allocates one interrupt vector on `cpu_id` and encodes it as an MSI
/// address/data pair targeting that CPU's local APIC.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn allocate_single_interrupt_vector_for_msi(cpu_id: usize) -> (MsiAddrAndData, File) {
    use crate::driver_interface::msi::x86 as x86_msix;
    // FIXME for cpu_id >255 we need to use the IOMMU to use IRQ remapping
    let apic_id = u8::try_from(cpu_id).expect("CPU id couldn't fit inside u8");
    // Redirection hint and destination mode flags are both left cleared.
    let msi_address = x86_msix::message_address(apic_id, false, false);
    let (vector, handle) = allocate_single_interrupt_vector(cpu_id)
        .expect("failed to allocate interrupt vector")
        .expect("no interrupt vectors left");
    let data = x86_msix::message_data_edge_triggered(x86_msix::DeliveryMode::Fixed, vector);
    (
        MsiAddrAndData {
            addr: msi_address,
            data,
        },
        handle,
    )
}
/// Allocates one MSI vector on the bootstrap processor, programs and
/// enables MSI on the function held by `pcid_handle`, and returns the IRQ
/// handle to wait on.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn allocate_first_msi_interrupt_on_bsp(
    pcid_handle: &mut crate::driver_interface::PciFunctionHandle,
) -> File {
    use crate::driver_interface::{MsiSetFeatureInfo, PciFeature, SetFeatureInfo};
    // TODO: Allow allocation of up to 32 vectors.
    let destination_id = read_bsp_apic_id().expect("failed to read BSP apic id");
    let (msg_addr_and_data, interrupt_handle) =
        allocate_single_interrupt_vector_for_msi(destination_id);
    let set_feature_info = MsiSetFeatureInfo {
        // A single message: multi_message_enable = 0 (log2 of 1).
        multi_message_enable: Some(0),
        message_address_and_data: Some(msg_addr_and_data),
        mask_bits: None,
    };
    // Program the capability first, then flip the enable bit.
    pcid_handle.set_feature_info(SetFeatureInfo::Msi(set_feature_info));
    pcid_handle.enable_feature(PciFeature::Msi);
    log::debug!("Enabled MSI");
    interrupt_handle
}
/// An allocated device interrupt: the reserved IRQ handle plus how the
/// interrupt is delivered (legacy INTx#, MSI, or MSI-X).
pub struct InterruptVector {
    irq_handle: File,
    vector: u16,
    kind: InterruptVectorKind,
}
enum InterruptVectorKind {
    Legacy,
    Msi,
    // Pointer to this vector's entry in the mapped MSI-X table.
    // NOTE(review): validity relies on the table mapping outliving this
    // struct — confirm at the allocation site.
    MsiX { table_entry: *mut MsixTableEntry },
}
impl InterruptVector {
    /// The file handle used to wait for this interrupt.
    pub fn irq_handle(&self) -> &File {
        &self.irq_handle
    }
    /// Index of the vector within the device's vector table.
    pub fn vector(&self) -> u16 {
        self.vector
    }
    /// Mask or unmask the vector if that can be done cheaply (i.e. via the
    /// MSI-X table entry). Returns `true` if masking was applied, `false` if
    /// this vector kind has no fast masking (legacy INTx# and plain MSI).
    pub fn set_masked_if_fast(&mut self, masked: bool) -> bool {
        match self.kind {
            InterruptVectorKind::Legacy | InterruptVectorKind::Msi => false,
            InterruptVectorKind::MsiX { table_entry } => {
                // NOTE(review): assumes `table_entry` still points into the
                // mapped MSI-X table for the lifetime of this vector — TODO
                // confirm the mapping outlives `InterruptVector`.
                unsafe { (*table_entry).set_masked(masked) };
                true
            }
        }
    }
}
/// Get the most optimal supported interrupt mechanism: either (in the order of preference):
/// MSI-X, MSI, and INTx# pin. Returns both runtime interrupt structures (MSI/MSI-X capability
/// structures), and the handles to the interrupts.
// FIXME allow allocating multiple interrupt vectors
// FIXME move MSI-X IRQ allocation to pcid
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn pci_allocate_interrupt_vector(
    pcid_handle: &mut crate::driver_interface::PciFunctionHandle,
    driver: &str,
) -> InterruptVector {
    let features = pcid_handle.fetch_all_features();
    let has_msi = features.iter().any(|feature| feature.is_msi());
    let has_msix = features.iter().any(|feature| feature.is_msix());
    if has_msix {
        // MSI-X: map the table, mask everything, then program entry 0 with a
        // vector targeting the BSP and unmask just that entry.
        let msix_info = match pcid_handle.feature_info(super::PciFeature::MsiX) {
            super::PciFeatureInfo::MsiX(msix) => msix,
            _ => unreachable!(),
        };
        let mut info = unsafe { msix_info.map_and_mask_all(pcid_handle) };
        pcid_handle.enable_feature(crate::driver_interface::PciFeature::MsiX);
        let entry = info.table_entry_pointer(0);
        let bsp_cpu_id = read_bsp_apic_id()
            .unwrap_or_else(|err| panic!("{driver}: failed to read BSP APIC ID: {err}"));
        let (msg_addr_and_data, irq_handle) = allocate_single_interrupt_vector_for_msi(bsp_cpu_id);
        entry.write_addr_and_data(msg_addr_and_data);
        entry.unmask();
        InterruptVector {
            irq_handle,
            vector: 0,
            kind: InterruptVectorKind::MsiX { table_entry: entry },
        }
    } else if has_msi {
        InterruptVector {
            irq_handle: allocate_first_msi_interrupt_on_bsp(pcid_handle),
            vector: 0,
            kind: InterruptVectorKind::Msi,
        }
    } else if let Some(irq) = pcid_handle.config().func.legacy_interrupt_line {
        // INTx# pin based interrupts.
        InterruptVector {
            irq_handle: irq.irq_handle(driver),
            vector: 0,
            kind: InterruptVectorKind::Legacy,
        }
    } else {
        panic!("{driver}: no interrupts supported at all")
    }
}
// FIXME support MSI on non-x86 systems
/// Non-x86 fallback: only legacy INTx# pin interrupts are available.
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
pub fn pci_allocate_interrupt_vector(
    pcid_handle: &mut crate::driver_interface::PciFunctionHandle,
    driver: &str,
) -> InterruptVector {
    match pcid_handle.config().func.legacy_interrupt_line {
        // INTx# pin based interrupts.
        Some(irq) => InterruptVector {
            irq_handle: irq.irq_handle(driver),
            vector: 0,
            kind: InterruptVectorKind::Legacy,
        },
        None => panic!("{driver}: no interrupts supported at all"),
    }
}
@@ -0,0 +1,492 @@
use std::fs::File;
use std::io::prelude::*;
use std::os::fd::{FromRawFd, IntoRawFd, RawFd};
use std::path::Path;
use std::ptr::NonNull;
use std::{env, io};
use std::{fmt, process};
use daemon::Daemon;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
pub use bar::PciBar;
pub use cap::VendorSpecificCapability;
pub use id::FullDeviceId;
pub use pci_types::PciAddress;
mod bar;
pub mod cap;
pub mod config;
mod id;
pub mod irq_helpers;
pub mod msi;
/// A legacy (INTx#) interrupt line as reported by pcid.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct LegacyInterruptLine {
    #[doc(hidden)]
    pub irq: u8,
    /// Device-tree interrupt mapping, if one applies:
    /// `(parent phandle, parent interrupt cells, number of cells used)`.
    /// `None` means the plain `irq` number is used directly.
    pub phandled: Option<(u32, [u32; 3], usize)>,
}
impl LegacyInterruptLine {
    /// Get an IRQ handle for this interrupt line.
    ///
    /// Device-tree mapped lines open a phandle path; otherwise the plain IRQ
    /// number path is used. Panics (with `driver` in the message) on failure.
    pub fn irq_handle(self, driver: &str) -> File {
        match self.phandled {
            Some((phandle, addr, cells)) => {
                if !(1..=3).contains(&cells) {
                    panic!(
                        "unexpected number of IRQ description cells for phandle {phandle}: {cells}"
                    );
                }
                // Join exactly `cells` address cells with commas.
                let cell_list = addr[..cells]
                    .iter()
                    .map(u32::to_string)
                    .collect::<Vec<_>>()
                    .join(",");
                File::create(format!("/scheme/irq/phandle-{phandle}/{cell_list}"))
                    .unwrap_or_else(|err| panic!("{driver}: failed to open IRQ file: {err}"))
            }
            None => File::open(format!("/scheme/irq/{}", self.irq))
                .unwrap_or_else(|err| panic!("{driver}: failed to open IRQ file: {err}")),
        }
    }
}
impl fmt::Display for LegacyInterruptLine {
    /// Human-readable description of the interrupt line.
    ///
    /// Device-tree mapped lines print as `(phandle N, c1,c2,...)` with exactly
    /// `cells` comma-separated address cells; plain lines print the IRQ number.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some((phandle, addr, cells)) = self.phandled {
            // All cell counts now format consistently; previously the 3-cell
            // case printed the whole array in Debug form ("[a, b, c]").
            if !(1..=3).contains(&cells) {
                panic!("unexpected number of IRQ description cells for phandle {phandle}: {cells}");
            }
            write!(f, "(phandle {phandle}, ")?;
            for (i, cell) in addr[..cells].iter().enumerate() {
                if i != 0 {
                    write!(f, ",")?;
                }
                write!(f, "{cell}")?;
            }
            write!(f, ")")
        } else {
            write!(f, "{}", self.irq)
        }
    }
}
/// Serde "remote derive" mirror of [`PciAddress`] (the upstream type does not
/// provide serde impls itself).
#[derive(Serialize, Deserialize)]
#[serde(remote = "PciAddress")]
struct PciAddressDef {
    #[serde(getter = "PciAddress::segment")]
    segment: u16,
    #[serde(getter = "PciAddress::bus")]
    bus: u8,
    #[serde(getter = "PciAddress::device")]
    device: u8,
    #[serde(getter = "PciAddress::function")]
    function: u8,
}
impl From<PciAddressDef> for PciAddress {
    // Required by the serde remote-derive pattern for deserialization.
    fn from(value: PciAddressDef) -> Self {
        PciAddress::new(value.segment, value.bus, value.device, value.function)
    }
}
/// A PCI Expansion ROM BAR as probed by pcid.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct PciRom {
    /// Physical base address of the ROM (bits 31:11 of the ROM BAR).
    pub addr: u32,
    /// Size of the ROM in bytes, derived from the writable address bits.
    pub size: u32,
    /// Whether the ROM's address-decode enable bit was set.
    pub enabled: bool,
}
/// Everything a subdriver needs to know about one PCI function.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct PciFunction {
    /// Address of the PCI function.
    #[serde(with = "PciAddressDef")]
    pub addr: PciAddress,
    /// PCI Base Address Registers
    pub bars: [PciBar; 6],
    /// PCI Option ROM
    pub rom: Option<PciRom>,
    /// Legacy IRQ line: It's the responsibility of pcid to make sure that it be mapped in either
    /// the I/O APIC or the 8259 PIC, so that the subdriver can map the interrupt vector directly.
    /// The vector to map is always this field, plus 32.
    /// If INTx# interrupts aren't supported at all this is `None`.
    pub legacy_interrupt_line: Option<LegacyInterruptLine>,
    /// All identifying information of the PCI function.
    pub full_device_id: FullDeviceId,
}
impl PciFunction {
    /// Scheme-compatible name for this function, e.g. `pci-00-01.0`.
    pub fn name(&self) -> String {
        // FIXME stop replacing : with - once it is a valid character in scheme names
        let raw = format!("pci-{}", self.addr);
        raw.replace(':', "-")
    }
    /// One-line summary: name, populated BARs and the legacy IRQ (if any).
    pub fn display(&self) -> String {
        let mut out = self.name();
        let bar_descs: Vec<String> = self
            .bars
            .iter()
            .enumerate()
            .filter(|(_, bar)| !bar.is_none())
            .map(|(i, bar)| format!(" {i}={}", bar.display()))
            .collect();
        if !bar_descs.is_empty() {
            out.push_str(" on:");
        }
        for desc in &bar_descs {
            out.push_str(desc);
        }
        if let Some(irq) = self.legacy_interrupt_line {
            out.push_str(&format!(" IRQ: {irq}"));
        }
        out
    }
}
/// Arguments pcid hands to a subdriver: the PCI function it will drive.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SubdriverArguments {
    pub func: PciFunction,
}
/// Whether a PCI feature (MSI/MSI-X) is currently enabled.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub enum FeatureStatus {
    Enabled,
    Disabled,
}
impl FeatureStatus {
    /// Convert a boolean into the corresponding status.
    pub fn enabled(enabled: bool) -> Self {
        if enabled {
            Self::Enabled
        } else {
            Self::Disabled
        }
    }
    /// Whether the status is [`FeatureStatus::Enabled`].
    pub fn is_enabled(&self) -> bool {
        // matches! replaces the verbose if-let-true-else-false pattern.
        matches!(self, Self::Enabled)
    }
}
/// An interrupt feature a PCI function may support.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub enum PciFeature {
    Msi,
    MsiX,
}
impl PciFeature {
    /// Whether this is [`PciFeature::Msi`].
    pub fn is_msi(self) -> bool {
        // matches! replaces the verbose if-let-true-else-false pattern.
        matches!(self, Self::Msi)
    }
    /// Whether this is [`PciFeature::MsiX`].
    pub fn is_msix(self) -> bool {
        matches!(self, Self::MsiX)
    }
}
/// Details about a feature, as returned by pcid for a `FeatureInfo` request.
#[derive(Debug, Serialize, Deserialize)]
pub enum PciFeatureInfo {
    Msi(msi::MsiInfo),
    MsiX(msi::MsixInfo),
}
// TODO: Remove these "features" and just go straight to the actual thing.
/// Fields of the MSI capability to update; `None` fields are left unchanged.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct MsiSetFeatureInfo {
    /// The Multi Message Enable field of the Message Control in the MSI Capability Structure,
    /// is the log2 of the interrupt vectors, minus one. Can only be 0b000..=0b101.
    pub multi_message_enable: Option<u8>,
    /// The system-specific message address and data.
    ///
    /// The message address contains things like the CPU that will be targeted, at least on
    /// x86_64. The message data contains the actual interrupt vector (lower 8 bits) and
    /// the kind of interrupt, at least on x86_64.
    pub message_address_and_data: Option<msi::MsiAddrAndData>,
    /// A bitmap of the vectors that are masked. This field is not guaranteed (and not likely,
    /// at least according to the feature flags I got from QEMU), to exist.
    pub mask_bits: Option<u32>,
}
/// Some flags that might be set simultaneously, but separately.
#[derive(Debug, Serialize, Deserialize)]
#[non_exhaustive]
pub enum SetFeatureInfo {
    Msi(MsiSetFeatureInfo),
    MsiX {
        /// Masks the entire function, and all of its vectors.
        function_mask: Option<bool>,
    },
}
/// Requests a subdriver can send to pcid over its channel.
#[derive(Debug, Serialize, Deserialize)]
#[non_exhaustive]
pub enum PcidClientRequest {
    /// Enable bus mastering/decoding and resolve the legacy IRQ line.
    EnableDevice,
    /// Fetch the [`SubdriverArguments`] for this function.
    RequestConfig,
    /// List the supported interrupt features.
    RequestFeatures,
    /// Fetch vendor-specific capability structures.
    RequestVendorCapabilities,
    EnableFeature(PciFeature),
    FeatureInfo(PciFeature),
    SetFeatureInfo(SetFeatureInfo),
    /// Read a config-space dword at the given offset.
    ReadConfig(u16),
    /// Write a config-space dword at the given offset.
    WriteConfig(u16, u32),
}
/// Errors pcid may report back to a client.
#[derive(Debug, Serialize, Deserialize)]
#[non_exhaustive]
pub enum PcidServerResponseError {
    NonexistentFeature(PciFeature),
    InvalidBitPattern,
}
/// Responses pcid sends back; each mirrors one [`PcidClientRequest`] variant.
#[derive(Debug, Serialize, Deserialize)]
#[non_exhaustive]
pub enum PcidClientResponse {
    EnabledDevice,
    Config(SubdriverArguments),
    AllFeatures(Vec<PciFeature>),
    VendorCapabilities(Vec<VendorSpecificCapability>),
    FeatureEnabled(PciFeature),
    FeatureStatus(PciFeature, FeatureStatus),
    Error(PcidServerResponseError),
    FeatureInfo(PciFeature, PciFeatureInfo),
    SetFeatureInfo(PciFeature),
    ReadConfig(u32),
    WriteConfig,
}
/// A BAR that has been mapped into this process's address space.
pub struct MappedBar {
    pub ptr: NonNull<u8>,
    pub bar_size: usize,
}
/// A handle from a `pcid` client (e.g. `ahcid`) to `pcid`.
pub struct PciFunctionHandle {
    // Bidirectional message channel to pcid.
    channel: File,
    // Config fetched once at connect time.
    config: SubdriverArguments,
    // Lazily-populated cache of mapped BARs, indexed by BAR number.
    mapped_bars: [Option<MappedBar>; 6],
}
/// Serialize `message` with bincode and write it to the pcid channel.
///
/// Exits the process on I/O failure (the channel is unusable at that point).
fn send<T: Serialize>(w: &mut File, message: &T) {
    let mut data = Vec::new();
    bincode::serialize_into(&mut data, message).expect("couldn't serialize pcid message");
    // write_all retries on short writes; a bare `write` may legitimately
    // return a partial length, which previously tripped an assertion.
    if let Err(err) = w.write_all(&data) {
        log::error!("writing pcid request failed: {err}");
        process::exit(1);
    }
}
/// Receive one length-prefixed, bincode-encoded message from pcid.
///
/// The prefix is a little-endian u64 byte count. Exits the process on I/O
/// errors; panics if the claimed length exceeds the sanity bound.
fn recv<T: DeserializeOwned>(r: &mut File) -> T {
    let mut length_bytes = [0u8; 8];
    if let Err(err) = r.read_exact(&mut length_bytes) {
        log::error!("reading pcid response length failed: {err}");
        process::exit(1);
    }
    let length = u64::from_le_bytes(length_bytes);
    // Bound the allocation so a corrupt length cannot exhaust memory (1 MiB).
    if length > 0x100_000 {
        panic!("pcid_interface: buffer too large");
    }
    let mut data = vec![0u8; length as usize];
    if let Err(err) = r.read_exact(&mut data) {
        log::error!("reading pcid response failed: {err}");
        process::exit(1);
    }
    bincode::deserialize_from(&data[..]).expect("couldn't deserialize pcid message")
}
impl PciFunctionHandle {
    /// Connect over the channel fd pcid passes to spawned subdrivers via the
    /// `PCID_CLIENT_CHANNEL` environment variable. Exits on failure.
    fn connect_default() -> Self {
        let channel_fd = match env::var("PCID_CLIENT_CHANNEL") {
            Ok(channel_fd) => channel_fd,
            Err(err) => {
                log::error!("PCID_CLIENT_CHANNEL invalid: {err}");
                process::exit(1);
            }
        };
        let channel_fd = match channel_fd.parse::<RawFd>() {
            Ok(channel_fd) => channel_fd,
            Err(err) => {
                log::error!("PCID_CLIENT_CHANNEL invalid: {err}");
                process::exit(1);
            }
        };
        Self::connect_common(channel_fd)
    }
    /// Connect by opening the `channel` file inside a pci-scheme device
    /// directory.
    pub fn connect_by_path(device_path: &Path) -> io::Result<Self> {
        let channel_fd = libredox::call::open(
            device_path.join("channel").to_str().unwrap(),
            libredox::flag::O_RDWR,
            0,
        )?;
        Ok(Self::connect_common(channel_fd as RawFd))
    }
    /// Take ownership of `channel_fd` and eagerly fetch the function config.
    fn connect_common(channel_fd: i32) -> PciFunctionHandle {
        // SAFETY: assumes `channel_fd` is a valid fd owned by the caller —
        // both call sites pass fds they own.
        let mut channel = unsafe { File::from_raw_fd(channel_fd) };
        send(&mut channel, &PcidClientRequest::RequestConfig);
        let config = match recv(&mut channel) {
            PcidClientResponse::Config(a) => a,
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        };
        Self {
            channel,
            config,
            mapped_bars: [const { None }; 6],
        }
    }
    /// Consume the handle, returning the raw channel fd.
    pub fn into_inner_fd(self) -> RawFd {
        self.channel.into_raw_fd()
    }
    fn send(&mut self, req: &PcidClientRequest) {
        send(&mut self.channel, req)
    }
    fn recv(&mut self) -> PcidClientResponse {
        recv(&mut self.channel)
    }
    /// The function config fetched at connect time.
    pub fn config(&self) -> SubdriverArguments {
        self.config.clone()
    }
    /// Ask pcid to enable the device (bus mastering, decoding, IRQ routing).
    pub fn enable_device(&mut self) {
        self.send(&PcidClientRequest::EnableDevice);
        match self.recv() {
            PcidClientResponse::EnabledDevice => {}
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        }
    }
    /// Fetch the function's vendor-specific capabilities.
    pub fn get_vendor_capabilities(&mut self) -> Vec<VendorSpecificCapability> {
        self.send(&PcidClientRequest::RequestVendorCapabilities);
        match self.recv() {
            PcidClientResponse::VendorCapabilities(a) => a,
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        }
    }
    // FIXME turn into struct with bool fields
    /// List the interrupt features (MSI/MSI-X) the function supports.
    pub fn fetch_all_features(&mut self) -> Vec<PciFeature> {
        self.send(&PcidClientRequest::RequestFeatures);
        match self.recv() {
            PcidClientResponse::AllFeatures(a) => a,
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        }
    }
    /// Enable `feature` on the function.
    pub fn enable_feature(&mut self, feature: PciFeature) {
        self.send(&PcidClientRequest::EnableFeature(feature));
        match self.recv() {
            PcidClientResponse::FeatureEnabled(feat) if feat == feature => {}
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        }
    }
    /// Fetch capability details for `feature`.
    pub fn feature_info(&mut self, feature: PciFeature) -> PciFeatureInfo {
        self.send(&PcidClientRequest::FeatureInfo(feature));
        match self.recv() {
            PcidClientResponse::FeatureInfo(feat, info) if feat == feature => info,
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        }
    }
    /// Update feature configuration (e.g. MSI address/data registers).
    pub fn set_feature_info(&mut self, info: SetFeatureInfo) {
        self.send(&PcidClientRequest::SetFeatureInfo(info));
        match self.recv() {
            PcidClientResponse::SetFeatureInfo(_) => {}
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        }
    }
    /// Read a dword from the function's configuration space via pcid.
    ///
    /// # Safety
    /// Arbitrary config-space access can conflict with pcid's own management
    /// of the function; the caller must know what the offset refers to.
    pub unsafe fn read_config(&mut self, offset: u16) -> u32 {
        self.send(&PcidClientRequest::ReadConfig(offset));
        match self.recv() {
            PcidClientResponse::ReadConfig(value) => value,
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        }
    }
    /// Write a dword to the function's configuration space via pcid.
    ///
    /// # Safety
    /// See [`Self::read_config`]; writes can additionally change device state
    /// behind pcid's back.
    pub unsafe fn write_config(&mut self, offset: u16, value: u32) {
        self.send(&PcidClientRequest::WriteConfig(offset, value));
        match self.recv() {
            PcidClientResponse::WriteConfig => {}
            other => {
                log::error!("received wrong pcid response: {other:?}");
                process::exit(1);
            }
        }
    }
    /// Map memory BAR `bir` into this process, caching the mapping so repeated
    /// calls return the same pointer.
    ///
    /// # Safety
    /// The returned mapping aliases device MMIO; the caller must uphold the
    /// device's access rules. Panics if the BAR is not a memory BAR.
    pub unsafe fn map_bar(&mut self, bir: u8) -> &MappedBar {
        let mapped_bar = &mut self.mapped_bars[bir as usize];
        if let Some(mapped_bar) = mapped_bar {
            mapped_bar
        } else {
            let (bar, bar_size) = self.config.func.bars[bir as usize].expect_mem();
            let ptr = match unsafe {
                common::physmap(
                    bar,
                    bar_size,
                    common::Prot::RW,
                    // FIXME once the kernel supports this use write-through for prefetchable BAR
                    common::MemoryType::Uncacheable,
                )
            } {
                Ok(ptr) => ptr,
                Err(err) => {
                    log::error!("failed to map BAR at {bar:016X}: {err}");
                    process::exit(1);
                }
            };
            mapped_bar.insert(MappedBar {
                ptr: NonNull::new(ptr.cast::<u8>()).expect("Mapping a BAR resulted in a nullptr"),
                bar_size,
            })
        }
    }
}
/// Entry-point helper for PCI subdrivers: runs `f` inside a [`Daemon`] after
/// initializing common runtime state and connecting to pcid over the channel
/// inherited from the environment. `f` never returns.
pub fn pci_daemon<F: FnOnce(Daemon, PciFunctionHandle) -> !>(f: F) -> ! {
    Daemon::new(|daemon| {
        common::init();
        let pcid_handle = PciFunctionHandle::connect_default();
        f(daemon, pcid_handle)
    })
}
@@ -0,0 +1,257 @@
use std::fmt;
use std::ptr::NonNull;
use crate::driver_interface::PciBar;
use crate::PciFunctionHandle;
use common::io::{Io, Mmio};
use serde::{Deserialize, Serialize};
/// The address and data to use for MSI and MSI-X.
///
/// For MSI using this only works when you need a single interrupt vector.
/// For MSI-X you can have one `MsiAddrAndData` for each interrupt vector.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct MsiAddrAndData {
    pub addr: u64,
    pub data: u32,
}
/// MSI capability parameters reported by pcid.
#[derive(Debug, Serialize, Deserialize)]
pub struct MsiInfo {
    /// log2 of the number of vectors the function is capable of.
    pub log2_multiple_message_capable: u8,
    /// Whether the capability has a 64-bit message address register.
    pub is_64bit: bool,
    /// Whether per-vector mask bits are implemented.
    pub has_per_vector_masking: bool,
}
/// MSI-X capability parameters: where the vector table and pending-bit array
/// (PBA) live, each given as a BAR index plus an offset within that BAR.
#[derive(Debug, Serialize, Deserialize)]
pub struct MsixInfo {
    pub table_bar: u8,
    pub table_offset: u32,
    /// Number of entries in the vector table.
    pub table_size: u16,
    pub pba_bar: u8,
    pub pba_offset: u32,
}
impl MsixInfo {
    /// Map the BAR containing the MSI-X table and mask every vector.
    ///
    /// # Safety
    /// The caller must ensure this info describes the real MSI-X table of the
    /// function behind `pcid_handle`; the mapping is accessed as MMIO.
    pub unsafe fn map_and_mask_all(self, pcid_handle: &mut PciFunctionHandle) -> MappedMsixRegs {
        self.validate(pcid_handle.config().func.bars);
        let virt_table_base = unsafe {
            pcid_handle
                .map_bar(self.table_bar)
                .ptr
                .as_ptr()
                .byte_add(self.table_offset as usize)
        };
        let mut info = MappedMsixRegs {
            virt_table_base: NonNull::new(virt_table_base.cast::<MsixTableEntry>()).unwrap(),
            info: self,
        };
        // Mask all interrupts in case some earlier driver/os already unmasked them (according to
        // the PCI Local Bus spec 3.0, they are masked after system reset).
        for i in 0..info.info.table_size {
            info.table_entry_pointer(i.into()).mask();
        }
        info
    }
    /// Panic unless the table and PBA fit entirely inside their BARs and the
    /// BIR fields are not reserved values.
    fn validate(&self, bars: [PciBar; 6]) {
        if self.table_bar > 5 {
            panic!(
                "MSI-X Table BIR contained a reserved enum value: {}",
                self.table_bar
            );
        }
        if self.pba_bar > 5 {
            panic!(
                "MSI-X PBA BIR contained a reserved enum value: {}",
                self.pba_bar
            );
        }
        // Do the arithmetic in u64 so a (bogus) large table size cannot
        // overflow the previous u16 intermediates.
        let table_offset = self.table_offset as u64;
        let table_min_length = u64::from(self.table_size) * 16;
        let pba_offset = self.pba_offset as u64;
        let pba_min_length = u64::from(self.table_size).div_ceil(8);
        let (_, table_bar_size) = bars[self.table_bar as usize].expect_mem();
        let (_, pba_bar_size) = bars[self.pba_bar as usize].expect_mem();
        // A structure ending exactly at the end of the BAR is valid; the old
        // `(0..size).contains(&end)` check wrongly rejected that case.
        if table_offset + table_min_length > table_bar_size as u64 {
            panic!(
                "Table {:#x}:{:#x} outside of BAR with length {:#x}",
                table_offset,
                table_offset + table_min_length,
                table_bar_size
            );
        }
        if pba_offset + pba_min_length > pba_bar_size as u64 {
            panic!(
                "PBA {:#x}:{:#x} outside of BAR with length {:#x}",
                pba_offset,
                pba_offset + pba_min_length,
                pba_bar_size
            );
        }
    }
}
/// An MSI-X vector table mapped into this process's address space.
pub struct MappedMsixRegs {
    pub virt_table_base: NonNull<MsixTableEntry>,
    pub info: MsixInfo,
}
impl MappedMsixRegs {
    /// Get table entry `k` without bounds checking.
    ///
    /// # Safety
    /// `k` must be less than `self.info.table_size`.
    pub unsafe fn table_entry_pointer_unchecked(&mut self, k: usize) -> &mut MsixTableEntry {
        &mut *self.virt_table_base.as_ptr().add(k)
    }
    /// Get table entry `k`, panicking if it is out of bounds.
    pub fn table_entry_pointer(&mut self, k: usize) -> &mut MsixTableEntry {
        assert!(k < self.info.table_size as usize);
        // SAFETY: the bound was just checked against the table size.
        unsafe { self.table_entry_pointer_unchecked(k) }
    }
}
/// One entry of the MSI-X vector table, accessed as MMIO: message address
/// (low/high dwords), message data, and vector control.
#[repr(C, packed)]
pub struct MsixTableEntry {
    pub addr_lo: Mmio<u32>,
    pub addr_hi: Mmio<u32>,
    pub msg_data: Mmio<u32>,
    pub vec_ctl: Mmio<u32>,
}
// Each MSI-X table entry is exactly 16 bytes.
const _: () = {
    assert!(size_of::<MsixTableEntry>() == 16);
};
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub mod x86 {
    //! Helpers for building x86 MSI/MSI-X message address and data values
    //! (local-APIC message base `0xFEE0_0000`).

    /// Edge- vs level-triggered delivery (bit 15 of the message data).
    #[repr(u8)]
    pub enum TriggerMode {
        Edge = 0,
        Level = 1,
    }
    /// Assert/deassert for level-triggered messages (bit 14 of the message data).
    #[repr(u8)]
    pub enum LevelTriggerMode {
        Deassert = 0,
        Assert = 1,
    }
    /// Delivery mode field (bits 10:8 of the message data).
    #[repr(u8)]
    pub enum DeliveryMode {
        Fixed = 0b000,
        LowestPriority = 0b001,
        Smi = 0b010,
        // 0b011 is reserved
        Nmi = 0b100,
        Init = 0b101,
        // 0b110 is reserved
        ExtInit = 0b111,
    }
    // TODO: should the reserved field be preserved?
    /// Build the MSI message address targeting the given LAPIC destination.
    pub const fn message_address(
        destination_id: u8,
        redirect_hint: bool,
        dest_mode_logical: bool,
    ) -> u64 {
        const BASE: u64 = 0x0000_0000_FEE0_0000;
        let dest = (destination_id as u64) << 12;
        let rh = (redirect_hint as u64) << 3;
        let dm = (dest_mode_logical as u64) << 2;
        BASE | dest | rh | dm
    }
    /// Build the MSI message data word from its individual fields.
    pub const fn message_data(
        trigger_mode: TriggerMode,
        level_trigger_mode: LevelTriggerMode,
        delivery_mode: DeliveryMode,
        vector: u8,
    ) -> u32 {
        let tm = (trigger_mode as u32) << 15;
        let ltm = (level_trigger_mode as u32) << 14;
        let dm = (delivery_mode as u32) << 8;
        tm | ltm | dm | vector as u32
    }
    /// Convenience wrapper: level-triggered message data.
    pub const fn message_data_level_triggered(
        level_trigger_mode: LevelTriggerMode,
        delivery_mode: DeliveryMode,
        vector: u8,
    ) -> u32 {
        message_data(
            TriggerMode::Level,
            level_trigger_mode,
            delivery_mode,
            vector,
        )
    }
    /// Convenience wrapper: edge-triggered message data (the level field is
    /// fixed to `Deassert`).
    pub const fn message_data_edge_triggered(delivery_mode: DeliveryMode, vector: u8) -> u32 {
        message_data(
            TriggerMode::Edge,
            LevelTriggerMode::Deassert,
            delivery_mode,
            vector,
        )
    }
}
impl MsixTableEntry {
    /// Lower 32 bits of the message address.
    pub fn addr_lo(&self) -> u32 {
        self.addr_lo.read()
    }
    /// Upper 32 bits of the message address.
    pub fn addr_hi(&self) -> u32 {
        self.addr_hi.read()
    }
    pub fn set_addr_lo(&mut self, value: u32) {
        self.addr_lo.write(value);
    }
    pub fn set_addr_hi(&mut self, value: u32) {
        self.addr_hi.write(value);
    }
    pub fn msg_data(&self) -> u32 {
        self.msg_data.read()
    }
    pub fn vec_ctl(&self) -> u32 {
        self.vec_ctl.read()
    }
    pub fn set_msg_data(&mut self, value: u32) {
        self.msg_data.write(value);
    }
    /// Full 64-bit message address, combined from the two halves.
    pub fn addr(&self) -> u64 {
        u64::from(self.addr_lo()) | (u64::from(self.addr_hi()) << 32)
    }
    /// Bit 0 of the vector-control dword: set = vector masked.
    pub const VEC_CTL_MASK_BIT: u32 = 1;
    /// Set or clear this entry's per-vector mask bit.
    pub fn set_masked(&mut self, masked: bool) {
        self.vec_ctl.writef(Self::VEC_CTL_MASK_BIT, masked)
    }
    pub fn mask(&mut self) {
        self.set_masked(true);
    }
    pub fn unmask(&mut self) {
        self.set_masked(false);
    }
    /// Program both address halves and the data dword from `entry`.
    pub fn write_addr_and_data(&mut self, entry: MsiAddrAndData) {
        self.set_addr_lo(entry.addr as u32);
        self.set_addr_hi((entry.addr >> 32) as u32);
        self.set_msg_data(entry.data);
    }
}
impl fmt::Debug for MsixTableEntry {
    // Reads go through the accessor methods, which copy values out of the
    // packed struct instead of taking references to unaligned fields.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("MsixTableEntry")
            .field("addr", &self.addr())
            .field("msg_data", &self.msg_data())
            .field("vec_ctl", &self.vec_ctl())
            .finish()
    }
}
@@ -0,0 +1,6 @@
//! Interface to `pcid`.
#![feature(never_type)]
mod driver_interface;
pub use driver_interface::*;
+373
View File
@@ -0,0 +1,373 @@
#![feature(iter_next_chunk)]
#![feature(if_let_guard)]
#![feature(non_exhaustive_omitted_patterns_lint)]
use std::collections::BTreeMap;
use log::{debug, info, trace, warn};
use pci_types::capability::PciCapability;
use pci_types::{
Bar as TyBar, CommandRegister, EndpointHeader, HeaderType, PciAddress,
PciHeader as TyPciHeader, PciPciBridgeHeader,
};
use redox_scheme::scheme::register_sync_scheme;
use scheme_utils::Blocking;
use crate::cfg_access::Pcie;
use pcid_interface::{FullDeviceId, LegacyInterruptLine, PciBar, PciFunction, PciRom};
mod cfg_access;
mod driver_handler;
mod scheme;
/// A discovered PCI function plus the state pcid tracks for it.
pub struct Func {
    // Serializable description handed to subdrivers.
    inner: PciFunction,
    // Capabilities parsed from the function's capability list.
    capabilities: Vec<PciCapability>,
    endpoint_header: EndpointHeader,
    // Whether the function has already been enabled; starts out false.
    enabled: bool,
}
/// Parse an endpoint function's BARs, option ROM and capability list, then
/// insert the resulting [`Func`] into the device tree.
fn handle_parsed_header(
    pcie: &Pcie,
    tree: &mut BTreeMap<PciAddress, Func>,
    endpoint_header: EndpointHeader,
    full_device_id: FullDeviceId,
) {
    let mut bars = [PciBar::None; 6];
    let mut skip = false;
    for i in 0..6 {
        // A 64-bit BAR consumes the following slot as well; skip it.
        if skip {
            skip = false;
            continue;
        }
        match endpoint_header.bar(i, pcie) {
            Some(TyBar::Io { port }) => bars[i as usize] = PciBar::Port(port.try_into().unwrap()),
            Some(TyBar::Memory32 {
                address,
                size,
                prefetchable: _,
            }) => {
                bars[i as usize] = PciBar::Memory32 {
                    addr: address,
                    size,
                }
            }
            Some(TyBar::Memory64 {
                address,
                size,
                prefetchable: _,
            }) => {
                bars[i as usize] = PciBar::Memory64 {
                    addr: address,
                    size,
                };
                skip = true; // Each 64bit memory BAR occupies two slots
            }
            None => bars[i as usize] = PciBar::None,
        }
    }
    // Log the populated BARs, if any.
    let mut string = String::new();
    for (i, bar) in bars.iter().enumerate() {
        if !bar.is_none() {
            string.push_str(&format!(" {i}={}", bar.display()));
        }
    }
    if !string.is_empty() {
        debug!(" BAR{}", string);
    }
    //TODO: submit to pci_types
    // Probe the Expansion ROM BAR: write the all-ones address mask, read back
    // the size mask, then restore the original register value.
    let get_rom = |pci_address, offset| -> Option<PciRom> {
        use pci_types::ConfigRegionAccess;
        const ROM_ENABLED: u32 = 1;
        const ROM_ADDRESS_MASK: u32 = 0xfffff800;
        let data = unsafe { pcie.read(pci_address, offset) };
        let enabled = (data & ROM_ENABLED) == ROM_ENABLED;
        let addr = data & ROM_ADDRESS_MASK;
        let size = unsafe {
            pcie.write(
                pci_address,
                offset,
                ROM_ADDRESS_MASK | if enabled { ROM_ENABLED } else { 0 },
            );
            let mut readback = pcie.read(pci_address, offset);
            pcie.write(pci_address, offset, data);
            /*
             * If the entire readback value is zero, the BAR is not implemented, so we return `None`.
             */
            if readback == 0x0 {
                return None;
            }
            readback &= ROM_ADDRESS_MASK;
            // The lowest writable address bit gives the ROM size.
            1 << readback.trailing_zeros()
        };
        Some(PciRom {
            addr,
            size,
            enabled,
        })
    };
    // 0x30 is the config-space offset of the endpoint Expansion ROM BAR.
    let rom = get_rom(endpoint_header.header().address(), 0x30);
    if let Some(rom) = rom {
        debug!(" ROM={:08X}", rom.addr);
    }
    // Only walk the capability list when the status register says one exists.
    let capabilities = if endpoint_header.status(pcie).has_capability_list() {
        endpoint_header.capabilities(pcie).collect::<Vec<_>>()
    } else {
        Vec::new()
    };
    debug!(
        "PCI DEVICE CAPABILITIES for {}: {:?}",
        endpoint_header.header().address(),
        capabilities
    );
    let func = Func {
        inner: pcid_interface::PciFunction {
            addr: endpoint_header.header().address(),
            bars,
            rom,
            legacy_interrupt_line: None, // Will be filled in when enabling the device
            full_device_id: full_device_id.clone(),
        },
        capabilities,
        endpoint_header,
        enabled: false,
    };
    tree.insert(func.inner.addr, func);
}
/// Enable a PCI function for use by a subdriver: turn on bus mastering and
/// memory/I/O decoding, clear stale MSI/MSI-X enables, and resolve the legacy
/// INTx# line. Returns `None` when the function has no INTx# pin.
fn enable_function(
    pcie: &Pcie,
    endpoint_header: &mut EndpointHeader,
    capabilities: &mut [PciCapability],
) -> Option<LegacyInterruptLine> {
    // Enable bus mastering, memory space, and I/O space
    endpoint_header.update_command(pcie, |cmd| {
        cmd | CommandRegister::BUS_MASTER_ENABLE
            | CommandRegister::MEMORY_ENABLE
            | CommandRegister::IO_ENABLE
    });
    // Disable MSI and MSI-X in case a previous driver instance enabled them.
    for capability in capabilities {
        match capability {
            PciCapability::Msi(capability) => {
                capability.set_enabled(false, pcie);
            }
            PciCapability::MsiX(capability) => {
                capability.set_enabled(false, pcie);
            }
            _ => {}
        }
    }
    // Set IRQ line to 9 if not set
    let mut irq = 0xFF;
    let mut interrupt_pin = 0xFF;
    endpoint_header.update_interrupt(pcie, |(pin, mut line)| {
        if line == 0xFF {
            line = 9;
        }
        irq = line;
        interrupt_pin = pin;
        (pin, line)
    });
    // Pin 0 means "no INTx# pin"; 1-4 are the valid pins.
    let legacy_interrupt_enabled = match interrupt_pin {
        0 => false,
        1 | 2 | 3 | 4 => true,
        other => {
            warn!("pcid: invalid interrupt pin: {}", other);
            false
        }
    };
    if legacy_interrupt_enabled {
        // Build the device-tree interrupt-map lookup key from the function's
        // bus/device/function address and the interrupt pin, applying the
        // interrupt-map mask as the device tree requires.
        let pci_address = endpoint_header.header().address();
        let dt_address = ((pci_address.bus() as u32) << 16)
            | ((pci_address.device() as u32) << 11)
            | ((pci_address.function() as u32) << 8);
        let addr = [
            dt_address & pcie.interrupt_map_mask[0],
            0u32,
            0u32,
            interrupt_pin as u32 & pcie.interrupt_map_mask[3],
        ];
        let mapping = pcie
            .interrupt_map
            .iter()
            .find(|x| x.addr == addr[0..3] && x.interrupt == addr[3]);
        let phandled = if let Some(mapping) = mapping {
            Some((
                mapping.parent_phandle,
                mapping.parent_interrupt,
                mapping.parent_interrupt_cells,
            ))
        } else {
            None
        };
        if mapping.is_some() {
            debug!("found mapping: addr={:?} => {:?}", addr, phandled);
        }
        Some(LegacyInterruptLine { irq, phandled })
    } else {
        None
    }
}
/// Process entry point: initialize common runtime state, then hand control to
/// `daemon` under the [`daemon::Daemon`] wrapper.
fn main() {
    common::init();
    daemon::Daemon::new(daemon);
}
/// Daemon body: set up logging, optionally register with acpid, enumerate the
/// PCI buses, then serve the `pci` scheme forever.
fn daemon(daemon: daemon::Daemon) -> ! {
    common::setup_logging(
        "bus",
        "pci",
        "pcid",
        common::output_level(),
        common::file_level(),
    );
    let pcie = Pcie::new();
    info!("PCI SG-BS:DV.F VEND:DEVI CL.SC.IN.RV");
    let mut scheme = scheme::PciScheme::new(pcie);
    let socket = redox_scheme::Socket::create().expect("failed to open pci scheme socket");
    let handler = Blocking::new(&socket, 16);
    // Hand acpid an fd for the scheme's `access` file so it can perform PCI
    // config accesses; ACPI integration is optional (skipped on ENODEV).
    {
        match libredox::Fd::open("/scheme/acpi/register_pci", libredox::flag::O_WRONLY, 0) {
            Ok(register_pci) => {
                let access_id = scheme.access();
                let access_fd = socket
                    .create_this_scheme_fd(0, access_id, syscall::O_RDWR, 0)
                    .expect("failed to issue this resource");
                let access_bytes = access_fd.to_ne_bytes();
                let _ = register_pci
                    .call_wo(
                        &access_bytes,
                        syscall::CallFlags::WRITE | syscall::CallFlags::FD,
                        &[],
                    )
                    .expect("failed to send pci_fd to acpid");
            }
            Err(err) => {
                if err.errno() == libredox::errno::ENODEV {
                    debug!("pcid: acpid not found. Running without ACPI integration.");
                } else {
                    warn!("pcid: failed to open acpid register_pci (error: {}). Running without ACPI integration.", err);
                }
            }
        }
    }
    // FIXME Use full ACPI for enumerating the host bridges. MCFG only describes the first
    // host bridge, while multi-processor systems likely have a host bridge for each CPU.
    // See also https://www.kernel.org/doc/html/latest/PCI/acpi-info.html
    // Bus 0x80 is scanned for compatibility with newer (Arrow Lake) Intel CPUs where PCH devices
    // are there. This workaround may not be required if we had ACPI bus enumeration.
    let mut bus_nums = vec![0, 0x80];
    let mut bus_i = 0;
    // Breadth-first walk: scanning a bridge pushes its secondary bus number
    // onto `bus_nums`, extending this loop.
    while bus_i < bus_nums.len() {
        let bus_num = bus_nums[bus_i];
        bus_i += 1;
        for dev_num in 0..32 {
            scan_device(
                &mut scheme.tree,
                &scheme.pcie,
                &mut bus_nums,
                bus_num,
                dev_num,
            );
        }
    }
    debug!("Enumeration complete, now starting pci scheme");
    register_sync_scheme(&socket, "pci", &mut scheme)
        .expect("failed to register pci scheme to namespace");
    // Signal readiness only after the scheme is registered in the namespace.
    let _ = daemon.ready();
    handler
        .process_requests_blocking(scheme)
        .expect("pcid: failed to process requests");
}
/// Probe all (up to 8) functions of device `bus_num:dev_num`, adding endpoints
/// to `tree` and queueing secondary buses of any PCI-PCI bridges found.
fn scan_device(
    tree: &mut BTreeMap<PciAddress, Func>,
    pcie: &Pcie,
    bus_nums: &mut Vec<u8>,
    bus_num: u8,
    dev_num: u8,
) {
    for func_num in 0..8 {
        // NOTE(review): the segment is hard-coded to 0 here — presumably only
        // segment 0 is enumerated; confirm against cfg_access.
        let header = TyPciHeader::new(PciAddress::new(0, bus_num, dev_num, func_num));
        let (vendor_id, device_id) = header.id(pcie);
        // All-ones IDs mean nothing responded at this address.
        if vendor_id == 0xffff && device_id == 0xffff {
            if func_num == 0 {
                trace!("PCI {:>02X}:{:>02X}: no dev", bus_num, dev_num);
                return;
            }
            continue;
        }
        let (revision, class, subclass, interface) = header.revision_and_class(pcie);
        let full_device_id = FullDeviceId {
            vendor_id,
            device_id,
            class,
            subclass,
            interface,
            revision,
        };
        info!("PCI {} {}", header.address(), full_device_id.display());
        let has_multiple_functions = header.has_multiple_functions(pcie);
        match header.header_type(pcie) {
            HeaderType::Endpoint => {
                handle_parsed_header(
                    pcie,
                    tree,
                    EndpointHeader::from_header(header, pcie).unwrap(),
                    full_device_id,
                );
            }
            HeaderType::PciPciBridge => {
                // Queue the bus behind the bridge for scanning by the caller.
                let bridge_header = PciPciBridgeHeader::from_header(header, pcie).unwrap();
                bus_nums.push(bridge_header.secondary_bus_number(pcie));
            }
            ty => {
                warn!("pcid: unknown header type: {ty:?}");
            }
        }
        // Single-function devices only implement function 0.
        if func_num == 0 && !has_multiple_functions {
            return;
        }
    }
}
@@ -0,0 +1,428 @@
use std::collections::{BTreeMap, VecDeque};
use pci_types::{ConfigRegionAccess, PciAddress};
use redox_scheme::scheme::SchemeSync;
use redox_scheme::{CallerCtx, OpenResult};
use scheme_utils::HandleMap;
use syscall::dirent::{DirEntry, DirentBuf, DirentKind};
use syscall::error::{Error, Result, EACCES, EBADF, EINVAL, EIO, EISDIR, ENOENT, ENOTDIR};
use syscall::flag::{MODE_CHR, MODE_DIR, O_DIRECTORY, O_STAT};
use syscall::schemev2::NewFdFlags;
use syscall::ENOLCK;
use crate::cfg_access::Pcie;
/// The `pci` scheme: a small filesystem exposing one directory per discovered
/// PCI function, plus an `access` control file.
pub struct PciScheme {
    handles: HandleMap<HandleWrapper>,
    pub pcie: Pcie,
    pub tree: BTreeMap<PciAddress, crate::Func>,
}
/// What an open file descriptor into the scheme refers to.
enum Handle {
    // The scheme's top-level directory, listing one entry per function.
    TopLevel { entries: Vec<String> },
    // The `access` file (an fd to it is handed to acpid).
    Access,
    // A per-function directory (contains `channel`).
    Device,
    // A per-function message channel used by subdrivers.
    Channel { addr: PciAddress, st: ChannelState },
    // The scheme root itself; only usable as a base for openat.
    SchemeRoot,
}
struct HandleWrapper {
    inner: Handle,
    // Opened with O_STAT: only fstat is permitted on this handle.
    stat: bool,
}
impl Handle {
    /// Handles that behave like regular (character-device) files.
    fn is_file(&self) -> bool {
        match self {
            Self::Access | Self::Channel { .. } => true,
            Self::TopLevel { .. } | Self::Device | Self::SchemeRoot => false,
        }
    }
    /// Everything that is not a file is treated as a directory.
    fn is_dir(&self) -> bool {
        !self.is_file()
    }
    // TODO: capability rather than root
    /// Handles that only root may open (outside of O_STAT).
    fn requires_root(&self) -> bool {
        match self {
            Self::Access | Self::Channel { .. } => true,
            _ => false,
        }
    }
    fn is_scheme_root(&self) -> bool {
        match self {
            Self::SchemeRoot => true,
            _ => false,
        }
    }
}
/// Request/response phase of a subdriver channel.
enum ChannelState {
    // Waiting for the client to write a request.
    AwaitingData,
    // A serialized response is queued, waiting for the client to read it.
    AwaitingResponseRead(VecDeque<u8>),
}
// Entries listed in every per-function device directory.
const DEVICE_CONTENTS: &[&str] = &["channel"];
impl PciScheme {
    /// Create a new handle to the `access` file (used to pass a pre-opened
    /// access fd to acpid) and return its handle id.
    pub fn access(&mut self) -> usize {
        self.handles.insert(HandleWrapper {
            inner: Handle::Access,
            stat: false,
        })
    }
}
impl SchemeSync for PciScheme {
/// Hand out a handle representing the scheme root directory itself.
fn scheme_root(&mut self) -> Result<usize> {
    let wrapper = HandleWrapper {
        inner: Handle::SchemeRoot,
        stat: false,
    };
    Ok(self.handles.insert(wrapper))
}
/// Open a path relative to the scheme root.
///
/// Valid paths: `` (top-level listing), `access`, and `<addr>[/...]` where the
/// tail is resolved by `parse_after_pci_addr`. Non-O_STAT opens of
/// `access`/channel files require root.
fn openat(
    &mut self,
    dirfd: usize,
    path: &str,
    flags: usize,
    _fcntl_flags: u32,
    ctx: &CallerCtx,
) -> Result<OpenResult> {
    // Only the scheme-root handle may be used as the base directory.
    let handle = self.handles.get(dirfd)?;
    if !handle.inner.is_scheme_root() {
        return Err(Error::new(EACCES));
    }
    log::trace!("OPEN `{}` flags {}", path, flags);
    // TODO: Check flags are correct
    let expects_dir = path.ends_with('/') || flags & O_DIRECTORY != 0;
    let path = path.trim_matches('/');
    let handle = if path.is_empty() {
        Handle::TopLevel {
            entries: self
                .tree
                .iter()
                // FIXME remove replacement of : once the old scheme format is no longer supported.
                .map(|(addr, _)| format!("{}", addr).replace(':', "--"))
                .collect::<Vec<_>>(),
        }
    } else if path == "access" {
        Handle::Access
    } else {
        // The first path component is the PCI address; the remainder selects
        // a file within the device directory.
        let idx = path.find('/').unwrap_or(path.len());
        let (addr_str, after) = path.split_at(idx);
        let addr = parse_pci_addr(addr_str).ok_or(Error::new(ENOENT))?;
        self.parse_after_pci_addr(addr, after)?
    };
    // O_STAT opens bypass the dir/file shape and permission checks.
    let stat = flags & O_STAT != 0;
    if expects_dir && handle.is_file() && !stat {
        return Err(Error::new(ENOTDIR));
    }
    if !expects_dir && handle.is_dir() && !stat {
        return Err(Error::new(EISDIR));
    }
    if ctx.uid != 0 && handle.requires_root() && !stat {
        return Err(Error::new(EACCES));
    }
    let id = self.handles.insert(HandleWrapper {
        inner: handle,
        stat,
    });
    Ok(OpenResult::ThisScheme {
        number: id,
        flags: NewFdFlags::POSITIONED,
    })
}
/// Report size and mode: directories expose their entry count, the
/// `access`/channel files appear as character devices.
fn fstat(&mut self, id: usize, stat: &mut syscall::Stat, _ctx: &CallerCtx) -> Result<()> {
    let handle = self.handles.get_mut(id)?;
    match handle.inner {
        Handle::TopLevel { ref entries } => {
            stat.st_size = entries.len() as u64;
            stat.st_mode = MODE_DIR | 0o755;
        }
        Handle::Device => {
            stat.st_size = DEVICE_CONTENTS.len() as u64;
            stat.st_mode = MODE_DIR | 0o755;
        }
        Handle::Access | Handle::Channel { .. } => {
            stat.st_size = 0;
            stat.st_mode = MODE_CHR | 0o600;
        }
        Handle::SchemeRoot => return Err(Error::new(EBADF)),
    }
    Ok(())
}
fn read(
&mut self,
id: usize,
buf: &mut [u8],
_offset: u64,
_fcntl_flags: u32,
_ctx: &CallerCtx,
) -> Result<usize> {
let handle = self.handles.get_mut(id)?;
if handle.stat {
return Err(Error::new(EBADF));
}
match handle.inner {
Handle::TopLevel { .. } => Err(Error::new(EISDIR)),
Handle::Device => Err(Error::new(EISDIR)),
Handle::Channel {
addr: _,
ref mut st,
} => Self::read_channel(st, buf),
Handle::SchemeRoot => Err(Error::new(EBADF)),
_ => Err(Error::new(EBADF)),
}
}
fn getdents<'buf>(
&mut self,
id: usize,
mut buf: DirentBuf<&'buf mut [u8]>,
opaque_offset: u64,
) -> Result<DirentBuf<&'buf mut [u8]>> {
let Ok(offset) = usize::try_from(opaque_offset) else {
return Ok(buf);
};
let handle = self.handles.get_mut(id)?;
if handle.stat {
return Err(Error::new(EBADF));
}
let entries = match handle.inner {
Handle::TopLevel { ref entries } => {
for (i, dent_name) in entries.iter().enumerate().skip(offset) {
buf.entry(DirEntry {
inode: 0,
name: dent_name,
kind: DirentKind::Unspecified,
next_opaque_id: i as u64 + 1,
})?;
}
return Ok(buf);
}
Handle::Device => DEVICE_CONTENTS,
Handle::Access | Handle::Channel { .. } => return Err(Error::new(ENOTDIR)),
Handle::SchemeRoot => return Err(Error::new(EBADF)),
};
for (i, dent_name) in entries.iter().enumerate().skip(offset) {
buf.entry(DirEntry {
inode: 0,
name: dent_name,
kind: DirentKind::Unspecified,
next_opaque_id: i as u64 + 1,
})?;
}
Ok(buf)
}
fn write(
&mut self,
id: usize,
buf: &[u8],
_offset: u64,
_fcntl_flags: u32,
_ctx: &CallerCtx,
) -> Result<usize> {
let handle = self.handles.get_mut(id)?;
if handle.stat {
return Err(Error::new(EBADF));
}
match handle.inner {
Handle::Channel { addr, ref mut st } => {
Self::write_channel(&self.pcie, &mut self.tree, addr, st, buf)
}
_ => Err(Error::new(EBADF)),
}
}
fn call(
&mut self,
id: usize,
payload: &mut [u8],
metadata: &[u64],
_ctx: &CallerCtx,
) -> Result<usize> {
let handle = self.handles.get_mut(id)?;
if handle.stat {
return Err(Error::new(EBADF));
}
match handle.inner {
Handle::Access => {
let payload_len = u16::try_from(payload.len()).map_err(|_| Error::new(EINVAL))?;
let write = match metadata.get(0) {
Some(1) => false,
Some(2) => true,
_ => return Err(Error::new(EINVAL)),
};
let (addr, offset) = match metadata.get(1) {
Some(value) => {
// Segment: u16, at 28 bits
// Bus: u8, 8 bits, 256 total, at 20 bits
// Device: u8, 5 bits, 32 total, at 15 bits
// Function: u8, 3 bits, 8 total, at 12 bits
// Offset: u16, 12 bits, 4096 total, at 0 bits
(
PciAddress::new(
((value >> 28) & 0xFFFF) as u16,
((value >> 20) & 0xFF) as u8,
((value >> 15) & 0x1F) as u8,
((value >> 12) & 0x7) as u8,
),
(value & 0xFFF) as u16,
)
}
None => return Err(Error::new(EINVAL)),
};
// This handle must allow less than 4 byte access, but the
// lower level only works with 4 byte reads and writes
let unaligned = offset % 4;
let start = offset - unaligned;
let end = offset + payload_len;
let mut i = 0;
while start + i < end {
let mut bytes = unsafe { self.pcie.read(addr, start + i) }.to_le_bytes();
for j in 0..bytes.len() {
if let Some(payload_i) = i.checked_sub(unaligned) {
if let Some(payload_b) = payload.get_mut(usize::from(payload_i)) {
if write {
bytes[j] = *payload_b;
} else {
*payload_b = bytes[j]
}
}
}
i += 1;
}
if write {
let value = u32::from_le_bytes(bytes);
unsafe {
self.pcie.write(addr, start + i, value);
}
}
}
Ok(payload.len())
}
_ => Err(Error::new(EBADF)),
}
}
fn on_close(&mut self, id: usize) {
match self.handles.remove(id) {
Some(HandleWrapper {
inner: Handle::Channel { addr, .. },
..
}) => {
log::trace!("TODO: Support disabling device (called on {})", addr);
if let Some(func) = self.tree.get_mut(&addr) {
func.enabled = false;
}
}
_ => {}
}
}
}
impl PciScheme {
pub fn new(pcie: Pcie) -> Self {
Self {
handles: HandleMap::new(),
pcie,
tree: BTreeMap::new(),
}
}
fn parse_after_pci_addr(&mut self, addr: PciAddress, after: &str) -> Result<Handle> {
if after.chars().next().map_or(false, |c| c != '/') {
return Err(Error::new(ENOENT));
}
let func = self.tree.get_mut(&addr).ok_or(Error::new(ENOENT))?;
Ok(if after.is_empty() {
Handle::Device
} else {
let path = &after[1..];
match path {
"channel" => {
if func.enabled {
return Err(Error::new(ENOLCK));
}
func.inner.legacy_interrupt_line = crate::enable_function(
&self.pcie,
&mut func.endpoint_header,
&mut func.capabilities,
);
func.enabled = true;
Handle::Channel {
addr,
st: ChannelState::AwaitingData,
}
}
_ => return Err(Error::new(ENOENT)),
}
})
}
fn read_channel(state: &mut ChannelState, buf: &mut [u8]) -> Result<usize> {
match *state {
ChannelState::AwaitingResponseRead(ref mut queue) => {
let byte_count = std::cmp::min(queue.len(), buf.len());
// XXX: Why can't VecDeque support dequeueing into slices?
for (idx, byte) in queue.drain(..byte_count).enumerate() {
buf[idx] = byte;
}
if queue.is_empty() {
*state = ChannelState::AwaitingData;
}
Ok(byte_count)
}
ChannelState::AwaitingData => Err(Error::new(EINVAL)),
}
}
fn write_channel(
pci_state: &Pcie,
tree: &mut BTreeMap<PciAddress, crate::Func>,
addr: PciAddress,
state: &mut ChannelState,
buf: &[u8],
) -> Result<usize> {
match *state {
ChannelState::AwaitingResponseRead(_) => return Err(Error::new(EINVAL)),
ChannelState::AwaitingData => {
let func = tree.get_mut(&addr).unwrap();
let request = bincode::deserialize_from(buf).map_err(|_| Error::new(EINVAL))?;
let response = crate::driver_handler::DriverHandler::new(
func.inner.clone(),
&mut func.endpoint_header,
&mut func.capabilities,
&*pci_state,
)
.respond(request);
let mut output_bytes = vec![0_u8; 8];
bincode::serialize_into(&mut output_bytes, &response)
.map_err(|_| Error::new(EIO))?;
let len = output_bytes.len() - 8;
output_bytes[..8].copy_from_slice(&u64::to_le_bytes(len as u64));
*state = ChannelState::AwaitingResponseRead(output_bytes.into());
Ok(buf.len())
}
}
}
}
/// Parse a PCI address of the form `SSSS--BB--DD.F` (all fields hex) into a
/// `PciAddress`, or `None` if the string is malformed.
// FIXME use : instead of -- as the field separator once the old scheme
// format is no longer supported.
fn parse_pci_addr(addr: &str) -> Option<PciAddress> {
    let (segment, tail) = addr.split_once("--")?;
    let (bus, dev_func) = tail.split_once("--")?;
    let (device, function) = dev_func.split_once('.')?;
    Some(PciAddress::new(
        u16::from_str_radix(segment, 16).ok()?,
        u8::from_str_radix(bus, 16).ok()?,
        u8::from_str_radix(device, 16).ok()?,
        u8::from_str_radix(function, 16).ok()?,
    ))
}