Red Bear OS — microkernel OS in Rust, based on Redox

Derivative of Redox OS (https://www.redox-os.org) adding:
- AMD GPU driver (amdgpu) via LinuxKPI compat layer
- ext4 filesystem support (ext4d scheme daemon)
- ACPI fixes for AMD bare metal (x2APIC, DMAR, IVRS, MCFG)
- Custom branding (hostname, os-release, boot identity)

Build system is full upstream Redox with RBOS overlay in local/.
Patches for kernel, base, and relibc are symlinked from local/patches/
and protected from make clean/distclean. Custom recipes live in
local/recipes/ with symlinks into the recipes/ search path.

Build:  make all CONFIG_NAME=redbear-full
Sync:   ./local/scripts/sync-upstream.sh
This commit is contained in:
2026-04-12 19:05:00 +01:00
commit 50b731f1b7
3392 changed files with 98327 additions and 0 deletions
@@ -0,0 +1,219 @@
use core::ptr::NonNull;
use std::sync::atomic::{AtomicI32, Ordering};
use redox_syscall::flag::{MAP_SHARED, O_CLOEXEC, O_RDWR, PROT_READ, PROT_WRITE};
use redox_syscall::PAGE_SIZE;
use syscall as redox_syscall;
use crate::{DriverError, Result};
/// Cached FD for `/scheme/memory/physical`; -1 means "not yet opened".
/// The FD is cached for the process lifetime for performance. If scheme:memory
/// restarts (which should never happen — it's a kernel scheme), all
/// in-flight DMA operations are already undefined behavior.
static DMA_MEMORY_FD: AtomicI32 = AtomicI32::new(-1);
/// Return the cached `/scheme/memory/physical` FD, opening it on first use.
///
/// Thread-safe initialization: if another thread wins the
/// `compare_exchange` race, the freshly opened FD is closed and the
/// winner's cached FD is returned instead.
fn get_dma_memory_fd() -> Result<i32> {
let current = DMA_MEMORY_FD.load(Ordering::Acquire);
if current >= 0 {
return Ok(current);
}
let fd = libredox::call::open("/scheme/memory/physical", (O_CLOEXEC | O_RDWR) as i32, 0)
.map_err(|e| DriverError::Io(std::io::Error::from_raw_os_error(e.errno())))?;
// NOTE(review): assumes fd values never exceed i32::MAX — confirm for Redox.
let raw = fd as i32;
// Try to store; if another thread won the race, close ours and use theirs.
match DMA_MEMORY_FD.compare_exchange(-1, raw, Ordering::AcqRel, Ordering::Acquire) {
Ok(_) => Ok(raw),
Err(existing) => {
let _ = libredox::call::close(fd as usize);
Ok(existing)
}
}
}
/// Translate a virtual address in this process to a physical address via
/// `/scheme/memory/translation`.
///
/// The translation FD is opened once and cached for the process lifetime
/// (same pattern as `DMA_MEMORY_FD`); a thread that loses the init race
/// closes its own FD and uses the winner's.
///
/// Protocol as used here: the virtual address is written into `buf` in
/// native endianness and the scheme call overwrites it in place with the
/// physical address. NOTE(review): relies on `call_ro` treating `buf` as an
/// in/out payload — confirm against the libredox call API.
fn virt_to_phys_cached(virt: usize) -> Result<usize> {
// Use a cached fd for address translation
static TRANSLATION_FD: AtomicI32 = AtomicI32::new(-1);
let raw = match TRANSLATION_FD.load(Ordering::Acquire) {
fd if fd >= 0 => fd,
_ => {
let fd = libredox::Fd::open("/scheme/memory/translation", O_CLOEXEC as i32, 0)
.map_err(|e| DriverError::Io(std::io::Error::from_raw_os_error(e.errno())))?;
let raw = fd.raw() as i32;
// Leak the fd intentionally — it's a global cache
std::mem::forget(fd);
match TRANSLATION_FD.compare_exchange(-1, raw, Ordering::AcqRel, Ordering::Acquire) {
Ok(_) => raw,
Err(existing) => {
// Lost the init race: close our FD, use the winner's.
let _ = libredox::call::close(raw as usize);
existing
}
}
}
};
let mut buf = virt.to_ne_bytes();
libredox::call::call_ro(
raw as usize,
&mut buf,
redox_syscall::CallFlags::empty(),
&[],
)
.map_err(DriverError::from)?;
Ok(usize::from_ne_bytes(buf))
}
/// Backing storage for a [`DmaBuffer`].
enum DmaStorage {
/// Allocated via scheme:memory — freed via munmap
SchemeMapped { ptr: NonNull<u8>, size: usize },
/// Allocated via heap — freed via dealloc
Heap {
ptr: NonNull<u8>,
layout: std::alloc::Layout,
},
}
/// A DMA buffer: backing storage plus its resolved physical address.
/// Only the scheme-mapped variant is guaranteed physically contiguous
/// (see `is_physically_contiguous`).
pub struct DmaBuffer {
storage: DmaStorage,
// Physical address of the first byte (from virt_to_phys_cached).
phys_addr: usize,
// Length in bytes; page-rounded when scheme-allocated, as-requested
// for the heap fallback.
size: usize,
}
impl DmaBuffer {
    /// Allocate a DMA buffer of `size` bytes with at least `align` alignment.
    ///
    /// Strategy:
    /// 1. Preferred: physically contiguous pages via `/scheme/memory/physical`
    ///    (`allocate_via_scheme`) — required for real DMA hardware.
    /// 2. Fallback: plain heap allocation (NOT physically contiguous), with a
    ///    loud warning — only acceptable for off-target testing.
    ///
    /// # Errors
    /// `DriverError::InvalidParam` for `size == 0`, otherwise the underlying
    /// scheme / allocation / translation error.
    pub fn allocate(size: usize, align: usize) -> Result<Self> {
        // A zero-size buffer is useless for DMA, and passing a zero-size
        // layout to `alloc_zeroed` below would be undefined behavior.
        if size == 0 {
            return Err(DriverError::InvalidParam("DMA buffer size must be non-zero"));
        }
        // Hardware commonly wants at least cache-line alignment.
        let align = align.max(64);
        let aligned_size = size.next_multiple_of(PAGE_SIZE).max(align);
        // Attempt 1: Allocate via scheme:memory (physically contiguous)
        if let Ok(mem_fd) = get_dma_memory_fd() {
            if let Ok(mapped) = Self::allocate_via_scheme(mem_fd, aligned_size, align) {
                return Ok(mapped);
            }
        }
        // Fallback: heap allocation (NOT physically contiguous — log warning)
        log::warn!(
            "DmaBuffer: falling back to heap allocation ({} bytes) — NOT physically contiguous!",
            size
        );
        let layout = std::alloc::Layout::from_size_align(size, align)
            .map_err(|e| DriverError::Other(format!("invalid DMA layout: {e}")))?;
        // SAFETY: `layout` has non-zero size (checked above) and a valid alignment.
        let ptr = unsafe { std::alloc::alloc_zeroed(layout) };
        let ptr = NonNull::new(ptr).ok_or_else(|| {
            DriverError::Other(format!(
                "DMA allocation failed: {size} bytes aligned to {align}"
            ))
        })?;
        // Fix: free the heap block when translation fails instead of leaking it.
        let phys_addr = match virt_to_phys_cached(ptr.as_ptr() as usize) {
            Ok(phys) => phys,
            Err(err) => {
                // SAFETY: `ptr` was returned by `alloc_zeroed(layout)` above.
                unsafe { std::alloc::dealloc(ptr.as_ptr(), layout) };
                return Err(err);
            }
        };
        Ok(Self {
            storage: DmaStorage::Heap { ptr, layout },
            phys_addr,
            size,
        })
    }
    /// Allocate physically contiguous memory via scheme:memory/physical.
    ///
    /// Opens a `zeroed@<size>` region relative to the cached scheme FD, maps
    /// it shared + read/write, then resolves its physical address.
    fn allocate_via_scheme(mem_fd: i32, size: usize, _align: usize) -> Result<Self> {
        // Open a physical memory region of the requested size
        let path = format!("zeroed@{}", size);
        let region_fd =
            libredox::call::openat(mem_fd as usize, &path, (O_CLOEXEC | O_RDWR) as i32, 0)
                .map_err(|e| DriverError::Io(std::io::Error::from_raw_os_error(e.errno())))?;
        // Map it into our address space
        let mapped = unsafe {
            libredox::call::mmap(libredox::call::MmapArgs {
                fd: region_fd as usize,
                offset: 0,
                length: size,
                flags: MAP_SHARED.bits() as u32,
                prot: (PROT_READ | PROT_WRITE).bits() as u32,
                addr: core::ptr::null_mut(),
            })
        }
        .map_err(|e| {
            let _ = libredox::call::close(region_fd as usize);
            DriverError::MappingFailed {
                phys: 0,
                size,
                reason: format!("DMA mmap failed: {e:?}"),
            }
        })?;
        // The region FD is no longer needed once the mapping exists.
        let _ = libredox::call::close(region_fd as usize);
        let ptr = NonNull::new(mapped as *mut u8)
            .ok_or_else(|| DriverError::Other("DMA mmap returned null".into()))?;
        // Fix: unmap on translation failure so a failed allocation does not
        // leak the physical region for the life of the process.
        let phys_addr = match virt_to_phys_cached(ptr.as_ptr() as usize) {
            Ok(phys) => phys,
            Err(err) => {
                // SAFETY: `ptr`/`size` describe the mapping created above.
                let _ = unsafe { libredox::call::munmap(ptr.as_ptr() as *mut (), size) };
                return Err(err);
            }
        };
        log::debug!(
            "DmaBuffer: {} bytes at virt={:#x} phys={:#x} (physically contiguous)",
            size,
            ptr.as_ptr() as usize,
            phys_addr
        );
        Ok(Self {
            storage: DmaStorage::SchemeMapped { ptr, size },
            phys_addr,
            size,
        })
    }
    /// Read-only pointer to the start of the buffer.
    pub fn as_ptr(&self) -> *const u8 {
        match &self.storage {
            DmaStorage::SchemeMapped { ptr, .. } | DmaStorage::Heap { ptr, .. } => ptr.as_ptr(),
        }
    }
    /// Mutable pointer to the start of the buffer.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        match &mut self.storage {
            DmaStorage::SchemeMapped { ptr, .. } | DmaStorage::Heap { ptr, .. } => ptr.as_ptr(),
        }
    }
    /// Physical address of the first byte, as resolved at allocation time.
    pub fn physical_address(&self) -> usize {
        self.phys_addr
    }
    /// Buffer length in bytes (see the `size` field note on the struct).
    pub fn len(&self) -> usize {
        self.size
    }
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }
    /// Returns true if this buffer is guaranteed physically contiguous.
    /// On real hardware, this must be true for DMA to work safely.
    pub fn is_physically_contiguous(&self) -> bool {
        matches!(self.storage, DmaStorage::SchemeMapped { .. })
    }
}
impl Drop for DmaBuffer {
/// Release the backing storage through the same mechanism that created it.
fn drop(&mut self) {
match &self.storage {
DmaStorage::SchemeMapped { ptr, size } => {
// SAFETY: `ptr`/`size` describe the mapping made in allocate_via_scheme.
let _ = unsafe { libredox::call::munmap(ptr.as_ptr() as *mut (), *size) };
}
DmaStorage::Heap { ptr, layout } => {
// SAFETY: `ptr` was allocated with exactly this `layout`.
unsafe { std::alloc::dealloc(ptr.as_ptr(), *layout) };
}
}
}
}
// SAFETY: the buffer exclusively owns its storage and `phys_addr`/`size`
// are plain data. NOTE(review): `Sync` allows shared `&self` access from
// multiple threads; only `as_ptr`/getters are reachable through `&self`
// today — confirm no interior mutability is added later.
unsafe impl Send for DmaBuffer {}
unsafe impl Sync for DmaBuffer {}
@@ -0,0 +1,68 @@
use syscall as redox_syscall;
use crate::Result;
/// Request I/O privilege (IOPL) for this process so that port I/O
/// (`in`/`out`) is permitted.
///
/// Duplicates the current-thread kernel FD (obtained from
/// `redox_cur_thrfd_v0` — presumably provided by relibc, confirm) with the
/// `open_via_dup` action, then issues the `Iopl` proc-scheme verb on it.
/// The temporary FD is closed whether or not the call succeeds.
#[cfg(all(target_arch = "x86_64", target_os = "redox"))]
pub fn acquire_iopl() -> Result<()> {
extern "C" {
fn redox_cur_thrfd_v0() -> usize;
}
let kernel_fd = redox_syscall::dup(unsafe { redox_cur_thrfd_v0() }, b"open_via_dup")?;
let res = libredox::call::call_wo(
kernel_fd,
&[],
redox_syscall::CallFlags::empty(),
&[redox_syscall::ProcSchemeVerb::Iopl as u64],
);
// Close before inspecting the result so the FD never leaks.
let _ = redox_syscall::close(kernel_fd);
res.map(|_| ()).map_err(|e| e.into())
}
/// Host-side stub: IOPL acquisition is a Redox-only operation, so this
/// always fails with a descriptive error.
#[cfg(all(target_arch = "x86_64", not(target_os = "redox")))]
pub fn acquire_iopl() -> Result<()> {
    let message = "acquire_iopl: only available on Redox".to_string();
    Err(crate::DriverError::Other(message))
}
/// Read one byte from I/O `port`.
///
/// Requires port-I/O privilege (see `acquire_iopl`); executing without it
/// faults.
#[cfg(target_arch = "x86_64")]
#[inline]
pub fn inb(port: u16) -> u8 {
    let val: u8;
    // Fix: the old template used the AT&T mnemonic/operand order
    // ("inb {1:x}, {0}") under Rust asm!'s default Intel syntax, and let the
    // allocator pick arbitrary registers — but x86 `in` requires the port in
    // DX and the result in AL. Use Intel syntax with fixed registers.
    // SAFETY: port input has no Rust-visible memory effects; caller must
    // hold I/O privilege.
    unsafe {
        core::arch::asm!(
            "in al, dx",
            out("al") val,
            in("dx") port,
            options(nomem, nostack, preserves_flags),
        )
    };
    val
}
/// Write one byte to I/O `port`. Requires I/O privilege.
#[cfg(target_arch = "x86_64")]
#[inline]
pub fn outb(port: u16, val: u8) {
    // Fix: Intel syntax with the architecturally required fixed registers
    // (port in DX, data in AL) — see `inb` for details.
    // SAFETY: port output has no Rust-visible memory effects; caller must
    // hold I/O privilege.
    unsafe {
        core::arch::asm!(
            "out dx, al",
            in("dx") port,
            in("al") val,
            options(nomem, nostack, preserves_flags),
        )
    };
}
/// Read a 32-bit value from I/O `port`. Requires I/O privilege.
#[cfg(target_arch = "x86_64")]
#[inline]
pub fn inl(port: u16) -> u32 {
    let val: u32;
    // Fix: Intel syntax with fixed registers (port in DX, data in EAX) —
    // see `inb` for details.
    // SAFETY: port input has no Rust-visible memory effects; caller must
    // hold I/O privilege.
    unsafe {
        core::arch::asm!(
            "in eax, dx",
            out("eax") val,
            in("dx") port,
            options(nomem, nostack, preserves_flags),
        )
    };
    val
}
/// Write a 32-bit value to I/O `port`. Requires I/O privilege.
#[cfg(target_arch = "x86_64")]
#[inline]
pub fn outl(port: u16, val: u32) {
    // Fix: Intel syntax with fixed registers (port in DX, data in EAX) —
    // see `inb` for details.
    // SAFETY: port output has no Rust-visible memory effects; caller must
    // hold I/O privilege.
    unsafe {
        core::arch::asm!(
            "out dx, eax",
            in("dx") port,
            in("eax") val,
            options(nomem, nostack, preserves_flags),
        )
    };
}
/// Read a 16-bit value from I/O `port`. Requires I/O privilege.
#[cfg(target_arch = "x86_64")]
#[inline]
pub fn inw(port: u16) -> u16 {
    let val: u16;
    // Fix: Intel syntax with fixed registers (port in DX, data in AX) —
    // see `inb` for details.
    // SAFETY: port input has no Rust-visible memory effects; caller must
    // hold I/O privilege.
    unsafe {
        core::arch::asm!(
            "in ax, dx",
            out("ax") val,
            in("dx") port,
            options(nomem, nostack, preserves_flags),
        )
    };
    val
}
/// Write a 16-bit value to I/O `port`. Requires I/O privilege.
#[cfg(target_arch = "x86_64")]
#[inline]
pub fn outw(port: u16, val: u16) {
    // Fix: Intel syntax with fixed registers (port in DX, data in AX) —
    // see `inb` for details.
    // SAFETY: port output has no Rust-visible memory effects; caller must
    // hold I/O privilege.
    unsafe {
        core::arch::asm!(
            "out dx, ax",
            in("dx") port,
            in("ax") val,
            options(nomem, nostack, preserves_flags),
        )
    };
}
@@ -0,0 +1,305 @@
use std::fs::File;
use std::io::{ErrorKind, Read};
#[cfg(target_os = "redox")]
use std::fs::OpenOptions;
#[cfg(target_os = "redox")]
use std::io::Write;
use crate::memory::{CacheType, MmioProt, MmioRegion};
use crate::pci::{MsixCapability, PciDevice, PciDeviceInfo};
use crate::{DriverError, Result};
/// Size in bytes of one MSI-X table entry (4 dwords).
const MSIX_ENTRY_SIZE: usize = 16;
/// Byte offset of the Vector Control dword inside an entry.
const MSIX_VECTOR_CTRL_OFFSET: usize = 12;
/// Per-vector mask bit in the Vector Control dword.
const MSIX_MASK_BIT: u32 = 1;
/// x86 MSI message address base (local-APIC range).
#[cfg(target_os = "redox")]
const X86_MSI_ADDRESS_BASE: u64 = 0x0000_0000_FEE0_0000;
/// Handle to a legacy (INTx) interrupt line backed by `/scheme/irq/<n>`.
pub struct IrqHandle {
fd: File,
irq: u32,
}
/// Event returned when an interrupt fires.
#[derive(Debug)]
pub struct IrqEvent {
pub irq: u32,
}
/// Mapped MSI-X vector table and pending-bit array for one device.
pub struct MsixTable {
// MMIO mapping of the vector table.
pub base: MmioRegion,
// MMIO mapping of the pending-bit array.
pub pba: MmioRegion,
// Number of table entries.
pub table_size: u16,
// Address of the BAR holding the vector table.
pub bar_addr: u64,
}
/// One allocated MSI-X vector and the FD used to wait on it.
pub struct MsixVector {
pub index: u16,
pub irq: u32,
pub fd: File,
}
impl IrqHandle {
/// Acquire the legacy IRQ line `irq` by opening `/scheme/irq/<irq>`.
#[cfg(target_os = "redox")]
pub fn request(irq: u32) -> Result<Self> {
let path = format!("/scheme/irq/{irq}");
let fd = File::open(&path).map_err(|e| {
log::warn!("failed to open IRQ {irq} at {path}: {e}");
e
})?;
log::debug!("IRQ {irq} acquired via {path}");
Ok(Self { fd, irq })
}
/// Host-side stub: IRQ delivery exists only on Redox.
#[cfg(not(target_os = "redox"))]
pub fn request(irq: u32) -> Result<Self> {
Err(DriverError::Irq(format!(
"IRQ {irq} is only available on target_os=redox"
)))
}
/// Block until the interrupt fires (reads the 8-byte event payload).
pub fn wait(&mut self) -> Result<IrqEvent> {
let mut buf = [0u8; 8];
self.fd.read_exact(&mut buf)?;
Ok(IrqEvent { irq: self.irq })
}
/// Non-blocking poll: `Ok(None)` when nothing is pending (EOF or
/// `WouldBlock`); retries on `Interrupted`.
pub fn try_wait(&mut self) -> Result<Option<IrqEvent>> {
let mut buf = [0u8; 8];
loop {
match self.fd.read(&mut buf) {
Ok(0) => return Ok(None),
Ok(_) => return Ok(Some(IrqEvent { irq: self.irq })),
Err(err) if err.kind() == ErrorKind::WouldBlock => return Ok(None),
Err(err) if err.kind() == ErrorKind::Interrupted => continue,
Err(err) => return Err(err.into()),
}
}
}
/// Pin delivery of this IRQ to the CPUs in `cpu_mask` by writing the mask
/// (little-endian u64) to the scheme's affinity file.
#[cfg(target_os = "redox")]
pub fn set_affinity(&self, cpu_mask: u64) -> Result<()> {
let path = format!("/scheme/irq/{}/affinity", self.irq);
let mut fd = OpenOptions::new().write(true).open(&path).map_err(|err| {
DriverError::Irq(format!("failed to open IRQ affinity control {path}: {err}"))
})?;
fd.write_all(&cpu_mask.to_le_bytes())?;
Ok(())
}
/// Host-side stub: affinity control exists only on Redox.
#[cfg(not(target_os = "redox"))]
pub fn set_affinity(&self, _cpu_mask: u64) -> Result<()> {
Err(DriverError::Irq(
"IRQ affinity control is only available on target_os=redox".into(),
))
}
/// The IRQ number this handle waits on.
pub fn irq(&self) -> u32 {
self.irq
}
}
impl MsixTable {
/// Map the MSI-X vector table and pending-bit array (PBA) described by
/// `cap`, using BAR addresses from `device_info`.
pub fn map(device_info: &PciDeviceInfo, cap: &MsixCapability) -> Result<Self> {
let table_bar = lookup_msix_bar(device_info, cap.table_bar, "table")?;
let pba_bar = lookup_msix_bar(device_info, cap.pba_bar, "PBA")?;
// 16 bytes per entry; the PBA is one bit per vector in u64 words.
let table_len = usize::from(cap.table_size) * MSIX_ENTRY_SIZE;
let pba_len = usize::from(cap.table_size).div_ceil(64) * core::mem::size_of::<u64>();
// Verify both windows lie inside their BARs before mapping anything.
let table_phys =
checked_bar_window(table_bar.addr, table_bar.size, cap.table_offset, table_len)?;
let pba_phys = checked_bar_window(pba_bar.addr, pba_bar.size, cap.pba_offset, pba_len)?;
let base = MmioRegion::map(
table_phys,
table_len,
CacheType::DeviceMemory,
MmioProt::READ_WRITE,
)?;
let pba = MmioRegion::map(
pba_phys,
pba_len,
CacheType::DeviceMemory,
MmioProt::READ_WRITE,
)?;
Ok(Self {
base,
pba,
table_size: cap.table_size,
bar_addr: table_bar.addr,
})
}
/// Mask every vector — a safe initial state before vectors are assigned.
pub fn mask_all(&self) {
for index in 0..self.table_size {
self.mask_vector(index);
}
}
/// Set the MSI-X enable bit in the capability at `cap_offset`.
pub fn enable(&mut self, pci_device: &mut PciDevice, cap_offset: u8) -> Result<()> {
pci_device.enable_msix(cap_offset)
}
/// Allocate a free kernel IRQ on the BSP, program entry `index` to target
/// it, and unmask the entry.
#[cfg(target_os = "redox")]
pub fn request_vector(&self, index: u16) -> Result<MsixVector> {
let cpu_id = read_bsp_cpu_id()?;
let (irq, fd) = allocate_irq_vector(cpu_id)?;
self.program_x86_message(index, cpu_id, irq)?;
self.unmask_vector(index);
Ok(MsixVector { fd, index, irq })
}
/// Host-side stub: MSI-X vectors exist only on Redox.
#[cfg(not(target_os = "redox"))]
pub fn request_vector(&self, index: u16) -> Result<MsixVector> {
Err(DriverError::Irq(format!(
"MSI-X vector {index} allocation is only available on target_os=redox"
)))
}
/// Set the per-vector mask bit; out-of-range indices are ignored.
pub fn mask_vector(&self, index: u16) {
if let Ok(offset) = self.entry_offset(index) {
self.base
.write32(offset + MSIX_VECTOR_CTRL_OFFSET, MSIX_MASK_BIT);
}
}
/// Clear the per-vector mask bit; out-of-range indices are ignored.
pub fn unmask_vector(&self, index: u16) {
if let Ok(offset) = self.entry_offset(index) {
self.base.write32(offset + MSIX_VECTOR_CTRL_OFFSET, 0);
}
}
/// Whether the PBA marks vector `index` as pending.
pub fn is_pending(&self, index: u16) -> bool {
if index >= self.table_size {
return false;
}
// Byte offset of the u64 word holding this vector's pending bit.
let word_index = usize::from(index / 64) * core::mem::size_of::<u64>();
let bit = u32::from(index % 64);
(self.pba.read64(word_index) & (1u64 << bit)) != 0
}
/// Byte offset of entry `index` in the table, or an error when the index
/// is outside the table.
fn entry_offset(&self, index: u16) -> Result<usize> {
if index >= self.table_size {
return Err(DriverError::Irq(format!(
"MSI-X vector index {index} is outside table size {}",
self.table_size
)));
}
Ok(usize::from(index) * MSIX_ENTRY_SIZE)
}
/// Program an x86 MSI message for entry `index` targeting `cpu_id` with
/// kernel IRQ `irq`. The CPU vector is `irq + 32` (skipping the CPU
/// exception range, standard x86 convention); the message address carries
/// the APIC destination id in bits 12..20.
#[cfg(target_os = "redox")]
fn program_x86_message(&self, index: u16, cpu_id: u8, irq: u32) -> Result<()> {
let offset = self.entry_offset(index)?;
let vector = irq
.checked_add(32)
.ok_or_else(|| DriverError::Irq(format!("IRQ {irq} overflowed x86 vector space")))?;
let vector = u8::try_from(vector).map_err(|_| {
DriverError::Irq(format!("IRQ {irq} does not fit in an x86 MSI-X vector"))
})?;
let message_addr = X86_MSI_ADDRESS_BASE | (u64::from(cpu_id) << 12);
// Entry layout: +0 address low, +4 address high, +8 message data.
self.base.write32(offset, message_addr as u32);
self.base.write32(offset + 4, (message_addr >> 32) as u32);
self.base.write32(offset + 8, u32::from(vector));
Ok(())
}
}
/// Resolve the memory BAR an MSI-X capability field points at; a missing or
/// non-memory BAR means the capability is malformed from our point of view.
fn lookup_msix_bar<'a>(
    device_info: &'a PciDeviceInfo,
    bar_index: u8,
    label: &str,
) -> Result<&'a crate::pci::PciBarInfo> {
    match device_info.find_memory_bar(usize::from(bar_index)) {
        Some(bar) => Ok(bar),
        None => Err(DriverError::CapabilityNotFound(format!(
            "MSI-X {label} BAR {}",
            bar_index
        ))),
    }
}
/// Validate that the window `[offset, offset + len)` lies inside a BAR of
/// `bar_size` bytes and return its absolute physical start address.
fn checked_bar_window(bar_addr: u64, bar_size: u64, offset: u32, len: usize) -> Result<u64> {
    let Ok(len_u64) = u64::try_from(len) else {
        return Err(DriverError::InvalidParam("MSI-X BAR window length overflow"));
    };
    let offset_u64 = u64::from(offset);
    // Both the absolute start and the window extent must be representable.
    let start = bar_addr
        .checked_add(offset_u64)
        .ok_or(DriverError::InvalidParam("MSI-X BAR address overflow"))?;
    let end = offset_u64
        .checked_add(len_u64)
        .ok_or(DriverError::InvalidParam("MSI-X BAR range overflow"))?;
    if end > bar_size {
        return Err(DriverError::Irq(format!(
            "MSI-X BAR window offset {:#x} len {:#x} exceeds BAR size {:#x}",
            offset, len, bar_size
        )));
    }
    Ok(start)
}
/// Read the bootstrap processor's CPU id from `/scheme/irq/bsp`.
///
/// The scheme may return a 4- or 8-byte little-endian payload; anything
/// else is a protocol error. NOTE(review): a single `read` is assumed to
/// return the whole payload — confirm the irq scheme never splits it.
#[cfg(target_os = "redox")]
fn read_bsp_cpu_id() -> Result<u8> {
let mut fd = File::open("/scheme/irq/bsp")
.map_err(|err| DriverError::Irq(format!("failed to open /scheme/irq/bsp: {err}")))?;
let mut buf = [0u8; 8];
let bytes_read = fd.read(&mut buf)?;
let raw = match bytes_read {
8 => u64::from_le_bytes(buf),
4 => u32::from_le_bytes([buf[0], buf[1], buf[2], buf[3]]) as u64,
_ => {
return Err(DriverError::Irq(format!(
"unexpected /scheme/irq/bsp payload size {bytes_read}"
)))
}
};
u8::try_from(raw).map_err(|_| DriverError::Irq(format!("BSP CPU id {raw} does not fit in u8")))
}
/// Claim a free IRQ vector on `cpu_id` by scanning `/scheme/irq/cpu-XX/`.
///
/// `create_new(true)` makes the claim atomic: the first opener of a vector
/// file wins, racing allocators observe `AlreadyExists` and try the next
/// candidate. `NotFound` is also skipped in case a vector disappears
/// between enumeration and open.
#[cfg(target_os = "redox")]
fn allocate_irq_vector(cpu_id: u8) -> Result<(u32, File)> {
let dir = format!("/scheme/irq/cpu-{cpu_id:02x}");
let entries = std::fs::read_dir(&dir).map_err(|err| {
DriverError::Irq(format!("failed to enumerate IRQ vectors in {dir}: {err}"))
})?;
let mut candidates = Vec::new();
for entry in entries {
let entry = entry?;
// Non-UTF-8 or non-numeric entries are not vector files; skip them.
let Some(name) = entry.file_name().to_str().map(str::to_owned) else {
continue;
};
let Ok(irq) = name.parse::<u32>() else {
continue;
};
candidates.push(irq);
}
// Try vectors in ascending order for deterministic allocation.
candidates.sort_unstable();
for irq in candidates {
let path = format!("{dir}/{irq}");
match OpenOptions::new()
.read(true)
.write(true)
.create_new(true)
.open(&path)
{
Ok(fd) => return Ok((irq, fd)),
Err(err) if err.kind() == ErrorKind::AlreadyExists => continue,
Err(err) if err.kind() == ErrorKind::NotFound => continue,
Err(err) => {
return Err(DriverError::Irq(format!(
"failed to allocate MSI-X IRQ vector via {path}: {err}"
)))
}
}
}
Err(DriverError::Irq(format!(
"no free IRQ vectors available in {dir}"
)))
}
@@ -0,0 +1,84 @@
//! Safe Rust wrappers for Redox OS scheme-based hardware access.
//!
//! Provides abstractions for physical memory mapping, interrupt handling,
//! PCI device access, port I/O, DMA buffer management, and capability scanning.
//!
//! All hardware access goes through Redox's scheme system:
//! - `scheme:memory` for physical memory mapping and address translation
//! - `scheme:irq` for interrupt delivery
//! - `scheme:pci` for PCI device enumeration and configuration
//!
//! # Example
//!
//! ```no_run
//! use redox_driver_sys::pci::PciDevice;
//!
//! // Open a PCI device by (segment, bus, device, function).
//! let mut dev = PciDevice::open(0, 0x10, 0, 0)?;
//! let vendor = dev.vendor_id()?;
//! let bars = dev.parse_bars()?;
//! // `memory_info` yields `(physical_address, size)` for a populated memory BAR.
//! if let Some((addr, size)) = bars[0].memory_info() {
//!     let mmio = dev.map_bar(0, addr, size)?;
//!     let reg = mmio.read32(0);
//! }
//! # Ok::<(), redox_driver_sys::DriverError>(())
//! ```
pub mod dma;
pub mod io;
pub mod irq;
pub mod memory;
pub mod pci;
pub mod pcid_client;
use syscall as redox_syscall;
use thiserror::Error;
/// Unified error type for all driver-side hardware access in this crate.
#[derive(Debug, Error)]
pub enum DriverError {
/// File/scheme I/O failure.
#[error("I/O error: {0}")]
Io(#[from] std::io::Error),
/// Raw Redox system-call failure, preserving the errno.
#[error("system call error: {0}")]
Syscall(#[from] redox_syscall::error::Error),
/// A physical address that cannot be mapped (e.g. null).
#[error("invalid address: {0}")]
InvalidAddress(u64),
/// A caller-supplied parameter failed validation.
#[error("invalid parameter: {0}")]
InvalidParam(&'static str),
/// A physical-memory mapping attempt failed.
#[error("mapping failed for {phys:#x}+{size:#x}: {reason}")]
MappingFailed {
phys: u64,
size: usize,
reason: String,
},
#[error("device not found: {0}")]
DeviceNotFound(String),
#[error("firmware not found: {0}")]
FirmwareNotFound(String),
/// PCI enumeration / config-space access failure.
#[error("PCI error: {0}")]
Pci(String),
/// Interrupt acquisition or MSI-X programming failure.
#[error("IRQ error: {0}")]
Irq(String),
/// A required PCI capability or BAR was not present.
#[error("capability not found: {0}")]
CapabilityNotFound(String),
/// Catch-all for errors without a dedicated variant.
#[error("{0}")]
Other(String),
}
/// Crate-wide result alias over [`DriverError`].
pub type Result<T> = core::result::Result<T, DriverError>;
/// Map raw libredox errors to `DriverError::Syscall`, keeping the errno.
impl From<libredox::error::Error> for DriverError {
fn from(error: libredox::error::Error) -> Self {
// Preserve the raw errno rather than going through std::io::Error
// which discards the syscall-specific error code.
Self::Syscall(redox_syscall::error::Error::new(error.errno()))
}
}
@@ -0,0 +1,300 @@
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};
use redox_syscall::flag::{
MAP_SHARED, O_CLOEXEC, O_RDONLY, O_RDWR, O_WRONLY, PROT_READ, PROT_WRITE,
};
use redox_syscall::PAGE_SIZE;
use syscall as redox_syscall;
use crate::{DriverError, Result};
/// Cache attribute used when mapping physical memory; selects the
/// `physical@<suffix>` flavor of the memory scheme path.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CacheType {
    WriteBack,
    Uncacheable,
    WriteCombining,
    DeviceMemory,
}
impl CacheType {
    /// Path suffix understood by the memory scheme (`physical@wb`, …).
    pub fn suffix(&self) -> &'static str {
        match *self {
            CacheType::WriteBack => "wb",
            CacheType::Uncacheable => "uc",
            CacheType::WriteCombining => "wc",
            CacheType::DeviceMemory => "dev",
        }
    }
}
bitflags::bitflags! {
/// Protection flags for an MMIO mapping.
#[derive(Debug, Clone, Copy)]
pub struct MmioProt: u8 {
/// Mapping is readable.
const READ = 0b01;
/// Mapping is writable.
const WRITE = 0b10;
/// Convenience union of READ and WRITE.
const READ_WRITE = 0b11;
}
}
// SAFETY: The memory scheme root FD is cached for the process lifetime.
// This is valid because:
// 1. scheme:memory is a kernel-built-in scheme that never terminates.
// 2. The FD is opened with O_CLOEXEC — children after exec(2) do not inherit it.
// 3. This code MUST NOT be used in processes that fork() without exec() —
//    the child would share the same FD table slot, risking double-close.
//
// Fix: stored as a raw FD with -1 as the "uninitialized" sentinel,
// consistent with the other FD caches in this crate. The previous
// `AtomicPtr` cache used a null pointer as the sentinel, which would have
// mis-handled a legitimate FD value of 0.
static MEMORY_ROOT_FD: core::sync::atomic::AtomicI32 = core::sync::atomic::AtomicI32::new(-1);
/// Return a fresh dup of the cached `/scheme/memory/scheme-root` FD,
/// opening and caching the root FD on first use.
///
/// The caller owns the returned `Fd`; the cached root FD is intentionally
/// leaked for the life of the process.
fn ensure_memory_root() -> Result<libredox::Fd> {
    // Hand callers an independent dup so the cached FD itself is never closed.
    fn dup_of(raw: usize) -> Result<libredox::Fd> {
        let dup_fd = libredox::call::dup(raw, b"")
            .map_err(|e| std::io::Error::from_raw_os_error(e.errno()))?;
        Ok(libredox::Fd::new(dup_fd))
    }
    let cached = MEMORY_ROOT_FD.load(Ordering::Acquire);
    if cached >= 0 {
        return dup_of(cached as usize);
    }
    let fd = libredox::Fd::open("/scheme/memory/scheme-root", O_CLOEXEC as i32, 0)?;
    let raw = fd.raw();
    match MEMORY_ROOT_FD.compare_exchange(-1, raw as i32, Ordering::AcqRel, Ordering::Acquire) {
        Ok(_) => {
            // We won the init race: keep the root FD open forever.
            std::mem::forget(fd);
            dup_of(raw)
        }
        // Lost the race: `fd` drops here (closing our FD) and the winner's
        // cached FD is used instead.
        Err(existing) => dup_of(existing as usize),
    }
}
/// A page-aligned MMIO mapping of physical address space with
/// bounds-checked volatile accessors.
pub struct MmioRegion {
ptr: *mut u8,
// Page-rounded mapped length in bytes.
size: usize,
}
impl MmioRegion {
    /// Map `size` bytes of physical address space at `phys_addr` with the
    /// given cache attribute and protection. The length is rounded up to a
    /// whole number of pages; `size()` reports the rounded length.
    ///
    /// # Errors
    /// `InvalidAddress` for a null physical address; `MappingFailed` when the
    /// memory-scheme mmap fails.
    pub fn map(phys_addr: u64, size: usize, cache: CacheType, prot: MmioProt) -> Result<Self> {
        if phys_addr == 0 {
            return Err(DriverError::InvalidAddress(phys_addr));
        }
        let aligned_size = size.next_multiple_of(PAGE_SIZE);
        // Cache attribute is selected by path suffix, e.g. "physical@uc".
        let path = format!("physical@{}", cache.suffix());
        // Open mode mirrors the requested protection.
        let mode = if prot.contains(MmioProt::READ | MmioProt::WRITE) {
            O_RDWR
        } else if prot.contains(MmioProt::WRITE) {
            O_WRONLY
        } else {
            O_RDONLY
        };
        let mut mmap_prot = redox_syscall::MapFlags::empty();
        if prot.contains(MmioProt::READ) {
            mmap_prot |= PROT_READ;
        }
        if prot.contains(MmioProt::WRITE) {
            mmap_prot |= PROT_WRITE;
        }
        let root_fd = ensure_memory_root()?;
        let mem_fd = root_fd.openat(&path, (O_CLOEXEC | mode) as i32, 0)?;
        // SAFETY: mapping at a kernel-chosen address from a valid FD with a
        // page-aligned length.
        let ptr = unsafe {
            libredox::call::mmap(libredox::call::MmapArgs {
                fd: mem_fd.raw(),
                offset: phys_addr,
                length: aligned_size,
                flags: MAP_SHARED.bits() as u32,
                prot: mmap_prot.bits() as u32,
                addr: ptr::null_mut(),
            })
        }
        .map_err(|e| DriverError::MappingFailed {
            phys: phys_addr,
            size,
            reason: format!("{e:?}"),
        })?;
        Ok(Self {
            ptr: ptr as *mut u8,
            size: aligned_size,
        })
    }
    /// Bounds-check a `len`-byte access at `offset`; logs and returns `false`
    /// when the access would fall outside the mapping. Centralizes the check
    /// that was previously copy-pasted into every accessor.
    /// NOTE(review): offsets are not checked for natural alignment; unaligned
    /// MMIO access may fault on some platforms — callers must pass aligned
    /// offsets.
    #[inline]
    fn in_bounds(&self, op: &str, offset: usize, len: usize) -> bool {
        let ok = offset.checked_add(len).map_or(false, |end| end <= self.size);
        if !ok {
            log::error!(
                "MMIO {} out of bounds: offset={:#x}, size={:#x}",
                op,
                offset,
                self.size
            );
        }
        ok
    }
    /// Volatile 8-bit read; returns 0 (and logs) when out of bounds.
    #[inline]
    pub fn read8(&self, offset: usize) -> u8 {
        if !self.in_bounds("read8", offset, 1) {
            return 0;
        }
        // SAFETY: bounds-checked above; `ptr` is a live mapping.
        unsafe { core::ptr::read_volatile(self.ptr.add(offset)) }
    }
    /// Volatile 8-bit write; logged no-op when out of bounds.
    #[inline]
    pub fn write8(&self, offset: usize, val: u8) {
        if !self.in_bounds("write8", offset, 1) {
            return;
        }
        // SAFETY: bounds-checked above; `ptr` is a live mapping.
        unsafe { core::ptr::write_volatile(self.ptr.add(offset), val) }
    }
    /// Volatile 16-bit read; returns 0 (and logs) when out of bounds.
    #[inline]
    pub fn read16(&self, offset: usize) -> u16 {
        if !self.in_bounds("read16", offset, 2) {
            return 0;
        }
        // SAFETY: bounds-checked above; `ptr` is a live mapping.
        unsafe { core::ptr::read_volatile(self.ptr.add(offset) as *const u16) }
    }
    /// Volatile 16-bit write; logged no-op when out of bounds.
    #[inline]
    pub fn write16(&self, offset: usize, val: u16) {
        if !self.in_bounds("write16", offset, 2) {
            return;
        }
        // SAFETY: bounds-checked above; `ptr` is a live mapping.
        unsafe { core::ptr::write_volatile(self.ptr.add(offset) as *mut u16, val) }
    }
    /// Volatile 32-bit read; returns 0 (and logs) when out of bounds.
    #[inline]
    pub fn read32(&self, offset: usize) -> u32 {
        if !self.in_bounds("read32", offset, 4) {
            return 0;
        }
        // SAFETY: bounds-checked above; `ptr` is a live mapping.
        unsafe { core::ptr::read_volatile(self.ptr.add(offset) as *const u32) }
    }
    /// Volatile 32-bit write; logged no-op when out of bounds.
    #[inline]
    pub fn write32(&self, offset: usize, val: u32) {
        if !self.in_bounds("write32", offset, 4) {
            return;
        }
        // SAFETY: bounds-checked above; `ptr` is a live mapping.
        unsafe { core::ptr::write_volatile(self.ptr.add(offset) as *mut u32, val) }
    }
    /// Volatile 64-bit read; returns 0 (and logs) when out of bounds.
    #[inline]
    pub fn read64(&self, offset: usize) -> u64 {
        if !self.in_bounds("read64", offset, 8) {
            return 0;
        }
        // SAFETY: bounds-checked above; `ptr` is a live mapping.
        unsafe { core::ptr::read_volatile(self.ptr.add(offset) as *const u64) }
    }
    /// Volatile 64-bit write; logged no-op when out of bounds.
    #[inline]
    pub fn write64(&self, offset: usize, val: u64) {
        if !self.in_bounds("write64", offset, 8) {
            return;
        }
        // SAFETY: bounds-checked above; `ptr` is a live mapping.
        unsafe { core::ptr::write_volatile(self.ptr.add(offset) as *mut u64, val) }
    }
    /// Copy `buf.len()` bytes out of the region with per-byte volatile reads.
    pub fn read_bytes(&self, offset: usize, buf: &mut [u8]) {
        let ok = offset
            .checked_add(buf.len())
            .map_or(false, |end| end <= self.size);
        if !ok {
            log::error!(
                "MMIO read_bytes out of bounds: offset={:#x}, len={:#x}, size={:#x}",
                offset,
                buf.len(),
                self.size
            );
            return;
        }
        // Volatile byte-by-byte read for MMIO correctness (compiler may
        // optimise away or reorder copy_nonoverlapping).
        for (i, byte) in buf.iter_mut().enumerate() {
            // SAFETY: bounds-checked above.
            *byte = unsafe { core::ptr::read_volatile(self.ptr.add(offset + i)) };
        }
    }
    /// Copy `buf` into the region with per-byte volatile writes.
    pub fn write_bytes(&self, offset: usize, buf: &[u8]) {
        let ok = offset
            .checked_add(buf.len())
            .map_or(false, |end| end <= self.size);
        if !ok {
            log::error!(
                "MMIO write_bytes out of bounds: offset={:#x}, len={:#x}, size={:#x}",
                offset,
                buf.len(),
                self.size
            );
            return;
        }
        // Volatile byte-by-byte write for MMIO correctness.
        for (i, byte) in buf.iter().enumerate() {
            // SAFETY: bounds-checked above.
            unsafe { core::ptr::write_volatile(self.ptr.add(offset + i), *byte) };
        }
    }
    /// Raw read-only pointer to the mapping (unchecked access).
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr
    }
    /// Raw mutable pointer to the mapping (unchecked access).
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr
    }
    /// Page-rounded mapped length in bytes.
    pub fn size(&self) -> usize {
        self.size
    }
}
impl Drop for MmioRegion {
/// Unmap the region; munmap errors are ignored (nothing useful to do).
fn drop(&mut self) {
if !self.ptr.is_null() {
// SAFETY: `ptr`/`size` describe the mapping created in `map`.
let _ = unsafe { libredox::call::munmap(self.ptr as *mut (), self.size) };
}
}
}
// SAFETY: the region exclusively owns its mapping and all access goes
// through volatile reads/writes. NOTE(review): `Sync` permits concurrent
// `&self` register writes from multiple threads; confirm callers serialize
// accesses where the device requires it.
unsafe impl Send for MmioRegion {}
unsafe impl Sync for MmioRegion {}
@@ -0,0 +1,680 @@
use std::io::{Read, Seek, SeekFrom, Write};
use crate::{DriverError, Result};
/// PCI vendor id used by AMD/ATI GPUs (AMD CPUs/chipsets use 0x1022).
pub const PCI_VENDOR_ID_AMD: u16 = 0x1002;
/// PCI vendor id for Intel.
pub const PCI_VENDOR_ID_INTEL: u16 = 0x8086;
/// PCI vendor id for NVIDIA.
pub const PCI_VENDOR_ID_NVIDIA: u16 = 0x10DE;
/// Base class code for display controllers.
pub const PCI_CLASS_DISPLAY: u8 = 0x03;
/// Display subclass: VGA-compatible controller.
pub const PCI_CLASS_DISPLAY_VGA: u8 = 0x00;
/// Display subclass: 3D controller (non-VGA).
pub const PCI_CLASS_DISPLAY_3D: u8 = 0x02;
/// Header type 0: a normal (non-bridge) function.
pub const PCI_HEADER_TYPE_NORMAL: u8 = 0x00;
/// Segment/bus/device/function address of one PCI function.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct PciLocation {
    pub segment: u16,
    pub bus: u8,
    pub device: u8,
    pub function: u8,
}
impl PciLocation {
    /// Path of this function under the Redox PCI scheme.
    pub fn scheme_path(&self) -> String {
        format!(
            "/scheme/pci/{:04x}--{:02x}--{:02x}.{}",
            self.segment, self.bus, self.device, self.function
        )
    }
    /// Pack bus/device/function into the classic BDF encoding
    /// (bus in bits 23..16, device in 15..11, function in 10..8).
    pub fn bdf(&self) -> u32 {
        let bus = u32::from(self.bus);
        let device = u32::from(self.device) & 0x1F;
        let function = u32::from(self.function) & 0x07;
        (bus << 16) | (device << 11) | (function << 8)
    }
    /// Inverse of [`Self::bdf`]. The segment is not encoded in a BDF, so it
    /// is restored as 0.
    pub fn from_bdf(bdf: u32) -> Self {
        Self {
            segment: 0,
            bus: (bdf >> 16) as u8,
            device: ((bdf >> 11) & 0x1F) as u8,
            function: ((bdf >> 8) & 0x07) as u8,
        }
    }
}
impl std::fmt::Display for PciLocation {
    /// Conventional `ssss:bb:dd.f` rendering.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{:04x}:{:02x}:{:02x}.{}",
            self.segment, self.bus, self.device, self.function
        )
    }
}
/// One decoded Base Address Register.
#[derive(Clone, Copy, Debug)]
pub struct PciBarInfo {
    pub index: usize,
    pub kind: PciBarKind,
    pub addr: u64,
    pub size: u64,
    pub prefetchable: bool,
}
/// BAR flavor as decoded from the low bits of the register.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PciBarKind {
    Memory32,
    Memory64,
    Io,
    None,
}
impl PciBarInfo {
    /// True for 32- or 64-bit memory BARs.
    pub fn is_memory(&self) -> bool {
        match self.kind {
            PciBarKind::Memory32 | PciBarKind::Memory64 => true,
            PciBarKind::Io | PciBarKind::None => false,
        }
    }
    /// True for port-I/O BARs.
    pub fn is_io(&self) -> bool {
        matches!(self.kind, PciBarKind::Io)
    }
    /// `(address, size)` of a populated memory BAR; `None` for unpopulated
    /// or non-memory BARs.
    pub fn memory_info(&self) -> Option<(u64, usize)> {
        (self.is_memory() && self.addr != 0 && self.size != 0)
            .then(|| (self.addr, self.size as usize))
    }
    /// Port number of a populated I/O BAR.
    pub fn io_port(&self) -> Option<u16> {
        (self.is_io() && self.addr != 0).then(|| self.addr as u16)
    }
}
/// Command register bit: respond to I/O space accesses.
pub const PCI_CMD_IO_SPACE: u16 = 0x0001;
/// Command register bit: respond to memory space accesses.
pub const PCI_CMD_MEMORY_SPACE: u16 = 0x0002;
/// Command register bit: allow the device to master the bus (DMA).
pub const PCI_CMD_BUS_MASTER: u16 = 0x0004;
/// Command register bit: enable Memory Write and Invalidate.
pub const PCI_CMD_MEM_WRITE_INVALIDATE: u16 = 0x0010;
/// Command register bit: enable parity error response.
pub const PCI_CMD_PARITY_ERROR_RESPONSE: u16 = 0x0040;
/// Command register bit: enable SERR# reporting.
pub const PCI_CMD_SERR_ENABLE: u16 = 0x0100;
/// Command register bit: disable legacy INTx interrupts.
pub const PCI_CMD_INTX_DISABLE: u16 = 0x0400;
/// One entry of a device's capability list.
#[derive(Clone, Debug)]
pub struct PciCapability {
pub id: u8,
pub offset: u8,
// Sub-id for vendor-specific capabilities, when parsed.
pub vendor_cap_id: Option<u8>,
}
/// Capability id: MSI.
pub const PCI_CAP_ID_MSI: u8 = 0x05;
/// Capability id: MSI-X.
pub const PCI_CAP_ID_MSIX: u8 = 0x11;
/// Capability id: PCI Express.
pub const PCI_CAP_ID_PCIE: u8 = 0x10;
/// Capability id: Power Management.
pub const PCI_CAP_ID_POWER: u8 = 0x01;
/// Capability id: vendor-specific.
pub const PCI_CAP_ID_VNDR: u8 = 0x09;
/// Parsed MSI-X capability structure.
#[derive(Clone, Debug)]
pub struct MsixCapability {
pub table_bar: u8,
pub table_offset: u32,
pub pba_bar: u8,
pub pba_offset: u32,
// Number of table entries, as consumed by `MsixTable::map`.
pub table_size: u16,
pub masked: bool,
}
/// Snapshot of a device gathered by `PciDevice::full_info`.
#[derive(Clone, Debug)]
pub struct PciDeviceInfo {
pub location: PciLocation,
pub vendor_id: u16,
pub device_id: u16,
pub revision: u8,
pub class_code: u8,
pub subclass: u8,
pub prog_if: u8,
pub header_type: u8,
// Legacy interrupt line, when wired (config byte 0x3C not 0/0xFF).
pub irq: Option<u32>,
pub bars: Vec<PciBarInfo>,
pub capabilities: Vec<PciCapability>,
}
impl PciDeviceInfo {
    /// True when the class code marks this as a display controller.
    pub fn is_gpu(&self) -> bool {
        self.class_code == PCI_CLASS_DISPLAY
    }
    /// Display-class device from AMD/ATI.
    pub fn is_amd_gpu(&self) -> bool {
        self.is_gpu() && self.vendor_id == PCI_VENDOR_ID_AMD
    }
    /// Display-class device from Intel.
    pub fn is_intel_gpu(&self) -> bool {
        self.is_gpu() && self.vendor_id == PCI_VENDOR_ID_INTEL
    }
    /// First capability with the given id, if present.
    pub fn find_capability(&self, id: u8) -> Option<&PciCapability> {
        self.capabilities.iter().find(|cap| cap.id == id)
    }
    /// Parse the MSI-X capability, re-opening the device's config space.
    /// Returns `None` when the capability is absent or parsing fails.
    pub fn find_msix(&self) -> Option<MsixCapability> {
        let cap = self.find_capability(PCI_CAP_ID_MSIX)?;
        let mut dev = PciDevice::from_info(self).ok()?;
        dev.parse_msix(cap.offset).ok()
    }
    /// Memory BAR with the given index, if populated.
    pub fn find_memory_bar(&self, index: usize) -> Option<&PciBarInfo> {
        self.bars
            .iter()
            .find(|bar| bar.index == index && bar.is_memory())
    }
}
/// Open handle to one PCI function's configuration space, backed by the
/// `/scheme/pci/<location>/config` file.
pub struct PciDevice {
location: PciLocation,
// Seekable config-space file; all register accessors go through it.
config_fd: std::fs::File,
}
impl PciDevice {
/// Open a PCI function's config space by explicit coordinates.
pub fn open(segment: u16, bus: u8, device: u8, function: u8) -> Result<Self> {
let loc = PciLocation {
segment,
bus,
device,
function,
};
Self::open_location(&loc)
}
/// Open `<scheme_path>/config` read/write for register access.
pub fn open_location(loc: &PciLocation) -> Result<Self> {
let config_path = format!("{}/config", loc.scheme_path());
let fd = std::fs::OpenOptions::new()
.read(true)
.write(true)
.open(&config_path)
.map_err(|e| {
DriverError::Pci(format!("cannot open PCI config at {}: {}", config_path, e))
})?;
Ok(PciDevice {
location: *loc,
config_fd: fd,
})
}
/// Re-open the device described by a previously gathered `PciDeviceInfo`.
pub fn from_info(info: &PciDeviceInfo) -> Result<Self> {
Self::open_location(&info.location)
}
/// Location this handle was opened for.
pub fn location(&self) -> &PciLocation {
    &self.location
}
/// Seek to `offset` and read exactly `N` bytes of config space.
fn read_config_bytes<const N: usize>(&mut self, offset: u64) -> Result<[u8; N]> {
    self.config_fd.seek(SeekFrom::Start(offset))?;
    let mut buf = [0u8; N];
    self.config_fd.read_exact(&mut buf)?;
    Ok(buf)
}
/// Seek to `offset` and write all of `bytes` into config space.
fn write_config_bytes(&mut self, offset: u64, bytes: &[u8]) -> Result<()> {
    self.config_fd.seek(SeekFrom::Start(offset))?;
    self.config_fd.write_all(bytes)?;
    Ok(())
}
/// Read a little-endian 32-bit config register.
pub fn read_config_dword(&mut self, offset: u64) -> Result<u32> {
    Ok(u32::from_le_bytes(self.read_config_bytes::<4>(offset)?))
}
/// Read a little-endian 16-bit config register.
pub fn read_config_word(&mut self, offset: u64) -> Result<u16> {
    Ok(u16::from_le_bytes(self.read_config_bytes::<2>(offset)?))
}
/// Read a single config byte.
pub fn read_config_byte(&mut self, offset: u64) -> Result<u8> {
    Ok(self.read_config_bytes::<1>(offset)?[0])
}
/// Write a little-endian 32-bit config register.
pub fn write_config_dword(&mut self, offset: u64, val: u32) -> Result<()> {
    self.write_config_bytes(offset, &val.to_le_bytes())
}
/// Write a little-endian 16-bit config register.
pub fn write_config_word(&mut self, offset: u64, val: u16) -> Result<()> {
    self.write_config_bytes(offset, &val.to_le_bytes())
}
/// Write a single config byte.
pub fn write_config_byte(&mut self, offset: u64, val: u8) -> Result<()> {
    self.write_config_bytes(offset, &[val])
}
/// Vendor ID (config offset 0x00).
pub fn vendor_id(&mut self) -> Result<u16> {
self.read_config_word(0x00)
}
/// Device ID (config offset 0x02).
pub fn device_id(&mut self) -> Result<u16> {
self.read_config_word(0x02)
}
/// Command register (config offset 0x04).
pub fn command(&mut self) -> Result<u16> {
self.read_config_word(0x04)
}
/// Overwrite the command register (config offset 0x04).
pub fn set_command(&mut self, flags: u16) -> Result<()> {
self.write_config_word(0x04, flags)
}
pub fn enable_device(&mut self) -> Result<()> {
let mut cmd = self.command()?;
cmd |= PCI_CMD_IO_SPACE | PCI_CMD_MEMORY_SPACE | PCI_CMD_BUS_MASTER;
self.set_command(cmd)
}
pub fn set_bus_master(&mut self, enable: bool) -> Result<()> {
let mut cmd = self.command()?;
if enable {
cmd |= 0x0004;
} else {
cmd &= !0x0004;
}
self.set_command(cmd)
}
pub fn set_intx_disable(&mut self, disable: bool) -> Result<()> {
let mut cmd = self.command()?;
if disable {
cmd |= 0x0400;
} else {
cmd &= !0x0400;
}
self.set_command(cmd)
}
pub fn status(&mut self) -> Result<u16> {
self.read_config_word(0x06)
}
pub fn revision(&mut self) -> Result<u8> {
self.read_config_byte(0x08)
}
pub fn class_code(&mut self) -> Result<u8> {
self.read_config_byte(0x0B)
}
pub fn subclass(&mut self) -> Result<u8> {
self.read_config_byte(0x0A)
}
pub fn prog_if(&mut self) -> Result<u8> {
self.read_config_byte(0x09)
}
/// Header type (config offset 0x0E) with the multi-function flag
/// (bit 7) stripped off.
pub fn header_type(&mut self) -> Result<u8> {
    Ok(self.read_config_byte(0x0E)? & 0x7F)
}
/// Whether bit 7 of the header-type byte (config offset 0x0E) flags this
/// device as multi-function.
pub fn is_multi_function(&mut self) -> Result<bool> {
    let raw = self.read_config_byte(0x0E)?;
    Ok((raw & 0x80) == 0x80)
}
/// Interrupt Line register (config offset 0x3C).
pub fn irq_line(&mut self) -> Result<u8> {
    const IRQ_LINE_OFFSET: u64 = 0x3C;
    self.read_config_byte(IRQ_LINE_OFFSET)
}
/// Interrupt Pin register (config offset 0x3D).
pub fn irq_pin(&mut self) -> Result<u8> {
    const IRQ_PIN_OFFSET: u64 = 0x3D;
    self.read_config_byte(IRQ_PIN_OFFSET)
}
/// Read the common identification registers and assemble a
/// `PciDeviceInfo` snapshot. BARs and the capability list are decoded
/// only for normal (type-0) headers; other header types get empty lists.
pub fn full_info(&mut self) -> Result<PciDeviceInfo> {
    let vendor_id = self.vendor_id()?;
    let device_id = self.device_id()?;
    let revision = self.revision()?;
    let prog_if = self.prog_if()?;
    let subclass = self.subclass()?;
    let class_code = self.class_code()?;
    let header_type = self.header_type()?;
    let irq_byte = self.irq_line()?;
    // Only type-0 headers carry the standard BAR/capability layout.
    let (bars, capabilities) = if header_type == PCI_HEADER_TYPE_NORMAL {
        (self.parse_bars()?, self.parse_capabilities()?)
    } else {
        (Vec::new(), Vec::new())
    };
    // 0x00 and 0xFF both conventionally mean "no legacy IRQ assigned".
    let irq = match irq_byte {
        0x00 | 0xFF => None,
        line => Some(line as u32),
    };
    Ok(PciDeviceInfo {
        location: self.location,
        vendor_id,
        device_id,
        revision,
        class_code,
        subclass,
        prog_if,
        header_type,
        irq,
        bars,
        capabilities,
    })
}
/// Decode the six BAR slots of a type-0 header (config 0x10..=0x24).
///
/// 32-bit memory BARs and I/O BARs occupy one dword slot; 64-bit memory
/// BARs occupy two. Sizing is delegated to `probe_bar_size` /
/// `probe_bar64_size`.
///
/// Fix: a BAR whose low dword claims "64-bit" in the LAST slot (0x24) has
/// no room for a high dword. The old code read — and, via the size probe,
/// WROTE 0xFFFFFFFF to — offset 0x28, which is not a BAR register.
/// Such a malformed BAR is now decoded as 32-bit instead.
pub fn parse_bars(&mut self) -> Result<Vec<PciBarInfo>> {
    let mut bars = Vec::with_capacity(6);
    let mut bar_idx = 0usize;
    let mut config_offset = 0x10u64;
    while bar_idx < 6 && config_offset <= 0x24 {
        let val_lo = self.read_config_dword(config_offset)?;
        if val_lo == 0 {
            // All-zero register: BAR slot not implemented.
            bars.push(PciBarInfo {
                index: bar_idx,
                kind: PciBarKind::None,
                addr: 0,
                size: 0,
                prefetchable: false,
            });
            bar_idx += 1;
            config_offset += 4;
        } else if (val_lo & 0x01) != 0 {
            // Bit 0 set: I/O space BAR; bits 31:2 hold the port base.
            bars.push(PciBarInfo {
                index: bar_idx,
                kind: PciBarKind::Io,
                addr: (val_lo & 0xFFFFFFFC) as u64,
                size: self.probe_bar_size(config_offset)?,
                prefetchable: false,
            });
            bar_idx += 1;
            config_offset += 4;
        } else {
            // Memory BAR: bits 2:1 encode the type (0b10 = 64-bit),
            // bit 3 the prefetchable flag, bits 31:4 the base address.
            let is_64bit = (val_lo & 0x04) != 0;
            let prefetchable = (val_lo & 0x08) != 0;
            let addr_lo = (val_lo & 0xFFFFFFF0) as u64;
            // A 64-bit BAR needs the next slot for its high dword; the
            // last slot (0x24) has none, so guard against probing 0x28.
            if is_64bit && config_offset < 0x24 {
                let val_hi = self.read_config_dword(config_offset + 4)?;
                bars.push(PciBarInfo {
                    index: bar_idx,
                    kind: PciBarKind::Memory64,
                    addr: addr_lo | ((val_hi as u64) << 32),
                    size: self.probe_bar64_size(config_offset)?,
                    prefetchable,
                });
                // A 64-bit BAR consumes two slots.
                bar_idx += 2;
                config_offset += 8;
            } else {
                bars.push(PciBarInfo {
                    index: bar_idx,
                    kind: PciBarKind::Memory32,
                    addr: addr_lo,
                    size: self.probe_bar_size(config_offset)?,
                    prefetchable,
                });
                bar_idx += 1;
                config_offset += 4;
            }
        }
    }
    Ok(bars)
}
/// Determine the size of a 32-bit BAR at `offset` using the standard
/// write-all-ones probe (PCI Local Bus Spec 3.0, §6.2.5.1): write
/// 0xFFFFFFFF, read back, mask out the flag bits, invert, add one.
///
/// Fix: the old formula `!(readback & mask) & mask` omitted the `+ 1`
/// and applied a second `& mask`, producing non-power-of-two sizes
/// (e.g. 0xFF0 instead of 0x1000 for a 4 KiB BAR) and reporting
/// unimplemented BARs (readback 0) as size `mask` instead of 0.
fn probe_bar_size(&mut self, offset: u64) -> Result<u64> {
    let original = self.read_config_dword(offset)?;
    self.write_config_dword(offset, 0xFFFFFFFF)?;
    let readback = self.read_config_dword(offset)?;
    // Restore the live BAR value before computing anything.
    self.write_config_dword(offset, original)?;
    let is_io = (original & 0x01) != 0;
    // Flag bits are not address bits: 2 low bits for I/O, 4 for memory.
    let mask: u32 = if is_io { 0xFFFFFFFC } else { 0xFFFFFFF0 };
    let addr_bits = readback & mask;
    if addr_bits == 0 {
        // No writable address bits: BAR not implemented.
        return Ok(0);
    }
    // Two's-complement size; wrapping covers the all-bits-writable case.
    Ok((!addr_bits).wrapping_add(1) as u64)
}
/// Size a 64-bit memory BAR spanning `offset` and `offset + 4`.
///
/// Fix: the two halves must be combined into one 64-bit mask BEFORE the
/// invert-and-add-one step. The old code inverted each dword separately
/// and OR'd them, which drops the carry between the halves — e.g. a
/// 4 GiB BAR (readback lo = 0, hi = 0xFFFFFFFF) was sized as 0xFFFFFFF0
/// instead of 0x1_0000_0000.
fn probe_bar64_size(&mut self, offset: u64) -> Result<u64> {
    let original_lo = self.read_config_dword(offset)?;
    let original_hi = self.read_config_dword(offset + 4)?;
    self.write_config_dword(offset, 0xFFFFFFFF)?;
    self.write_config_dword(offset + 4, 0xFFFFFFFF)?;
    let readback_lo = self.read_config_dword(offset)?;
    let readback_hi = self.read_config_dword(offset + 4)?;
    // Restore both halves before computing anything.
    self.write_config_dword(offset, original_lo)?;
    self.write_config_dword(offset + 4, original_hi)?;
    // Mask the memory-BAR flag bits (3:0) out of the low half, keep the
    // whole high half, and treat the pair as one 64-bit address mask.
    let addr_bits = ((readback_hi as u64) << 32) | ((readback_lo & 0xFFFFFFF0) as u64);
    if addr_bits == 0 {
        // No writable address bits: BAR not implemented.
        return Ok(0);
    }
    Ok((!addr_bits).wrapping_add(1))
}
/// Walk the capability linked list rooted at config offset 0x34.
///
/// Fix: per PCI Local Bus Spec 3.0 §6.7 the bottom two bits of every
/// capability pointer are reserved and must be masked off before use;
/// the old code consumed the raw byte, which mis-addresses the list on
/// hardware that sets those bits.
pub fn parse_capabilities(&mut self) -> Result<Vec<PciCapability>> {
    let status = self.status()?;
    // Status bit 4: capability list present.
    if status & 0x0010 == 0 {
        return Ok(Vec::new());
    }
    let mut caps = Vec::new();
    // Mask the two reserved low bits of the capability pointer.
    let mut cap_ptr = (self.read_config_byte(0x34)? & 0xFC) as u64;
    let mut visited = 0u8;
    // Pointers below 0x40 are invalid; `visited` bounds a malformed list.
    while cap_ptr >= 0x40 && visited < 48 {
        let cap_id = self.read_config_byte(cap_ptr)?;
        let next_ptr = (self.read_config_byte(cap_ptr + 1)? & 0xFC) as u64;
        if cap_id == 0 {
            break;
        }
        // Vendor-specific capabilities carry a sub-id in their third byte.
        let vendor_cap_id = if cap_id == PCI_CAP_ID_VNDR {
            self.read_config_byte(cap_ptr + 2).ok()
        } else {
            None
        };
        caps.push(PciCapability {
            id: cap_id,
            offset: cap_ptr as u8,
            vendor_cap_id,
        });
        // A zero or non-forward pointer terminates (or would loop) the list.
        if next_ptr == 0 || next_ptr <= cap_ptr {
            break;
        }
        cap_ptr = next_ptr;
        visited += 1;
    }
    Ok(caps)
}
/// Decode the MSI-X capability structure at `cap_offset`.
///
/// Message Control layout (cap + 2): bits 10:0 = table size − 1,
/// bit 14 = Function Mask, bit 15 = MSI-X Enable.
///
/// Fix: `masked` previously tested bit 15 — that is the MSI-X *Enable*
/// bit (the very bit `enable_msix` in this file sets). The per-function
/// mask is bit 14 (0x4000).
pub fn parse_msix(&mut self, cap_offset: u8) -> Result<MsixCapability> {
    let msg_ctrl = self.read_config_word(cap_offset as u64 + 2)?;
    let table_raw = self.read_config_dword(cap_offset as u64 + 4)?;
    let pba_raw = self.read_config_dword(cap_offset as u64 + 8)?;
    // Low 3 bits select the BAR (BIR); the rest is a QWORD-aligned offset.
    let table_bar = (table_raw & 0x07) as u8;
    let table_offset = table_raw & 0xFFFFFFF8;
    let pba_bar = (pba_raw & 0x07) as u8;
    let pba_offset = pba_raw & 0xFFFFFFF8;
    // Table size field is encoded as N - 1.
    let table_size = (msg_ctrl & 0x07FF) + 1;
    // Bit 14: Function Mask (all vectors masked while set).
    let masked = (msg_ctrl & 0x4000) != 0;
    Ok(MsixCapability {
        table_bar,
        table_offset,
        pba_bar,
        pba_offset,
        table_size,
        masked,
    })
}
/// Set the MSI-X Enable bit (Message Control bit 15) of the capability
/// at `cap_offset`.
pub fn enable_msix(&mut self, cap_offset: u8) -> Result<()> {
    let ctrl_offset = cap_offset as u64 + 2;
    let msg_ctrl = self.read_config_word(ctrl_offset)?;
    self.write_config_word(ctrl_offset, msg_ctrl | 0x8000)
}
/// Clear the MSI-X Enable bit (Message Control bit 15) of the capability
/// at `cap_offset`.
pub fn disable_msix(&mut self, cap_offset: u8) -> Result<()> {
    let ctrl_offset = cap_offset as u64 + 2;
    let msg_ctrl = self.read_config_word(ctrl_offset)?;
    self.write_config_word(ctrl_offset, msg_ctrl & !0x8000)
}
/// Map `size` bytes of the BAR's physical range at `phys_addr` into this
/// process as read-write device (uncached) memory.
///
/// `_bar_index` is accepted but currently unused — the caller supplies
/// the physical address and size directly (e.g. from `parse_bars`).
pub fn map_bar(
    &mut self,
    _bar_index: usize,
    phys_addr: u64,
    size: usize,
) -> Result<crate::memory::MmioRegion> {
    crate::memory::MmioRegion::map(
        phys_addr,
        size,
        crate::memory::CacheType::DeviceMemory,
        crate::memory::MmioProt::READ_WRITE,
    )
}
}
/// Raw `Write` pass-through to the config-space file descriptor.
///
/// NOTE(review): unlike `write_config_*`, this does NOT seek first — it
/// writes at the fd's current cursor position. Callers must position the
/// fd themselves; confirm this is intentional before relying on it.
impl std::io::Write for PciDevice {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.config_fd.write(buf)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.config_fd.flush()
    }
}
pub fn enumerate_pci_class(class: u8) -> Result<Vec<PciDeviceInfo>> {
let entries = std::fs::read_dir("/scheme/pci")?;
let mut devices = Vec::new();
for entry in entries {
let entry = entry?;
let name = entry.file_name();
let name_str = match name.to_str() {
Some(s) => s,
None => continue,
};
// pcid scheme entries use format: segment--bus--device.function
let location = match parse_scheme_entry(name_str) {
Some(loc) => loc,
None => continue,
};
let config_path = format!("{}/config", location.scheme_path());
if let Ok(data) = std::fs::read(&config_path) {
if data.len() < 64 {
continue;
}
let class_code = data[0x0b];
if class_code != class {
continue;
}
let vendor_id = u16::from_le_bytes([data[0x00], data[0x01]]);
let device_id = u16::from_le_bytes([data[0x02], data[0x03]]);
let subclass = data[0x0a];
let prog_if = data[0x09];
let revision = data[0x08];
let header_type = data[0x0e] & 0x7F;
let irq_line = data[0x3c];
devices.push(PciDeviceInfo {
location,
vendor_id,
device_id,
revision,
class_code,
subclass,
prog_if,
header_type,
irq: if irq_line != 0 && irq_line != 0xff {
Some(irq_line as u32)
} else {
None
},
bars: Vec::new(),
capabilities: Vec::new(),
});
}
}
log::debug!(
"PCI enumeration for class {class:#04x}: found {} devices",
devices.len()
);
Ok(devices)
}
/// Parse a pcid scheme directory entry of the form
/// `SSSS--BB--DD.F` (all fields hexadecimal) into a `PciLocation`.
/// Returns `None` on any structural or numeric parse failure.
fn parse_scheme_entry(name: &str) -> Option<PciLocation> {
    let mut fields = name.splitn(3, "--");
    let segment = u16::from_str_radix(fields.next()?, 16).ok()?;
    let bus = u8::from_str_radix(fields.next()?, 16).ok()?;
    // The final field is "device.function".
    let (dev_str, func_str) = fields.next()?.split_once('.')?;
    let device = u8::from_str_radix(dev_str, 16).ok()?;
    let function = u8::from_str_radix(func_str, 16).ok()?;
    Some(PciLocation {
        segment,
        bus,
        device,
        function,
    })
}
/// Enumerate display-class devices and keep only AMD GPUs.
pub fn find_amd_gpus() -> Result<Vec<PciDeviceInfo>> {
    Ok(enumerate_pci_class(PCI_CLASS_DISPLAY)?
        .into_iter()
        .filter(|dev| dev.is_amd_gpu())
        .collect())
}
/// Enumerate display-class devices and keep only Intel GPUs.
pub fn find_intel_gpus() -> Result<Vec<PciDeviceInfo>> {
    Ok(enumerate_pci_class(PCI_CLASS_DISPLAY)?
        .into_iter()
        .filter(|dev| dev.is_intel_gpu())
        .collect())
}
@@ -0,0 +1,135 @@
use std::fs::File;
use std::io::{Read, Write};
use std::os::fd::{FromRawFd, IntoRawFd, RawFd};
use std::path::Path;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
/// Identity of a single PCI function as reported by pcid over the
/// client channel (see `PcidClientResponse::Config`).
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct PciFunction {
    /// PCI segment group (domain).
    pub segment: u16,
    /// Bus number within the segment.
    pub bus: u8,
    /// Device number on the bus.
    pub device: u8,
    /// Function number within the device.
    pub function: u8,
    /// Legacy IRQ line, or `None` when no line is assigned.
    pub irq: Option<u8>,
}
/// Requests a driver sends to pcid over the client channel.
/// Wire format: u64 little-endian length prefix + bincode payload
/// (see `PcidClient::send`).
#[derive(Debug, Serialize, Deserialize)]
pub enum PcidClientRequest {
    /// Ask pcid to enable the device.
    EnableDevice,
    /// Ask for the function's `PciFunction` identity record.
    RequestConfig,
    /// Read a 32-bit config value at the given byte offset.
    ReadConfig(u16),
    /// Write a 32-bit config value at the given byte offset.
    WriteConfig(u16, u32),
}
/// Responses pcid sends back; each variant answers the request of the
/// corresponding name in `PcidClientRequest`.
#[derive(Debug, Serialize, Deserialize)]
pub enum PcidClientResponse {
    /// Acknowledgement for `EnableDevice`.
    EnabledDevice,
    /// Answer to `RequestConfig`.
    Config(PciFunction),
    /// Answer to `ReadConfig`: the value read.
    ReadConfig(u32),
    /// Acknowledgement for `WriteConfig`.
    WriteConfig,
    /// Server-side failure, with a human-readable message.
    Error(String),
}
/// Client side of the pcid channel; owns the channel fd wrapped in a
/// `File` (closed on drop unless released via `into_raw_fd`).
pub struct PcidClient {
    // Length-prefixed bincode framing runs over this fd.
    channel: File,
}
impl PcidClient {
pub fn connect_default() -> Option<Self> {
let fd_str = std::env::var("PCID_CLIENT_CHANNEL").ok()?;
let fd: RawFd = fd_str.parse().ok()?;
Some(Self::connect_common(fd))
}
pub fn connect_by_path(device_path: &Path) -> Result<Self, std::io::Error> {
let channel_path = device_path.join("channel");
let fd = libredox::call::open(
channel_path.to_str().ok_or_else(|| {
std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid path")
})?,
libredox::flag::O_RDWR,
0,
)
.map_err(|e| std::io::Error::from_raw_os_error(e.errno()))?;
Ok(Self::connect_common(fd as RawFd))
}
fn connect_common(channel_fd: RawFd) -> Self {
let channel = unsafe { File::from_raw_fd(channel_fd) };
Self { channel }
}
fn send<T: Serialize>(&mut self, msg: &T) -> Result<(), std::io::Error> {
let data = bincode::serialize(msg)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
let len = data.len() as u64;
self.channel.write_all(&len.to_le_bytes())?;
self.channel.write_all(&data)?;
Ok(())
}
fn recv<T: DeserializeOwned>(&mut self) -> Result<T, std::io::Error> {
let mut len_buf = [0u8; 8];
self.channel.read_exact(&mut len_buf)?;
let len = u64::from_le_bytes(len_buf) as usize;
if len > 0x100_000 {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"response too large",
));
}
let mut data = vec![0u8; len];
self.channel.read_exact(&mut data)?;
bincode::deserialize_from(&data[..])
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}
pub fn request_config(&mut self) -> Result<PciFunction, std::io::Error> {
self.send(&PcidClientRequest::RequestConfig)?;
match self.recv()? {
PcidClientResponse::Config(func) => Ok(func),
other => Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("unexpected response: {other:?}"),
)),
}
}
pub fn enable_device(&mut self) -> Result<(), std::io::Error> {
self.send(&PcidClientRequest::EnableDevice)?;
match self.recv()? {
PcidClientResponse::EnabledDevice => Ok(()),
other => Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("unexpected response: {other:?}"),
)),
}
}
pub fn read_config(&mut self, offset: u16) -> Result<u32, std::io::Error> {
self.send(&PcidClientRequest::ReadConfig(offset))?;
match self.recv()? {
PcidClientResponse::ReadConfig(val) => Ok(val),
other => Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("unexpected response: {other:?}"),
)),
}
}
pub fn write_config(&mut self, offset: u16, value: u32) -> Result<(), std::io::Error> {
self.send(&PcidClientRequest::WriteConfig(offset, value))?;
match self.recv()? {
PcidClientResponse::WriteConfig => Ok(()),
other => Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("unexpected response: {other:?}"),
)),
}
}
pub fn into_raw_fd(self) -> RawFd {
self.channel.into_raw_fd()
}
}