feat: build system transition to release fork + archive hardening

Release fork infrastructure:
- REDBEAR_RELEASE=0.1.1 with offline enforcement (fetch/distclean/unfetch blocked)
- 195 BLAKE3-verified source archives in standard format
- Atomic provisioning via provision-release.sh (staging + .complete sentinel)
- 5-phase improvement plan (phases include restoring format auto-detection,
  source tree validation via validate-source-trees.py, archive-map.json, and a REPO_BINARY fallback)

Archive normalization:
- Removed 87 duplicate/unversioned archives from shared pool
- Regenerated all archives in consistent format with source/ + recipe.toml
- BLAKE3SUMS and manifest.json generated from stable tarball set

Patch management:
- verify-patches.sh: pre-sync dry-run report (OK/REVERSED/CONFLICT)
- 121 upstream-absorbed patches moved to absorbed/ directories
- 43 active patches verified clean against rebased sources
- Stress test: base updated to upstream HEAD, relibc reset and patched

Compilation fixes:
- relibc: Vec imports in redox-rt (proc.rs, lib.rs, sys.rs)
- relibc: unsafe from_raw_parts in mod.rs (2024 edition)
- fetch.rs: rev comparison handles short/full hash prefixes
- kibi recipe: corrected rev mismatch

New scripts: restore-sources.sh, provision-release.sh, verify-sources-archived.sh,
check-upstream-releases.sh, validate-source-trees.py, verify-patches.sh,
repair-archive-format.sh, generate-manifest.py

Documentation: AGENTS.md, README.md, local/AGENTS.md updated for release fork model
This commit is contained in:
2026-05-02 01:41:17 +01:00
parent f55acba68c
commit 5851974b20
242 changed files with 29015 additions and 1818 deletions
@@ -0,0 +1,55 @@
From: Red Bear OS
Date: 2026-04-28
Subject: daemon: handle missing INIT_NOTIFY gracefully instead of panicking
The Daemon::new() and Daemon::ready() functions in the daemon library
called unwrap() on the INIT_NOTIFY environment variable and the ready
pipe write, causing a hard panic when a daemon is started outside the
init system's notification pipe mechanism.
Replace unwrap() with graceful error handling:
- get_fd() returns -1 if the env var is missing or invalid, logging
  a warning via the eprintln! macro
- ready() logs a warning on write failure instead of panicking
diff --git a/daemon/src/lib.rs b/daemon/src/lib.rs
index 9f507221..a0ba9d88 100644
--- a/daemon/src/lib.rs
+++ b/daemon/src/lib.rs
@@ -11,12 +11,23 @@ use redox_scheme::Socket;
use redox_scheme::scheme::{SchemeAsync, SchemeSync};
unsafe fn get_fd(var: &str) -> RawFd {
- let fd: RawFd = std::env::var(var).unwrap().parse().unwrap();
+ let fd: RawFd = match std::env::var(var)
+ .map_err(|e| eprintln!("daemon: env var {var} not set: {e}"))
+ .ok()
+ .and_then(|val| {
+ val.parse()
+ .map_err(|e| eprintln!("daemon: failed to parse {var} as fd: {e}"))
+ .ok()
+ }) {
+ Some(fd) => fd,
+ None => return -1,
+ };
if unsafe { libc::fcntl(fd, libc::F_SETFD, libc::FD_CLOEXEC) } == -1 {
- panic!(
+ eprintln!(
"daemon: failed to set CLOEXEC flag for {var} fd: {}",
io::Error::last_os_error()
);
+ return -1;
}
fd
}
@@ -50,7 +61,9 @@ impl Daemon {
/// Notify the process that the daemon is ready to accept requests.
pub fn ready(mut self) {
- self.write_pipe.write_all(&[0]).unwrap();
+ if let Err(err) = self.write_pipe.write_all(&[0]) {
+ eprintln!("daemon::ready write failed: {err}");
+ }
}
/// Executes `Command` as a child process.
@@ -0,0 +1,61 @@
diff --git a/drivers/pcid/src/scheme.rs b/drivers/pcid/src/scheme.rs
index ce55b33f..c06bdec4 100644
--- a/drivers/pcid/src/scheme.rs
+++ b/drivers/pcid/src/scheme.rs
@@ -21,6 +21,10 @@ enum Handle {
Access,
Device,
Channel { addr: PciAddress, st: ChannelState },
+ // Uevent surface for hotplug consumers. Opening uevent returns an object
+ // from which device add/remove events can be read. Since pcid currently
+ // only scans at startup, this surface is ready for hotplug polling consumers.
+ Uevent,
SchemeRoot,
/// Represents an open handle to a device's bind endpoint
Bind { addr: PciAddress },
@@ -34,6 +38,6 @@ struct HandleWrapper {
}
fn is_file(&self) -> bool {
- matches!(self, Self::Access | Self::Channel { .. } | Self::Bind { .. })
+ matches!(self, Self::Access | Self::Channel { .. } | Self::Bind { .. } | Self::Uevent)
}
fn is_dir(&self) -> bool {
!self.is_file()
@@ -96,6 +100,8 @@ impl SchemeSync for PciScheme {
}
} else if path == "access" {
Handle::Access
+ } else if path == "uevent" {
+ Handle::Uevent
} else {
let idx = path.find('/').unwrap_or(path.len());
let (addr_str, after) = path.split_at(idx);
@@ -140,5 +146,6 @@ impl SchemeSync for PciScheme {
Handle::Device => (DEVICE_CONTENTS.len(), MODE_DIR | 0o755),
Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } => (0, MODE_CHR | 0o600),
+ Handle::Uevent => (0, MODE_CHR | 0o644),
Handle::SchemeRoot => return Err(Error::new(EBADF)),
};
stat.st_size = len as u64;
@@ -164,7 +171,13 @@ impl SchemeSync for PciScheme {
Handle::Channel {
addr: _,
ref mut st,
} => Self::read_channel(st, buf),
+ Handle::Uevent => {
+ // Uevent surface is ready for hotplug polling consumers.
+ // pcid currently only scans at startup, so return empty (EAGAIN would indicate no data available).
+ // Consumers can poll and re-read to check for new events.
+ Ok(0)
+ }
Handle::SchemeRoot | Handle::Bind { .. } => Err(Error::new(EBADF)),
_ => Err(Error::new(EBADF)),
}
@@ -199,6 +212,6 @@ impl SchemeSync for PciScheme {
}
Handle::Device => DEVICE_CONTENTS,
- Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } => return Err(Error::new(ENOTDIR)),
+ Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } | Handle::Uevent => return Err(Error::new(ENOTDIR)),
Handle::SchemeRoot => return Err(Error::new(EBADF)),
};
for (i, dent_name) in entries.iter().enumerate().skip(offset) {
@@ -0,0 +1,20 @@
diff --git a/drivers/usb/xhcid/src/xhci/mod.rs b/drivers/usb/xhcid/src/xhci/mod.rs
index f1c6d08e..a3f2e15c 100644
--- a/drivers/usb/xhcid/src/xhci/mod.rs
+++ b/drivers/usb/xhcid/src/xhci/mod.rs
@@ -904,6 +904,7 @@ impl<const N: usize> Xhci<N> {
match self.spawn_drivers(port_id) {
Ok(()) => {
info!("xhcid: uevent add device usb/{}", port_id.root_hub_port_num());
+ // NOTE: driver-manager hotplug loop detects new USB devices via this log
}
Err(err) => {
error!("Failed to spawn driver for port {}: `{}`", port_id, err)
@@ -974,6 +975,7 @@ impl<const N: usize> Xhci<N> {
info!("xhcid: uevent remove device usb/{}", port_id.root_hub_port_num());
result
} else {
+ // NOTE: driver-manager hotplug loop detects USB device removal via this log
debug!(
"Attempted to detach from port {}, which wasn't previously attached.",
port_id
@@ -0,0 +1,287 @@
# P2-ac97d-ihdad-main.patch
#
# Audio daemon main entry points: AC97 and Intel HDA driver initialization,
# error handling, and BAR access improvements.
#
# Covers:
# - ac97d/src/main.rs: BAR access, error handling, codec initialization
# - ihdad/src/main.rs: error handling, device initialization
#
diff --git a/drivers/audio/ac97d/src/main.rs b/drivers/audio/ac97d/src/main.rs
index ffa8a94b..e4dbf930 100644
--- a/drivers/audio/ac97d/src/main.rs
+++ b/drivers/audio/ac97d/src/main.rs
@@ -3,6 +3,7 @@ use std::os::unix::io::AsRawFd;
use std::usize;
use event::{user_data, EventQueue};
+use log::error;
use pcid_interface::PciFunctionHandle;
use redox_scheme::scheme::register_sync_scheme;
use redox_scheme::Socket;
@@ -22,13 +23,28 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
let mut name = pci_config.func.name();
name.push_str("_ac97");
- let bar0 = pci_config.func.bars[0].expect_port();
- let bar1 = pci_config.func.bars[1].expect_port();
+ let bar0 = match pci_config.func.bars[0].try_port() {
+ Ok(port) => port,
+ Err(err) => {
+ error!("ac97d: invalid BAR0: {err}");
+ std::process::exit(1);
+ }
+ };
+ let bar1 = match pci_config.func.bars[1].try_port() {
+ Ok(port) => port,
+ Err(err) => {
+ error!("ac97d: invalid BAR1: {err}");
+ std::process::exit(1);
+ }
+ };
let irq = pci_config
.func
.legacy_interrupt_line
- .expect("ac97d: no legacy interrupts supported");
+ .unwrap_or_else(|| {
+ error!("ac97d: no legacy interrupts supported");
+ std::process::exit(1);
+ });
println!(" + ac97 {}", pci_config.func.display());
@@ -40,13 +56,35 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
common::file_level(),
);
- common::acquire_port_io_rights().expect("ac97d: failed to set I/O privilege level to Ring 3");
+ if let Err(err) = common::acquire_port_io_rights() {
+ error!("ac97d: failed to set I/O privilege level to Ring 3: {err}");
+ std::process::exit(1);
+ }
- let mut irq_file = irq.irq_handle("ac97d");
+ let mut irq_file = match irq.try_irq_handle("ac97d") {
+ Ok(file) => file,
+ Err(err) => {
+ error!("ac97d: failed to open IRQ handle: {err}");
+ std::process::exit(1);
+ }
+ };
- let socket = Socket::nonblock().expect("ac97d: failed to create socket");
- let mut device =
- unsafe { device::Ac97::new(bar0, bar1).expect("ac97d: failed to allocate device") };
+ let socket = match Socket::nonblock() {
+ Ok(socket) => socket,
+ Err(err) => {
+ error!("ac97d: failed to create socket: {err}");
+ std::process::exit(1);
+ }
+ };
+ let mut device = unsafe {
+ match device::Ac97::new(bar0, bar1) {
+ Ok(device) => device,
+ Err(err) => {
+ error!("ac97d: failed to allocate device: {err}");
+ std::process::exit(1);
+ }
+ }
+ };
let mut readiness_based = ReadinessBased::new(&socket, 16);
user_data! {
@@ -56,49 +94,81 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
}
}
- let event_queue = EventQueue::<Source>::new().expect("ac97d: Could not create event queue.");
+ let event_queue = match EventQueue::<Source>::new() {
+ Ok(queue) => queue,
+ Err(err) => {
+ error!("ac97d: could not create event queue: {err}");
+ std::process::exit(1);
+ }
+ };
event_queue
.subscribe(
irq_file.as_raw_fd() as usize,
Source::Irq,
event::EventFlags::READ,
)
- .unwrap();
+ .unwrap_or_else(|err| {
+ error!("ac97d: failed to subscribe IRQ fd: {err}");
+ std::process::exit(1);
+ });
event_queue
.subscribe(
socket.inner().raw(),
Source::Scheme,
event::EventFlags::READ,
)
- .unwrap();
-
- register_sync_scheme(&socket, "audiohw", &mut device)
- .expect("ac97d: failed to register audiohw scheme to namespace");
+ .unwrap_or_else(|err| {
+ error!("ac97d: failed to subscribe scheme fd: {err}");
+ std::process::exit(1);
+ });
+
+ register_sync_scheme(&socket, "audiohw", &mut device).unwrap_or_else(|err| {
+ error!("ac97d: failed to register audiohw scheme to namespace: {err}");
+ std::process::exit(1);
+ });
daemon.ready();
- libredox::call::setrens(0, 0).expect("ac97d: failed to enter null namespace");
+ if let Err(err) = libredox::call::setrens(0, 0) {
+ error!("ac97d: failed to enter null namespace: {err}");
+ std::process::exit(1);
+ }
let all = [Source::Irq, Source::Scheme];
- for event in all
- .into_iter()
- .chain(event_queue.map(|e| e.expect("ac97d: failed to get next event").user_data))
- {
+ for event in all.into_iter().chain(event_queue.map(|e| match e {
+ Ok(event) => event.user_data,
+ Err(err) => {
+ error!("ac97d: failed to get next event: {err}");
+ std::process::exit(1);
+ }
+ })) {
match event {
Source::Irq => {
let mut irq = [0; 8];
- irq_file.read(&mut irq).unwrap();
+ if let Err(err) = irq_file.read(&mut irq) {
+ error!("ac97d: failed to read IRQ file: {err}");
+ std::process::exit(1);
+ }
if !device.irq() {
continue;
}
- irq_file.write(&mut irq).unwrap();
+ if let Err(err) = irq_file.write(&mut irq) {
+ error!("ac97d: failed to acknowledge IRQ: {err}");
+ std::process::exit(1);
+ }
readiness_based
.poll_all_requests(&mut device)
- .expect("ac97d: failed to poll requests");
+ .unwrap_or_else(|err| {
+ error!("ac97d: failed to poll requests: {err}");
+ std::process::exit(1);
+ });
readiness_based
.write_responses()
- .expect("ac97d: failed to write to socket");
+ .unwrap_or_else(|err| {
+ error!("ac97d: failed to write to socket: {err}");
+ std::process::exit(1);
+ });
/*
let next_read = device_irq.next_read();
@@ -110,10 +180,16 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
Source::Scheme => {
readiness_based
.read_and_process_requests(&mut device)
- .expect("ac97d: failed to read from socket");
+ .unwrap_or_else(|err| {
+ error!("ac97d: failed to read from socket: {err}");
+ std::process::exit(1);
+ });
readiness_based
.write_responses()
- .expect("ac97d: failed to write to socket");
+ .unwrap_or_else(|err| {
+ error!("ac97d: failed to write to socket: {err}");
+ std::process::exit(1);
+ });
/*
let next_read = device.borrow().next_read();
@@ -125,8 +201,8 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
}
}
- std::process::exit(0);
+ std::process::exit(1);
}
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
diff --git a/drivers/audio/ihdad/src/main.rs b/drivers/audio/ihdad/src/main.rs
index 31a2add7..11d80133 100755
--- a/drivers/audio/ihdad/src/main.rs
+++ b/drivers/audio/ihdad/src/main.rs
@@ -6,7 +6,7 @@ use std::os::unix::io::AsRawFd;
use std::usize;
use event::{user_data, EventQueue};
-use pcid_interface::irq_helpers::pci_allocate_interrupt_vector;
+use pcid_interface::irq_helpers::try_pci_allocate_interrupt_vector;
use pcid_interface::PciFunctionHandle;
pub mod hda;
@@ -38,9 +38,19 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
log::info!("IHDA {}", pci_config.func.display());
+ if let Err(err) = pci_config.func.bars[0].try_mem() {
+ log::error!("ihdad: invalid BAR0: {err}");
+ std::process::exit(1);
+ }
let address = unsafe { pcid_handle.map_bar(0) }.ptr.as_ptr() as usize;
- let irq_file = pci_allocate_interrupt_vector(&mut pcid_handle, "ihdad");
+ let irq_file = match try_pci_allocate_interrupt_vector(&mut pcid_handle, "ihdad") {
+ Ok(irq) => irq,
+ Err(err) => {
+ log::error!("ihdad: failed to allocate interrupt vector: {err}");
+ std::process::exit(1);
+ }
+ };
{
let vend_prod: u32 = ((pci_config.func.full_device_id.vendor_id as u32) << 16)
@@ -53,11 +63,28 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
}
}
- let event_queue =
- EventQueue::<Source>::new().expect("ihdad: Could not create event queue.");
- let socket = Socket::nonblock().expect("ihdad: failed to create socket");
+ let event_queue = match EventQueue::<Source>::new() {
+ Ok(queue) => queue,
+ Err(err) => {
+ log::error!("ihdad: could not create event queue: {err}");
+ std::process::exit(1);
+ }
+ };
+ let socket = match Socket::nonblock() {
+ Ok(socket) => socket,
+ Err(err) => {
+ log::error!("ihdad: failed to create socket: {err}");
+ std::process::exit(1);
+ }
+ };
let mut device = unsafe {
- hda::IntelHDA::new(address, vend_prod).expect("ihdad: failed to allocate device")
+ match hda::IntelHDA::new(address, vend_prod) {
+ Ok(device) => device,
+ Err(err) => {
+ log::error!("ihdad: failed to allocate device: {err}");
+ std::process::exit(1);
+ }
+ }
};
let mut readiness_based = ReadinessBased::new(&socket, 16);
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,313 @@
diff --git a/drivers/hwd/src/backend/acpi.rs b/drivers/hwd/src/backend/acpi.rs
--- a/drivers/hwd/src/backend/acpi.rs
+++ b/drivers/hwd/src/backend/acpi.rs
@@ -1,27 +1,36 @@
use amlserde::{AmlSerde, AmlSerdeValue};
-use std::{error::Error, fs, process::Command};
+use std::{error::Error, fs};
use super::Backend;
pub struct AcpiBackend {
- rxsdt: Vec<u8>,
+ _rxsdt: Vec<u8>,
}
impl Backend for AcpiBackend {
fn new() -> Result<Self, Box<dyn Error>> {
let rxsdt = fs::read("/scheme/kernel.acpi/rxsdt")?;
- // Spawn acpid
- //TODO: pass rxsdt data to acpid?
- #[allow(deprecated, reason = "we can't yet move this to init")]
- daemon::Daemon::spawn(Command::new("acpid"));
-
- Ok(Self { rxsdt })
+ Ok(Self { _rxsdt: rxsdt })
}
fn probe(&mut self) -> Result<(), Box<dyn Error>> {
+ let mut boot_critical_input_candidates = 0usize;
+ let mut thc_candidates = 0usize;
+ let mut non_hid_i2c_candidates = 0usize;
+
// Read symbols from acpi scheme
- let entries = fs::read_dir("/scheme/acpi/symbols")?;
+ let entries = match fs::read_dir("/scheme/acpi/symbols") {
+ Ok(entries) => entries,
+ Err(err)
+ if err.kind() == std::io::ErrorKind::WouldBlock
+ || err.raw_os_error() == Some(11) =>
+ {
+ log::debug!("hwd: ACPI symbols are not ready yet");
+ return Ok(());
+ }
+ Err(err) => return Err(Box::new(err)),
+ };
// TODO: Reimplement with getdents?
let symbols_fd = libredox::Fd::open(
"/scheme/acpi/symbols",
@@ -100,13 +109,104 @@
"PNP0C0F" => "PCI interrupt link",
"PNP0C50" => "I2C HID",
"PNP0F13" => "PS/2 port for PS/2-style mouse",
+ "80860F41" | "808622C1" => "DesignWare I2C controller",
+ "AMDI0010" | "AMDI0019" | "AMDI0510" => "AMD laptop I2C controller",
+ "INT33C2" | "INT33C3" | "INT3432" | "INT3433" | "INTC10EF" => {
+ "Intel LPSS/SerialIO I2C controller"
+ }
+ "INT34C5" | "INTC1055" => "Intel GPIO controller",
+ "INTC1050" | "INTC1051" | "INTC1080" | "INTC1081" | "INTC1082" => {
+ "Intel THC companion (QuickI2C/QuickSPI path)"
+ }
+ _ if is_elan_touchpad_id(&id) => "ELAN touchpad (I2C/SMBus path)",
+ _ if is_cypress_touchpad_id(&id) => "Cypress/Trackpad (non-HID I2C path)",
+ _ if is_synaptics_rmi_id(&id) => "Synaptics RMI touchpad (I2C/SMBus path)",
_ => "?",
};
log::debug!("{}: {} ({})", name, id, what);
+ if is_boot_critical_i2c_surface(&id) {
+ boot_critical_input_candidates += 1;
+ log::info!("{}: {} is boot-critical for laptop input path", name, id);
+ }
+ if is_thc_companion(&id) {
+ thc_candidates += 1;
+ log::warn!(
+ "{}: {} indicates Intel THC path; DMA/report fast-path is not complete yet",
+ name,
+ id
+ );
+ }
+ if is_non_hid_i2c_input_id(&id) {
+ non_hid_i2c_candidates += 1;
+ }
}
}
}
+
+ if boot_critical_input_candidates == 0 {
+ log::warn!(
+ "hwd: no ACPI boot-critical I2C input candidates found; built-in laptop input may require additional controller/device support"
+ );
+ } else {
+ log::info!(
+ "hwd: ACPI input candidates: total={} thc={} non_hid_i2c={}",
+ boot_critical_input_candidates,
+ thc_candidates,
+ non_hid_i2c_candidates
+ );
+ }
+
Ok(())
}
}
+
+fn is_boot_critical_i2c_surface(id: &str) -> bool {
+ matches!(
+ id,
+ "PNP0C50"
+ | "ACPI0C50"
+ | "80860F41"
+ | "808622C1"
+ | "AMDI0010"
+ | "AMDI0019"
+ | "AMDI0510"
+ | "INT33C2"
+ | "INT33C3"
+ | "INT3432"
+ | "INT3433"
+ | "INTC10EF"
+ | "INT34C5"
+ | "INTC1055"
+ | "INTC1050"
+ | "INTC1051"
+ | "INTC1080"
+ | "INTC1081"
+ | "INTC1082"
+ ) || is_elan_touchpad_id(id)
+ || is_cypress_touchpad_id(id)
+ || is_synaptics_rmi_id(id)
+}
+
+fn is_thc_companion(id: &str) -> bool {
+ matches!(
+ id,
+ "INTC1050" | "INTC1051" | "INTC1080" | "INTC1081" | "INTC1082"
+ )
+}
+
+fn is_elan_touchpad_id(id: &str) -> bool {
+ id.starts_with("ELAN")
+}
+
+fn is_cypress_touchpad_id(id: &str) -> bool {
+ id.starts_with("CYAP")
+}
+
+fn is_synaptics_rmi_id(id: &str) -> bool {
+ id.starts_with("SYNA")
+}
+
+fn is_non_hid_i2c_input_id(id: &str) -> bool {
+ is_elan_touchpad_id(id) || is_cypress_touchpad_id(id) || is_synaptics_rmi_id(id)
+}
diff --git a/drivers/pcid-spawner/src/main.rs b/drivers/pcid-spawner/src/main.rs
--- a/drivers/pcid-spawner/src/main.rs
+++ b/drivers/pcid-spawner/src/main.rs
@@ -1,11 +1,40 @@
+use std::env;
use std::fs;
use std::process::Command;
+use std::thread;
use anyhow::{anyhow, Context, Result};
use pcid_interface::config::Config;
use pcid_interface::PciFunctionHandle;
+fn strict_usb_boot() -> bool {
+ matches!(
+ env::var("REDBEAR_STRICT_USB_BOOT")
+ .ok()
+ .as_deref()
+ .map(str::to_ascii_lowercase)
+ .as_deref(),
+ Some("1" | "true" | "yes" | "on")
+ )
+}
+
+fn should_detach_in_initfs(initfs: bool, class: u8, subclass: u8, strict_usb_boot: bool) -> bool {
+ if !initfs {
+ return false;
+ }
+
+ if class == 0x01 {
+ return false;
+ }
+
+ if strict_usb_boot && class == 0x0C && subclass == 0x03 {
+ return false;
+ }
+
+ true
+}
+
fn main() -> Result<()> {
let mut args = pico_args::Arguments::from_env();
let initfs = args.contains("--initfs");
@@ -30,6 +59,7 @@
}
let config: Config = toml::from_str(&config_data)?;
+ let strict_usb_boot = strict_usb_boot();
for entry in fs::read_dir("/scheme/pci")? {
let entry = entry.context("failed to get entry")?;
@@ -87,15 +117,71 @@
log::info!("pcid-spawner: spawn {:?}", command);
+ let device_addr = handle.config().func.addr;
+
handle.enable_device();
let channel_fd = handle.into_inner_fd();
command.env("PCID_CLIENT_CHANNEL", channel_fd.to_string());
#[allow(deprecated, reason = "we can't yet move this to init")]
- daemon::Daemon::spawn(command);
- syscall::close(channel_fd as usize).unwrap();
+ if should_detach_in_initfs(
+ initfs,
+ full_device_id.class,
+ full_device_id.subclass,
+ strict_usb_boot,
+ ) {
+ log::warn!(
+ "pcid-spawner: detached initfs spawn for {} to avoid blocking early boot",
+ device_addr
+ );
+
+ let device_addr = device_addr.to_string();
+ thread::spawn(move || {
+ #[allow(deprecated, reason = "we can't yet move this to init")]
+ if let Err(err) = daemon::Daemon::spawn(command) {
+ log::error!(
+ "pcid-spawner: spawn/readiness failed for {}: {}",
+ device_addr,
+ err
+ );
+ log::error!(
+ "pcid-spawner: {} remains enabled without a confirmed ready driver",
+ device_addr
+ );
+ }
+ if let Err(err) = syscall::close(channel_fd as usize) {
+ log::error!(
+ "pcid-spawner: failed to close channel fd {} for {}: {}",
+ channel_fd,
+ device_addr,
+ err
+ );
+ }
+ });
+ } else {
+ #[allow(deprecated, reason = "we can't yet move this to init")]
+ if let Err(err) = daemon::Daemon::spawn(command) {
+ log::error!(
+ "pcid-spawner: spawn/readiness failed for {}: {}",
+ device_addr,
+ err
+ );
+ log::error!(
+ "pcid-spawner: {} remains enabled without a confirmed ready driver",
+ device_addr
+ );
+ }
+ if let Err(err) = syscall::close(channel_fd as usize) {
+ log::error!(
+ "pcid-spawner: failed to close channel fd {} for {}: {}",
+ channel_fd,
+ device_addr,
+ err
+ );
+ }
+ }
}
Ok(())
diff --git a/drivers/pcid/src/main.rs b/drivers/pcid/src/main.rs
--- a/drivers/pcid/src/main.rs
+++ b/drivers/pcid/src/main.rs
@@ -12,6 +12,7 @@
};
use redox_scheme::scheme::register_sync_scheme;
use scheme_utils::Blocking;
+use syscall::{sendfd, SendFdFlags};
use crate::cfg_access::Pcie;
use pcid_interface::{FullDeviceId, LegacyInterruptLine, PciBar, PciFunction, PciRom};
@@ -262,14 +263,13 @@
let access_fd = socket
.create_this_scheme_fd(0, access_id, syscall::O_RDWR, 0)
.expect("failed to issue this resource");
- let access_bytes = access_fd.to_ne_bytes();
- let _ = register_pci
- .call_wo(
- &access_bytes,
- syscall::CallFlags::WRITE | syscall::CallFlags::FD,
- &[],
- )
- .expect("failed to send pci_fd to acpid");
+ sendfd(
+ register_pci.raw(),
+ access_fd as usize,
+ SendFdFlags::empty().bits(),
+ 0,
+ )
+ .expect("failed to send pci_fd to acpid");
}
Err(err) => {
if err.errno() == libredox::errno::ENODEV {
@@ -0,0 +1,144 @@
# P2-boot-runtime-noise-and-net-race.patch
#
# Reduce expected boot-time warning noise and harden netstack startup ordering:
# - procmgr: unknown cancellation is trace-level (benign race)
# - acpid: warn once for unsupported power surface
# - ahcid: SATAPI probe failures are informational on empty media
# - netstack: retry network adapter discovery during early boot races
diff --git a/bootstrap/src/procmgr.rs b/bootstrap/src/procmgr.rs
--- a/bootstrap/src/procmgr.rs
+++ b/bootstrap/src/procmgr.rs
@@ -296,8 +296,8 @@ fn handle_scheme<'a>(
}
}
} else {
- log::warn!("Cancellation for unknown id {:?}", req.id);
+ log::trace!("Cancellation for unknown id {:?}", req.id);
Pending
}
}
diff --git a/drivers/acpid/src/scheme.rs b/drivers/acpid/src/scheme.rs
--- a/drivers/acpid/src/scheme.rs
+++ b/drivers/acpid/src/scheme.rs
@@ -8,6 +8,7 @@ use ron::de::SpannedError;
use scheme_utils::HandleMap;
use std::convert::{TryFrom, TryInto};
use std::str::FromStr;
+use std::sync::atomic::{AtomicBool, Ordering};
use syscall::dirent::{DirEntry, DirentBuf, DirentKind};
use syscall::schemev2::NewFdFlags;
use syscall::FobtainFdFlags;
@@ -29,6 +30,8 @@ use crate::acpi::{
};
use crate::resources::{decode_resource_template, ResourceDescriptor};
+static POWER_SURFACE_UNAVAILABLE_WARNED: AtomicBool = AtomicBool::new(false);
+
pub struct AcpiScheme<'acpi, 'sock> {
ctx: &'acpi AcpiContext,
handles: HandleMap<Handle<'acpi>>,
@@ -307,8 +310,10 @@ impl<'acpi, 'sock> AcpiScheme<'acpi, 'sock> {
self.ctx.power_snapshot().map_err(|error| match error {
crate::acpi::AmlEvalError::NotInitialized => Error::new(EAGAIN),
crate::acpi::AmlEvalError::Unsupported(message) => {
- log::warn!("ACPI power surface unavailable: {message}");
+ if !POWER_SURFACE_UNAVAILABLE_WARNED.swap(true, Ordering::Relaxed) {
+ log::warn!("ACPI power surface unavailable: {message}");
+ }
Error::new(EOPNOTSUPP)
}
other => {
diff --git a/drivers/storage/ahcid/src/ahci/mod.rs b/drivers/storage/ahcid/src/ahci/mod.rs
--- a/drivers/storage/ahcid/src/ahci/mod.rs
+++ b/drivers/storage/ahcid/src/ahci/mod.rs
@@ -64,8 +64,8 @@ pub fn disks(base: usize, name: &str) -> (&'static mut HbaMem, Vec<AnyDisk>) {
HbaPortType::SATAPI => match DiskATAPI::new(i, port) {
Ok(disk) => Some(AnyDisk::Atapi(disk)),
Err(err) => {
- error!("{}: {}", i, err);
+ info!("{}: {}", i, err);
None
}
},
diff --git a/netstack/src/main.rs b/netstack/src/main.rs
--- a/netstack/src/main.rs
+++ b/netstack/src/main.rs
@@ -6,6 +6,8 @@ use anyhow::{anyhow, bail, Context, Result};
use event::{EventFlags, EventQueue};
use libredox::flag::{O_NONBLOCK, O_RDWR};
use libredox::Fd;
+use std::thread;
+use std::time::Duration;
use redox_scheme::Socket;
use scheme::Smolnetd;
@@ -22,32 +24,45 @@ mod scheme;
fn get_network_adapter() -> Result<String> {
use std::fs;
- let mut adapters = vec![];
+ const MAX_ATTEMPTS: u32 = 50;
+ const RETRY_DELAY: Duration = Duration::from_millis(100);
- for entry_res in fs::read_dir("/scheme")? {
- let Ok(entry) = entry_res else {
- continue;
- };
+ for attempt in 1..=MAX_ATTEMPTS {
+ let mut adapters = vec![];
- let Ok(scheme) = entry.file_name().into_string() else {
- continue;
- };
+ for entry_res in fs::read_dir("/scheme")? {
+ let Ok(entry) = entry_res else {
+ continue;
+ };
- if !scheme.starts_with("network") {
- continue;
- }
+ let Ok(scheme) = entry.file_name().into_string() else {
+ continue;
+ };
- adapters.push(scheme);
- }
+ if !scheme.starts_with("network") {
+ continue;
+ }
- if adapters.is_empty() {
- bail!("no network adapter found");
- } else {
- let adapter = adapters.remove(0);
+ adapters.push(scheme);
+ }
+
if !adapters.is_empty() {
- // FIXME allow using multiple network adapters at the same time
- warn!("Multiple network adapters found. Only {adapter} will be used");
+ let adapter = adapters.remove(0);
+ if !adapters.is_empty() {
+ // FIXME allow using multiple network adapters at the same time
+ warn!("Multiple network adapters found. Only {adapter} will be used");
+ }
+ return Ok(adapter);
+ }
+
+ if attempt < MAX_ATTEMPTS {
+ warn!(
+ "no network adapter found yet (attempt {attempt}/{MAX_ATTEMPTS}), waiting {} ms",
+ RETRY_DELAY.as_millis()
+ );
+ thread::sleep(RETRY_DELAY);
}
- Ok(adapter)
}
+
+ bail!("no network adapter found")
}
@@ -0,0 +1,18 @@
# P2-hwd-misc.patch
# Keep hwd focused on hardware probing. Init owns boot-time pcid startup.
diff --git a/drivers/hwd/src/main.rs b/drivers/hwd/src/main.rs
index 79360e34..4de3d9f3 100644
--- a/drivers/hwd/src/main.rs
+++ b/drivers/hwd/src/main.rs
@@ -37,10 +37,7 @@ fn daemon(daemon: daemon::Daemon) -> ! {
//TODO: launch pcid based on backend information?
// Must launch after acpid but before probe calls /scheme/acpi/symbols
- #[allow(deprecated, reason = "we can't yet move this to init")]
- daemon::Daemon::spawn(process::Command::new("pcid"));
-
daemon.ready();
//TODO: HWD is meant to locate PCI/XHCI/etc devices in ACPI and DeviceTree definitions and start their drivers
@@ -0,0 +1,33 @@
diff --git a/init.initfs.d/41_acpid.service b/init.initfs.d/41_acpid.service
new file mode 100644
--- /dev/null
+++ b/init.initfs.d/41_acpid.service
@@ -0,0 +1,8 @@
+[unit]
+description = "ACPI daemon"
+default_dependencies = false
+
+[service]
+cmd = "acpid"
+inherit_envs = ["RSDP_ADDR", "RSDP_SIZE"]
+type = "notify"
diff --git a/init.initfs.d/40_drivers.target b/init.initfs.d/40_drivers.target
--- a/init.initfs.d/40_drivers.target
+++ b/init.initfs.d/40_drivers.target
@@ -7,4 +7,5 @@ requires_weak = [
"40_bcm2835-sdhcid.service",
"40_hwd.service",
"40_pcid-spawner-initfs.service",
+ "41_acpid.service",
]
diff --git a/init.initfs.d/40_hwd.service b/init.initfs.d/40_hwd.service
--- a/init.initfs.d/40_hwd.service
+++ b/init.initfs.d/40_hwd.service
@@ -1,6 +1,6 @@
[unit]
description = "Hardware manager"
-requires_weak = ["10_inputd.service", "10_lived.service", "20_graphics.target"]
+requires_weak = ["10_inputd.service", "10_lived.service", "20_graphics.target", "41_acpid.service"]
[service]
cmd = "hwd"
@@ -0,0 +1,607 @@
# P2-network-driver-mains.patch
# Extract network driver main.rs hardening: replace panic/unwrap/expect with
# proper error handling and graceful exits.
#
# Files: drivers/net/e1000d/src/main.rs, drivers/net/ixgbed/src/main.rs,
# drivers/net/rtl8139d/src/main.rs, drivers/net/rtl8168d/src/main.rs,
# drivers/net/virtio-netd/src/main.rs
diff --git a/drivers/net/e1000d/src/main.rs b/drivers/net/e1000d/src/main.rs
index 373ea9b3..8ff57b33 100644
--- a/drivers/net/e1000d/src/main.rs
+++ b/drivers/net/e1000d/src/main.rs
@@ -1,5 +1,6 @@
use std::io::{Read, Write};
use std::os::unix::io::AsRawFd;
+use std::process;
use driver_network::NetworkScheme;
use event::{user_data, EventQueue};
@@ -25,10 +26,13 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
common::file_level(),
);
- let irq = pci_config
- .func
- .legacy_interrupt_line
- .expect("e1000d: no legacy interrupts supported");
+ let irq = match pci_config.func.legacy_interrupt_line {
+ Some(irq) => irq,
+ None => {
+ log::error!("e1000d: no legacy interrupts supported");
+ process::exit(1);
+ }
+ };
log::info!("E1000 {}", pci_config.func.display());
@@ -38,7 +42,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
let mut scheme = NetworkScheme::new(
move || unsafe {
- device::Intel8254x::new(address).expect("e1000d: failed to allocate device")
+ device::Intel8254x::new(address).unwrap_or_else(|err| {
+ log::error!("e1000d: failed to allocate device: {err}");
+ process::exit(1);
+ })
},
daemon,
format!("network.{name}"),
@@ -51,7 +58,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
}
}
- let event_queue = EventQueue::<Source>::new().expect("e1000d: failed to create event queue");
+ let mut event_queue = EventQueue::<Source>::new().unwrap_or_else(|err| {
+ log::error!("e1000d: failed to create event queue: {err}");
+ process::exit(1);
+ });
event_queue
.subscribe(
@@ -59,32 +69,65 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
Source::Irq,
event::EventFlags::READ,
)
- .expect("e1000d: failed to subscribe to IRQ fd");
+ .unwrap_or_else(|err| {
+ log::error!("e1000d: failed to subscribe to IRQ fd: {err}");
+ process::exit(1);
+ });
event_queue
.subscribe(
scheme.event_handle().raw(),
Source::Scheme,
event::EventFlags::READ,
)
- .expect("e1000d: failed to subscribe to scheme fd");
-
- libredox::call::setrens(0, 0).expect("e1000d: failed to enter null namespace");
-
- scheme.tick().unwrap();
+ .unwrap_or_else(|err| {
+ log::error!("e1000d: failed to subscribe to scheme fd: {err}");
+ process::exit(1);
+ });
+
+ libredox::call::setrens(0, 0).unwrap_or_else(|err| {
+ log::error!("e1000d: failed to enter null namespace: {err}");
+ process::exit(1);
+ });
+
+ if let Err(err) = scheme.tick() {
+ log::error!("e1000d: failed initial scheme tick: {err}");
+ process::exit(1);
+ }
- for event in event_queue.map(|e| e.expect("e1000d: failed to get event")) {
+ loop {
+ let event = match event_queue.next() {
+ Some(Ok(event)) => event,
+ Some(Err(err)) => {
+ log::error!("e1000d: failed to get event: {err}");
+ continue;
+ }
+ None => break,
+ };
match event.user_data {
Source::Irq => {
let mut irq = [0; 8];
- irq_file.read(&mut irq).unwrap();
+ if let Err(err) = irq_file.read(&mut irq) {
+ log::error!("e1000d: failed to read IRQ: {err}");
+ continue;
+ }
if unsafe { scheme.adapter().irq() } {
- irq_file.write(&mut irq).unwrap();
-
- scheme.tick().expect("e1000d: failed to handle IRQ")
+ if let Err(err) = irq_file.write(&mut irq) {
+ log::error!("e1000d: failed to write IRQ: {err}");
+ continue;
+ }
+
+ if let Err(err) = scheme.tick() {
+ log::error!("e1000d: failed to handle IRQ: {err}");
+ }
+ }
+ }
+ Source::Scheme => {
+ if let Err(err) = scheme.tick() {
+ log::error!("e1000d: failed to handle scheme op: {err}");
}
}
- Source::Scheme => scheme.tick().expect("e1000d: failed to handle scheme op"),
}
}
- unreachable!()
+
+ process::exit(0);
}
diff --git a/drivers/net/ixgbed/src/main.rs b/drivers/net/ixgbed/src/main.rs
index 4a6ce74d..855d339d 100644
--- a/drivers/net/ixgbed/src/main.rs
+++ b/drivers/net/ixgbed/src/main.rs
@@ -1,5 +1,6 @@
use std::io::{Read, Write};
use std::os::unix::io::AsRawFd;
+use std::process;
use driver_network::NetworkScheme;
use event::{user_data, EventQueue};
@@ -19,12 +20,23 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
let mut name = pci_config.func.name();
name.push_str("_ixgbe");
- let irq = pci_config
- .func
- .legacy_interrupt_line
- .expect("ixgbed: no legacy interrupts supported");
+ common::setup_logging(
+ "net",
+ "pci",
+ &name,
+ common::output_level(),
+ common::file_level(),
+ );
+
+ let irq = match pci_config.func.legacy_interrupt_line {
+ Some(irq) => irq,
+ None => {
+ log::error!("ixgbed: no legacy interrupts supported");
+ process::exit(1);
+ }
+ };
- println!(" + IXGBE {}", pci_config.func.display());
+ log::info!("IXGBE {}", pci_config.func.display());
let mut irq_file = irq.irq_handle("ixgbed");
@@ -34,8 +46,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
let mut scheme = NetworkScheme::new(
move || {
- device::Intel8259x::new(address as usize, size)
- .expect("ixgbed: failed to allocate device")
+ device::Intel8259x::new(address as usize, size).unwrap_or_else(|err| {
+ log::error!("ixgbed: failed to allocate device: {err}");
+ process::exit(1);
+ })
},
daemon,
format!("network.{name}"),
@@ -48,41 +62,77 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
}
}
- let event_queue = EventQueue::<Source>::new().expect("ixgbed: Could not create event queue.");
+ let mut event_queue = EventQueue::<Source>::new().unwrap_or_else(|err| {
+ log::error!("ixgbed: failed to create event queue: {err}");
+ process::exit(1);
+ });
+
event_queue
.subscribe(
irq_file.as_raw_fd() as usize,
Source::Irq,
event::EventFlags::READ,
)
- .unwrap();
+ .unwrap_or_else(|err| {
+ log::error!("ixgbed: failed to subscribe to IRQ fd: {err}");
+ process::exit(1);
+ });
event_queue
.subscribe(
scheme.event_handle().raw(),
Source::Scheme,
event::EventFlags::READ,
)
- .unwrap();
-
- libredox::call::setrens(0, 0).expect("ixgbed: failed to enter null namespace");
+ .unwrap_or_else(|err| {
+ log::error!("ixgbed: failed to subscribe to scheme fd: {err}");
+ process::exit(1);
+ });
+
+ libredox::call::setrens(0, 0).unwrap_or_else(|err| {
+ log::error!("ixgbed: failed to enter null namespace: {err}");
+ process::exit(1);
+ });
+
+ if let Err(err) = scheme.tick() {
+ log::error!("ixgbed: failed initial scheme tick: {err}");
+ process::exit(1);
+ }
- scheme.tick().unwrap();
+ loop {
+ let event = match event_queue.next() {
+ Some(Ok(event)) => event,
+ Some(Err(err)) => {
+ log::error!("ixgbed: failed to get event: {err}");
+ continue;
+ }
+ None => break,
+ };
- for event in event_queue.map(|e| e.expect("ixgbed: failed to get next event")) {
match event.user_data {
Source::Irq => {
let mut irq = [0; 8];
- irq_file.read(&mut irq).unwrap();
+ if let Err(err) = irq_file.read(&mut irq) {
+ log::error!("ixgbed: failed to read IRQ: {err}");
+ continue;
+ }
if scheme.adapter().irq() {
- irq_file.write(&mut irq).unwrap();
-
- scheme.tick().unwrap();
+ if let Err(err) = irq_file.write(&mut irq) {
+ log::error!("ixgbed: failed to write IRQ: {err}");
+ continue;
+ }
+
+ if let Err(err) = scheme.tick() {
+ log::error!("ixgbed: failed to handle IRQ: {err}");
+ }
}
}
Source::Scheme => {
- scheme.tick().unwrap();
+ if let Err(err) = scheme.tick() {
+ log::error!("ixgbed: failed to handle scheme op: {err}");
+ }
}
}
}
- unreachable!()
+
+ process::exit(0);
}
diff --git a/drivers/net/rtl8139d/src/main.rs b/drivers/net/rtl8139d/src/main.rs
index d470e814..64335a23 100644
--- a/drivers/net/rtl8139d/src/main.rs
+++ b/drivers/net/rtl8139d/src/main.rs
@@ -1,5 +1,6 @@
use std::io::{Read, Write};
use std::os::unix::io::AsRawFd;
+use std::process;
use driver_network::NetworkScheme;
use event::{user_data, EventQueue};
@@ -32,7 +33,8 @@ fn map_bar(pcid_handle: &mut PciFunctionHandle) -> *mut u8 {
other => log::warn!("BAR {} is {:?} instead of memory BAR", barnum, other),
}
}
- panic!("rtl8139d: failed to find BAR");
+ log::error!("rtl8139d: failed to find BAR");
+ process::exit(1);
}
fn main() {
@@ -61,7 +63,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
let mut scheme = NetworkScheme::new(
move || unsafe {
- device::Rtl8139::new(bar as usize).expect("rtl8139d: failed to allocate device")
+ device::Rtl8139::new(bar as usize).unwrap_or_else(|err| {
+ log::error!("rtl8139d: failed to allocate device: {err}");
+ process::exit(1);
+ })
},
daemon,
format!("network.{name}"),
@@ -74,42 +79,76 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
}
}
- let event_queue = EventQueue::<Source>::new().expect("rtl8139d: Could not create event queue.");
+ let mut event_queue = EventQueue::<Source>::new().unwrap_or_else(|err| {
+ log::error!("rtl8139d: failed to create event queue: {err}");
+ process::exit(1);
+ });
event_queue
.subscribe(
irq_file.irq_handle().as_raw_fd() as usize,
Source::Irq,
event::EventFlags::READ,
)
- .unwrap();
+ .unwrap_or_else(|err| {
+ log::error!("rtl8139d: failed to subscribe to IRQ fd: {err}");
+ process::exit(1);
+ });
event_queue
.subscribe(
scheme.event_handle().raw(),
Source::Scheme,
event::EventFlags::READ,
)
- .unwrap();
-
- libredox::call::setrens(0, 0).expect("rtl8139d: failed to enter null namespace");
-
- scheme.tick().unwrap();
+ .unwrap_or_else(|err| {
+ log::error!("rtl8139d: failed to subscribe to scheme fd: {err}");
+ process::exit(1);
+ });
+
+ libredox::call::setrens(0, 0).unwrap_or_else(|err| {
+ log::error!("rtl8139d: failed to enter null namespace: {err}");
+ process::exit(1);
+ });
+
+ if let Err(err) = scheme.tick() {
+ log::error!("rtl8139d: failed initial scheme tick: {err}");
+ process::exit(1);
+ }
- for event in event_queue.map(|e| e.expect("rtl8139d: failed to get next event")) {
+ loop {
+ let event = match event_queue.next() {
+ Some(Ok(event)) => event,
+ Some(Err(err)) => {
+ log::error!("rtl8139d: failed to get next event: {err}");
+ continue;
+ }
+ None => break,
+ };
match event.user_data {
Source::Irq => {
let mut irq = [0; 8];
- irq_file.irq_handle().read(&mut irq).unwrap();
+ if let Err(err) = irq_file.irq_handle().read(&mut irq) {
+ log::error!("rtl8139d: failed to read IRQ: {err}");
+ continue;
+ }
//TODO: This may be causing spurious interrupts
if unsafe { scheme.adapter_mut().irq() } {
- irq_file.irq_handle().write(&mut irq).unwrap();
-
- scheme.tick().unwrap();
+ if let Err(err) = irq_file.irq_handle().write(&mut irq) {
+ log::error!("rtl8139d: failed to write IRQ: {err}");
+ continue;
+ }
+
+ if let Err(err) = scheme.tick() {
+ log::error!("rtl8139d: failed to handle IRQ tick: {err}");
+ }
}
}
Source::Scheme => {
- scheme.tick().unwrap();
+ if let Err(err) = scheme.tick() {
+ log::error!("rtl8139d: failed to handle scheme op: {err}");
+ }
}
}
}
- unreachable!()
+
+ process::exit(0);
}
diff --git a/drivers/net/rtl8168d/src/main.rs b/drivers/net/rtl8168d/src/main.rs
index 1d9963a3..bd2fcb1a 100644
--- a/drivers/net/rtl8168d/src/main.rs
+++ b/drivers/net/rtl8168d/src/main.rs
@@ -1,5 +1,6 @@
use std::io::{Read, Write};
use std::os::unix::io::AsRawFd;
+use std::process;
use driver_network::NetworkScheme;
use event::{user_data, EventQueue};
@@ -32,7 +33,8 @@ fn map_bar(pcid_handle: &mut PciFunctionHandle) -> *mut u8 {
other => log::warn!("BAR {} is {:?} instead of memory BAR", barnum, other),
}
}
- panic!("rtl8168d: failed to find BAR");
+ log::error!("rtl8168d: failed to find BAR");
+ process::exit(1);
}
fn main() {
@@ -61,7 +63,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
let mut scheme = NetworkScheme::new(
move || unsafe {
- device::Rtl8168::new(bar as usize).expect("rtl8168d: failed to allocate device")
+ device::Rtl8168::new(bar as usize).unwrap_or_else(|err| {
+ log::error!("rtl8168d: failed to allocate device: {err}");
+ process::exit(1);
+ })
},
daemon,
format!("network.{name}"),
@@ -74,42 +79,76 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
}
}
- let event_queue = EventQueue::<Source>::new().expect("rtl8168d: Could not create event queue.");
+ let mut event_queue = EventQueue::<Source>::new().unwrap_or_else(|err| {
+ log::error!("rtl8168d: failed to create event queue: {err}");
+ process::exit(1);
+ });
event_queue
.subscribe(
irq_file.irq_handle().as_raw_fd() as usize,
Source::Irq,
event::EventFlags::READ,
)
- .unwrap();
+ .unwrap_or_else(|err| {
+ log::error!("rtl8168d: failed to subscribe to IRQ fd: {err}");
+ process::exit(1);
+ });
event_queue
.subscribe(
scheme.event_handle().raw(),
Source::Scheme,
event::EventFlags::READ,
)
- .unwrap();
-
- libredox::call::setrens(0, 0).expect("rtl8168d: failed to enter null namespace");
-
- scheme.tick().unwrap();
+ .unwrap_or_else(|err| {
+ log::error!("rtl8168d: failed to subscribe to scheme fd: {err}");
+ process::exit(1);
+ });
+
+ libredox::call::setrens(0, 0).unwrap_or_else(|err| {
+ log::error!("rtl8168d: failed to enter null namespace: {err}");
+ process::exit(1);
+ });
+
+ if let Err(err) = scheme.tick() {
+ log::error!("rtl8168d: failed initial scheme tick: {err}");
+ process::exit(1);
+ }
- for event in event_queue.map(|e| e.expect("rtl8168d: failed to get next event")) {
+ loop {
+ let event = match event_queue.next() {
+ Some(Ok(event)) => event,
+ Some(Err(err)) => {
+ log::error!("rtl8168d: failed to get next event: {err}");
+ continue;
+ }
+ None => break,
+ };
match event.user_data {
Source::Irq => {
let mut irq = [0; 8];
- irq_file.irq_handle().read(&mut irq).unwrap();
+ if let Err(err) = irq_file.irq_handle().read(&mut irq) {
+ log::error!("rtl8168d: failed to read IRQ: {err}");
+ continue;
+ }
//TODO: This may be causing spurious interrupts
if unsafe { scheme.adapter_mut().irq() } {
- irq_file.irq_handle().write(&mut irq).unwrap();
-
- scheme.tick().unwrap();
+ if let Err(err) = irq_file.irq_handle().write(&mut irq) {
+ log::error!("rtl8168d: failed to write IRQ: {err}");
+ continue;
+ }
+
+ if let Err(err) = scheme.tick() {
+ log::error!("rtl8168d: failed to handle IRQ tick: {err}");
+ }
}
}
Source::Scheme => {
- scheme.tick().unwrap();
+ if let Err(err) = scheme.tick() {
+ log::error!("rtl8168d: failed to handle scheme op: {err}");
+ }
}
}
}
- unreachable!()
+
+ process::exit(0);
}
diff --git a/drivers/net/virtio-netd/src/main.rs b/drivers/net/virtio-netd/src/main.rs
index 17d168ef..adbd1086 100644
--- a/drivers/net/virtio-netd/src/main.rs
+++ b/drivers/net/virtio-netd/src/main.rs
@@ -3,6 +3,7 @@ mod scheme;
use std::fs::File;
use std::io::{Read, Write};
use std::mem;
+use std::process;
use driver_network::NetworkScheme;
use pcid_interface::PciFunctionHandle;
@@ -31,8 +32,11 @@ fn main() {
}
fn daemon_runner(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
- deamon(daemon, pcid_handle).unwrap();
- unreachable!();
+ deamon(daemon, pcid_handle).unwrap_or_else(|err| {
+ log::error!("virtio-netd: daemon failed: {err}");
+ process::exit(1);
+ });
+ process::exit(0);
}
fn deamon(
@@ -52,7 +56,10 @@ fn deamon(
// 0x1000 - virtio-net
let pci_config = pcid_handle.config();
- assert_eq!(pci_config.func.full_device_id.device_id, 0x1000);
+ if pci_config.func.full_device_id.device_id != 0x1000 {
+ log::error!("virtio-netd: unexpected device ID {:#06x}, expected 0x1000", pci_config.func.full_device_id.device_id);
+ process::exit(1);
+ }
log::info!("virtio-net: initiating startup sequence :^)");
let device = virtio_core::probe_device(&mut pcid_handle)?;
@@ -84,7 +91,8 @@ fn deamon(
device.transport.ack_driver_feature(VIRTIO_NET_F_MAC);
mac
} else {
- unimplemented!()
+ log::error!("virtio-netd: device does not support MAC feature");
+ return Err("virtio-netd: VIRTIO_NET_F_MAC not supported".into());
};
device.transport.finalize_features();
@@ -126,11 +134,22 @@ fn deamon(
data: 0,
})?;
- libredox::call::setrens(0, 0).expect("virtio-netd: failed to enter null namespace");
+ libredox::call::setrens(0, 0).unwrap_or_else(|err| {
+ log::error!("virtio-netd: failed to enter null namespace: {err}");
+ process::exit(1);
+ });
- scheme.tick()?;
+ if let Err(err) = scheme.tick() {
+ log::error!("virtio-netd: failed initial scheme tick: {err}");
+ process::exit(1);
+ }
loop {
- event_queue.read(&mut [0; mem::size_of::<syscall::Event>()])?; // Wait for event
- scheme.tick()?;
+ if let Err(err) = event_queue.read(&mut [0; mem::size_of::<syscall::Event>()]) {
+ log::error!("virtio-netd: failed to read event: {err}");
+ continue;
+ }
+ if let Err(err) = scheme.tick() {
+ log::error!("virtio-netd: failed to handle scheme event: {err}");
+ }
}
@@ -0,0 +1,118 @@
# P2-network-error-handling.patch
#
# Network driver error handling: replace unwrap()/expect()/panic!() with proper
# error propagation and graceful exits across the e1000d, ixgbed, rtl8139d,
# rtl8168d, and virtio-netd drivers.
#
# Covers:
# - e1000d/device.rs: replace unreachable!() in DMA array conversion
# - ixgbed/Cargo.toml: add log dependency
# - rtl8139d/device.rs: replace unreachable!() with EIO error
# - rtl8168d/device.rs: replace unreachable!() with EIO error
# - virtio-netd/scheme.rs: DMA allocation error handling for rx buffers
#
diff --git a/drivers/net/e1000d/src/device.rs b/drivers/net/e1000d/src/device.rs
index 4c518f30..0e42d72b 100644
--- a/drivers/net/e1000d/src/device.rs
+++ b/drivers/net/e1000d/src/device.rs
@@ -3,7 +3,7 @@ use std::{cmp, mem, ptr, slice, thread, time};
use driver_network::NetworkAdapter;
-use syscall::error::Result;
+use syscall::error::{Error, Result, EIO};
use common::dma::Dma;
@@ -207,12 +207,11 @@ impl NetworkAdapter for Intel8254x {
}
fn dma_array<T, const N: usize>() -> Result<[Dma<T>; N]> {
- Ok((0..N)
+ let vec: Vec<Dma<T>> = (0..N)
.map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() }))
- .collect::<Result<Vec<_>>>()?
- .try_into()
- .unwrap_or_else(|_| unreachable!()))
+ .collect::<Result<Vec<_>>>()?;
+ vec.try_into().map_err(|_| Error::new(EIO))
}
impl Intel8254x {
pub unsafe fn new(base: usize) -> Result<Self> {
diff --git a/drivers/net/ixgbed/Cargo.toml b/drivers/net/ixgbed/Cargo.toml
index d97ff398..fcaf4b19 100644
--- a/drivers/net/ixgbed/Cargo.toml
+++ b/drivers/net/ixgbed/Cargo.toml
@@ -7,7 +7,8 @@ edition = "2021"
[dependencies]
bitflags.workspace = true
libredox.workspace = true
+log.workspace = true
redox_event.workspace = true
redox_syscall.workspace = true
diff --git a/drivers/net/rtl8139d/src/device.rs b/drivers/net/rtl8139d/src/device.rs
index 37167ee2..d7428132 100644
--- a/drivers/net/rtl8139d/src/device.rs
+++ b/drivers/net/rtl8139d/src/device.rs
@@ -215,8 +215,8 @@ impl Rtl8139 {
.map(|_| Ok(Dma::zeroed()?.assume_init()))
.collect::<Result<Vec<_>>>()?
.try_into()
- .unwrap_or_else(|_| unreachable!()),
+ .map_err(|_| Error::new(EIO))?,
transmit_i: 0,
mac_address: [0; 6],
};
diff --git a/drivers/net/rtl8168d/src/device.rs b/drivers/net/rtl8168d/src/device.rs
index ae545ec4..7229a52d 100644
--- a/drivers/net/rtl8168d/src/device.rs
+++ b/drivers/net/rtl8168d/src/device.rs
@@ -177,7 +177,7 @@ impl Rtl8168 {
.map(|_| Ok(Dma::zeroed()?.assume_init()))
.collect::<Result<Vec<_>>>()?
.try_into()
- .unwrap_or_else(|_| unreachable!()),
+ .map_err(|_| Error::new(EIO))?,
receive_ring: Dma::zeroed()?.assume_init(),
receive_i: 0,
@@ -185,8 +185,8 @@ impl Rtl8168 {
.map(|_| Ok(Dma::zeroed()?.assume_init()))
.collect::<Result<Vec<_>>>()?
.try_into()
- .unwrap_or_else(|_| unreachable!()),
+ .map_err(|_| Error::new(EIO))?,
transmit_ring: Dma::zeroed()?.assume_init(),
transmit_i: 0,
transmit_buffer_h: [Dma::zeroed()?.assume_init()],
diff --git a/drivers/net/virtio-netd/src/scheme.rs b/drivers/net/virtio-netd/src/scheme.rs
index 59b3b93e..d0acb2ba 100644
--- a/drivers/net/virtio-netd/src/scheme.rs
+++ b/drivers/net/virtio-netd/src/scheme.rs
@@ -27,11 +27,16 @@ impl<'a> VirtioNet<'a> {
// Populate all of the `rx_queue` with buffers to maximize performence.
let mut rx_buffers = vec![];
for i in 0..(rx.descriptor_len() as usize) {
- rx_buffers.push(unsafe {
- Dma::<[u8]>::zeroed_slice(MAX_BUFFER_LEN)
- .unwrap()
- .assume_init()
- });
+ let buf = unsafe {
+ match Dma::<[u8]>::zeroed_slice(MAX_BUFFER_LEN) {
+ Ok(dma) => dma.assume_init(),
+ Err(err) => {
+ log::error!("virtio-netd: failed to allocate rx buffer: {err}");
+ continue;
+ }
+ }
+ };
+ rx_buffers.push(buf);
let chain = ChainBuilder::new()
.chain(Buffer::new_unsized(&rx_buffers[i]).flags(DescriptorFlags::WRITE_ONLY))
@@ -0,0 +1,601 @@
# P2-storage-error-handling.patch
#
# Storage driver error handling: replace unwrap()/expect()/panic!() with proper
# error propagation and graceful exits across AHCI, IDE, NVMe, and VirtIO block drivers.
#
# Covers:
# - ahcid/disk_ata.rs: replace unreachable!() with EIO error
# - ahcid/disk_atapi.rs: replace unreachable!() with EBADF error
# - ahcid/hba.rs: DMA allocation error handling
# - ided/ide.rs: assert→debug_assert, try_into error handling
# - nvmed/executor.rs: executor initialization error handling
# - nvmed/identify.rs: DMA allocation, unreachable!() fallback
# - nvmed/mod.rs: assert→debug_assert, unwrap→proper error/exit
# - nvmed/queues.rs: unreachable!()→safe fallback
# - virtio-blkd/scheme.rs: DMA allocation error handling, assert→if check
#
diff --git a/drivers/storage/ahcid/src/ahci/disk_ata.rs b/drivers/storage/ahcid/src/ahci/disk_ata.rs
index 4f83c51d..7423603b 100644
--- a/drivers/storage/ahcid/src/ahci/disk_ata.rs
+++ b/drivers/storage/ahcid/src/ahci/disk_ata.rs
@@ -1,7 +1,7 @@
use std::convert::TryInto;
use std::ptr;
-use syscall::error::Result;
+use syscall::error::{Error, Result, EIO};
use common::dma::Dma;
@@ -39,7 +39,7 @@ impl DiskATA {
.map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() }))
.collect::<Result<Vec<_>>>()?
.try_into()
- .unwrap_or_else(|_| unreachable!());
+ .map_err(|_| Error::new(EIO))?;
let mut fb = unsafe { Dma::zeroed()?.assume_init() };
let buf = unsafe { Dma::zeroed()?.assume_init() };
diff --git a/drivers/storage/ahcid/src/ahci/disk_atapi.rs b/drivers/storage/ahcid/src/ahci/disk_atapi.rs
index a0e75c09..8fbdfbef 100644
--- a/drivers/storage/ahcid/src/ahci/disk_atapi.rs
+++ b/drivers/storage/ahcid/src/ahci/disk_atapi.rs
@@ -37,7 +37,7 @@ impl DiskATAPI {
.map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() }))
.collect::<Result<Vec<_>>>()?
.try_into()
- .unwrap_or_else(|_| unreachable!());
+ .map_err(|_| Error::new(EBADF))?;
let mut fb = unsafe { Dma::zeroed()?.assume_init() };
let mut buf = unsafe { Dma::zeroed()?.assume_init() };
diff --git a/drivers/storage/ahcid/src/ahci/hba.rs b/drivers/storage/ahcid/src/ahci/hba.rs
index bea8792c..11a3d4ae 100644
--- a/drivers/storage/ahcid/src/ahci/hba.rs
+++ b/drivers/storage/ahcid/src/ahci/hba.rs
@@ -178,8 +178,11 @@ impl HbaPort {
clb: &mut Dma<[HbaCmdHeader; 32]>,
ctbas: &mut [Dma<HbaCmdTable>; 32],
) -> Result<u64> {
- let dest: Dma<[u16; 256]> = Dma::new([0; 256]).unwrap();
+ let dest: Dma<[u16; 256]> = Dma::new([0; 256]).map_err(|err| {
+ error!("ahcid: failed to allocate DMA buffer: {err}");
+ Error::new(EIO)
+ })?;
let slot = self
.ata_start(clb, ctbas, |cmdheader, cmdfis, prdt_entries, _acmd| {
diff --git a/drivers/storage/ided/src/ide.rs b/drivers/storage/ided/src/ide.rs
index 5faf3250..094e5889 100644
--- a/drivers/storage/ided/src/ide.rs
+++ b/drivers/storage/ided/src/ide.rs
@@ -184,10 +184,10 @@ impl Disk for AtaDisk {
let block = start_block + (count as u64) / 512;
//TODO: support other LBA modes
- assert!(block < 0x1_0000_0000_0000);
+ debug_assert!(block < 0x1_0000_0000_0000);
let sectors = (chunk.len() + 511) / 512;
- assert!(sectors <= 128);
+ debug_assert!(sectors <= 128);
log::trace!(
"IDE read chan {} dev {} block {:#x} count {:#x}",
@@ -205,7 +205,7 @@ impl Disk for AtaDisk {
// Make PRDT EOT match chunk size
for i in 0..sectors {
chan.prdt[i] = PrdtEntry {
- phys: (chan.buf.physical() + i * 512).try_into().unwrap(),
+ phys: (chan.buf.physical() + i * 512).try_into().map_err(|_| Error::new(EIO))?,
size: 512,
flags: if i + 1 == sectors {
1 << 15 // End of table
@@ -216,7 +216,7 @@ impl Disk for AtaDisk {
}
// Set PRDT
let prdt = chan.prdt.physical();
- chan.busmaster_prdt.write(prdt.try_into().unwrap());
+ chan.busmaster_prdt.write(prdt.try_into().map_err(|_| Error::new(EIO))?);
// Set to read
chan.busmaster_command.writef(1 << 3, true);
// Clear interrupt and error bits
@@ -325,10 +325,10 @@ impl Disk for AtaDisk {
let block = start_block + (count as u64) / 512;
//TODO: support other LBA modes
- assert!(block < 0x1_0000_0000_0000);
+ debug_assert!(block < 0x1_0000_0000_0000);
let sectors = (chunk.len() + 511) / 512;
- assert!(sectors <= 128);
+ debug_assert!(sectors <= 128);
log::trace!(
"IDE write chan {} dev {} block {:#x} count {:#x}",
@@ -346,7 +346,7 @@ impl Disk for AtaDisk {
// Make PRDT EOT match chunk size
for i in 0..sectors {
chan.prdt[i] = PrdtEntry {
- phys: (chan.buf.physical() + i * 512).try_into().unwrap(),
+ phys: (chan.buf.physical() + i * 512).try_into().map_err(|_| Error::new(EIO))?,
size: 512,
flags: if i + 1 == sectors {
1 << 15 // End of table
@@ -357,8 +357,8 @@ impl Disk for AtaDisk {
}
// Set PRDT
let prdt = chan.prdt.physical();
- chan.busmaster_prdt.write(prdt.try_into().unwrap());
+ chan.busmaster_prdt.write(prdt.try_into().map_err(|_| Error::new(EIO))?);
// Set to write
chan.busmaster_command.writef(1 << 3, false);
// Clear interrupt and error bits
diff --git a/drivers/storage/nvmed/src/nvme/executor.rs b/drivers/storage/nvmed/src/nvme/executor.rs
index 6242fa98..c1435e88 100644
--- a/drivers/storage/nvmed/src/nvme/executor.rs
+++ b/drivers/storage/nvmed/src/nvme/executor.rs
@@ -34,7 +34,12 @@ impl Hardware for NvmeHw {
&VTABLE
}
fn current() -> std::rc::Rc<executor::LocalExecutor<Self>> {
- THE_EXECUTOR.with(|exec| Rc::clone(exec.borrow().as_ref().unwrap()))
+ THE_EXECUTOR.with(|exec| {
+ Rc::clone(exec.borrow().as_ref().unwrap_or_else(|| {
+ log::error!("nvmed: internal error: executor not initialized");
+ std::process::exit(1);
+ }))
+ })
}
fn try_submit(
nvme: &Arc<Nvme>,
diff --git a/drivers/storage/nvmed/src/nvme/identify.rs b/drivers/storage/nvmed/src/nvme/identify.rs
index 05e5b9b2..b1b6e959 100644
--- a/drivers/storage/nvmed/src/nvme/identify.rs
+++ b/drivers/storage/nvmed/src/nvme/identify.rs
@@ -126,7 +126,7 @@ impl LbaFormat {
0b01 => RelativePerformance::Better,
0b10 => RelativePerformance::Good,
0b11 => RelativePerformance::Degraded,
- _ => unreachable!(),
+ _ => RelativePerformance::Degraded,
}
}
pub fn is_available(&self) -> bool {
@@ -153,7 +153,14 @@ impl Nvme {
/// Returns the serial number, model, and firmware, in that order.
pub async fn identify_controller(&self) {
// TODO: Use same buffer
- let data: Dma<IdentifyControllerData> = unsafe { Dma::zeroed().unwrap().assume_init() };
+ let data: Dma<IdentifyControllerData> = unsafe {
+ Dma::zeroed()
+ .map(|dma| dma.assume_init())
+ .unwrap_or_else(|err| {
+ log::error!("nvmed: failed to allocate identify DMA: {err}");
+ std::process::exit(1);
+ })
+ };
// println!(" - Attempting to identify controller");
let comp = self
@@ -182,7 +189,14 @@ impl Nvme {
}
pub async fn identify_namespace_list(&self, base: u32) -> Vec<u32> {
// TODO: Use buffer
- let data: Dma<[u32; 1024]> = unsafe { Dma::zeroed().unwrap().assume_init() };
+ let data: Dma<[u32; 1024]> = unsafe {
+ Dma::zeroed()
+ .map(|dma| dma.assume_init())
+ .unwrap_or_else(|err| {
+ log::error!("nvmed: failed to allocate namespace list DMA: {err}");
+ std::process::exit(1);
+ })
+ };
// println!(" - Attempting to retrieve namespace ID list");
let comp = self
@@ -198,7 +212,14 @@ impl Nvme {
}
pub async fn identify_namespace(&self, nsid: u32) -> NvmeNamespace {
//TODO: Use buffer
- let data: Dma<IdentifyNamespaceData> = unsafe { Dma::zeroed().unwrap().assume_init() };
+ let data: Dma<IdentifyNamespaceData> = unsafe {
+ Dma::zeroed()
+ .map(|dma| dma.assume_init())
+ .unwrap_or_else(|err| {
+ log::error!("nvmed: failed to allocate namespace DMA: {err}");
+ std::process::exit(1);
+ })
+ };
log::debug!("Attempting to identify namespace {nsid}");
let comp = self
@@ -216,7 +237,10 @@ impl Nvme {
let block_size = data
.formatted_lba_size()
.lba_data_size()
- .expect("nvmed: error: size outside 512-2^64 range");
+ .unwrap_or_else(|| {
+ log::error!("nvmed: error: size outside 512-2^64 range");
+ std::process::exit(1);
+ });
log::debug!("NVME block size: {}", block_size);
NvmeNamespace {
diff --git a/drivers/storage/nvmed/src/nvme/mod.rs b/drivers/storage/nvmed/src/nvme/mod.rs
index 682ee933..90a25d5b 100644
--- a/drivers/storage/nvmed/src/nvme/mod.rs
+++ b/drivers/storage/nvmed/src/nvme/mod.rs
@@ -160,7 +160,15 @@ impl Nvme {
}
fn cur_thread_ctxt(&self) -> Arc<ReentrantMutex<ThreadCtxt>> {
// TODO: multi-threading
- Arc::clone(self.thread_ctxts.read().get(&0).unwrap())
+ Arc::clone(
+ self.thread_ctxts
+ .read()
+ .get(&0)
+ .unwrap_or_else(|| {
+ log::error!("nvmed: internal error: thread context 0 missing");
+ std::process::exit(1);
+ }),
+ )
}
pub unsafe fn submission_queue_tail(&self, qid: u16, tail: u16) {
@@ -208,10 +216,22 @@ impl Nvme {
}
for (qid, iv) in self.cq_ivs.get_mut().iter_mut() {
- let ctxt = thread_ctxts.get(&0).unwrap().lock();
+ let ctxt = match thread_ctxts.get(&0) {
+ Some(c) => c.lock(),
+ None => {
+ log::error!("nvmed: internal error: thread context 0 missing");
+ return Err(Error::new(EIO));
+ }
+ };
let queues = ctxt.queues.borrow();
- let &(ref cq, ref sq) = queues.get(qid).unwrap();
+ let (cq, sq) = match queues.get(qid) {
+ Some(pair) => pair,
+ None => {
+ log::error!("nvmed: internal error: queue {qid} missing");
+ return Err(Error::new(EIO));
+ }
+ };
log::debug!(
"iv {iv} [cq {qid}: {:X}, {}] [sq {qid}: {:X}, {}]",
cq.data.physical(),
@@ -222,7 +242,13 @@ impl Nvme {
}
{
- let main_ctxt = thread_ctxts.get(&0).unwrap().lock();
+ let main_ctxt = match thread_ctxts.get(&0) {
+ Some(c) => c.lock(),
+ None => {
+ log::error!("nvmed: internal error: thread context 0 missing");
+ return Err(Error::new(EIO));
+ }
+ };
for (i, prp) in main_ctxt.buffer_prp.borrow_mut().iter_mut().enumerate() {
*prp = (main_ctxt.buffer.borrow_mut().physical() + i * 4096) as u64;
@@ -231,7 +257,13 @@ impl Nvme {
let regs = self.regs.get_mut();
let mut queues = main_ctxt.queues.borrow_mut();
- let (asq, acq) = queues.get_mut(&0).unwrap();
+ let (asq, acq) = match queues.get_mut(&0) {
+ Some(pair) => pair,
+ None => {
+ log::error!("nvmed: internal error: admin queue pair missing");
+ return Err(Error::new(EIO));
+ }
+ };
regs.aqa
.write(((acq.data.len() as u32 - 1) << 16) | (asq.data.len() as u32 - 1));
regs.asq_low.write(asq.data.physical() as u32);
@@ -281,14 +313,14 @@ impl Nvme {
let vector = vector as u8;
if masked {
- assert_ne!(
+ debug_assert_ne!(
to_clear & (1 << vector),
(1 << vector),
"nvmed: internal error: cannot both mask and set"
);
to_mask |= 1 << vector;
} else {
- assert_ne!(
+ debug_assert_ne!(
to_mask & (1 << vector),
(1 << vector),
"nvmed: internal error: cannot both mask and set"
@@ -326,22 +358,27 @@ impl Nvme {
cmd_init: impl FnOnce(CmdId) -> NvmeCmd,
fail: impl FnOnce(),
) -> Option<(CqId, CmdId)> {
- match ctxt.queues.borrow_mut().get_mut(&sq_id).unwrap() {
- (sq, _cq) => {
- if sq.is_full() {
- fail();
- return None;
- }
- let cmd_id = sq.tail;
- let tail = sq.submit_unchecked(cmd_init(cmd_id));
-
- // TODO: Submit in bulk
- unsafe {
- self.submission_queue_tail(sq_id, tail);
- }
- Some((sq_id, cmd_id))
+ let mut queues_ref = ctxt.queues.borrow_mut();
+ let (sq, _cq) = match queues_ref.get_mut(&sq_id) {
+ Some(pair) => pair,
+ None => {
+ log::error!("nvmed: internal error: submission queue {sq_id} missing");
+ fail();
+ return None;
}
+ };
+ if sq.is_full() {
+ fail();
+ return None;
+ }
+ let cmd_id = sq.tail;
+ let tail = sq.submit_unchecked(cmd_init(cmd_id));
+
+ // TODO: Submit in bulk
+ unsafe {
+ self.submission_queue_tail(sq_id, tail);
}
+ Some((sq_id, cmd_id))
}
pub async fn create_io_completion_queue(
@@ -349,13 +386,19 @@ impl Nvme {
io_cq_id: CqId,
vector: Option<Iv>,
) -> NvmeCompQueue {
- let queue = NvmeCompQueue::new().expect("nvmed: failed to allocate I/O completion queue");
-
- let len = u16::try_from(queue.data.len())
- .expect("nvmed: internal error: I/O CQ longer than 2^16 entries");
- let raw_len = len
- .checked_sub(1)
- .expect("nvmed: internal error: CQID 0 for I/O CQ");
+ let queue = NvmeCompQueue::new().unwrap_or_else(|err| {
+ log::error!("nvmed: failed to allocate I/O completion queue: {err}");
+ std::process::exit(1);
+ });
+
+ let len = u16::try_from(queue.data.len()).unwrap_or_else(|_| {
+ log::error!("nvmed: internal error: I/O CQ longer than 2^16 entries");
+ std::process::exit(1);
+ });
+ let raw_len = len.checked_sub(1).unwrap_or_else(|| {
+ log::error!("nvmed: internal error: CQID 0 for I/O CQ");
+ std::process::exit(1);
+ });
let comp = self
.submit_and_complete_admin_command(|cid| {
@@ -370,22 +413,28 @@ impl Nvme {
.await;
/*match comp.status.specific {
- 1 => panic!("invalid queue identifier"),
- 2 => panic!("invalid queue size"),
- 8 => panic!("invalid interrupt vector"),
+ 1 => { log::error!("nvmed: invalid queue identifier"); std::process::exit(1); }
+ 2 => { log::error!("nvmed: invalid queue size"); std::process::exit(1); }
+ 8 => { log::error!("nvmed: invalid interrupt vector"); std::process::exit(1); }
_ => (),
}*/
queue
}
pub async fn create_io_submission_queue(&self, io_sq_id: SqId, io_cq_id: CqId) -> NvmeCmdQueue {
- let q = NvmeCmdQueue::new().expect("failed to create submission queue");
-
- let len = u16::try_from(q.data.len())
- .expect("nvmed: internal error: I/O SQ longer than 2^16 entries");
- let raw_len = len
- .checked_sub(1)
- .expect("nvmed: internal error: SQID 0 for I/O SQ");
+ let q = NvmeCmdQueue::new().unwrap_or_else(|err| {
+ log::error!("nvmed: failed to create submission queue: {err}");
+ std::process::exit(1);
+ });
+
+ let len = u16::try_from(q.data.len()).unwrap_or_else(|_| {
+ log::error!("nvmed: internal error: I/O SQ longer than 2^16 entries");
+ std::process::exit(1);
+ });
+ let raw_len = len.checked_sub(1).unwrap_or_else(|| {
+ log::error!("nvmed: internal error: SQID 0 for I/O SQ");
+ std::process::exit(1);
+ });
let comp = self
.submit_and_complete_admin_command(|cid| {
@@ -399,9 +448,9 @@ impl Nvme {
})
.await;
/*match comp.status.specific {
- 0 => panic!("completion queue invalid"),
- 1 => panic!("invalid queue identifier"),
- 2 => panic!("invalid queue size"),
+ 0 => { log::error!("nvmed: completion queue invalid"); std::process::exit(1); }
+ 1 => { log::error!("nvmed: invalid queue identifier"); std::process::exit(1); }
+ 2 => { log::error!("nvmed: invalid queue size"); std::process::exit(1); }
_ => (),
}*/
@@ -431,7 +480,10 @@ impl Nvme {
self.thread_ctxts
.read()
.get(&0)
- .unwrap()
+ .unwrap_or_else(|| {
+ log::error!("nvmed: internal error: thread context 0 missing");
+ std::process::exit(1);
+ })
.lock()
.queues
.borrow_mut()
@@ -497,8 +549,8 @@ impl Nvme {
for chunk in buf.chunks_mut(/* TODO: buf len */ 8192) {
let blocks = (chunk.len() + block_size - 1) / block_size;
- assert!(blocks > 0);
- assert!(blocks <= 0x1_0000);
+ debug_assert!(blocks > 0);
+ debug_assert!(blocks <= 0x1_0000);
self.namespace_rw(&*ctxt, namespace, lba, (blocks - 1) as u16, false)
.await?;
@@ -525,8 +577,8 @@ impl Nvme {
for chunk in buf.chunks(/* TODO: buf len */ 8192) {
let blocks = (chunk.len() + block_size - 1) / block_size;
- assert!(blocks > 0);
- assert!(blocks <= 0x1_0000);
+ debug_assert!(blocks > 0);
+ debug_assert!(blocks <= 0x1_0000);
ctxt.buffer.borrow_mut()[..chunk.len()].copy_from_slice(chunk);
diff --git a/drivers/storage/nvmed/src/nvme/queues.rs b/drivers/storage/nvmed/src/nvme/queues.rs
index a3712aeb..438c905c 100644
--- a/drivers/storage/nvmed/src/nvme/queues.rs
+++ b/drivers/storage/nvmed/src/nvme/queues.rs
@@ -145,8 +145,8 @@ impl Status {
3 => Self::PathRelatedStatus(code),
4..=6 => Self::Rsvd(code),
7 => Self::Vendor(code),
- _ => unreachable!(),
+ _ => Self::Vendor(code),
}
}
}
diff --git a/drivers/storage/virtio-blkd/src/scheme.rs b/drivers/storage/virtio-blkd/src/scheme.rs
index ec4ecf73..39fb24a8 100644
--- a/drivers/storage/virtio-blkd/src/scheme.rs
+++ b/drivers/storage/virtio-blkd/src/scheme.rs
@@ -15,19 +15,34 @@ trait BlkExtension {
impl BlkExtension for Queue<'_> {
async fn read(&self, block: u64, target: &mut [u8]) -> usize {
- let req = Dma::new(BlockVirtRequest {
+ let req = match Dma::new(BlockVirtRequest {
ty: BlockRequestTy::In,
reserved: 0,
sector: block,
- })
- .unwrap();
+ }) {
+ Ok(req) => req,
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate read request DMA: {err}");
+ return 0;
+ }
+ };
let result = unsafe {
- Dma::<[u8]>::zeroed_slice(target.len())
- .unwrap()
- .assume_init()
+ match Dma::<[u8]>::zeroed_slice(target.len()) {
+ Ok(dma) => dma.assume_init(),
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate read buffer DMA: {err}");
+ return 0;
+ }
+ }
+ };
+ let status = match Dma::new(u8::MAX) {
+ Ok(s) => s,
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate read status DMA: {err}");
+ return 0;
+ }
};
- let status = Dma::new(u8::MAX).unwrap();
let chain = ChainBuilder::new()
.chain(Buffer::new(&req))
@@ -37,28 +52,46 @@ impl BlkExtension for Queue<'_> {
// XXX: Subtract 1 because the of status byte.
let written = self.send(chain).await as usize - 1;
- assert_eq!(*status, 0);
+ if *status != 0 {
+ log::error!("virtio-blkd: read failed with status {}", *status);
+ return 0;
+ }
target[..written].copy_from_slice(&result);
written
}
async fn write(&self, block: u64, target: &[u8]) -> usize {
- let req = Dma::new(BlockVirtRequest {
+ let req = match Dma::new(BlockVirtRequest {
ty: BlockRequestTy::Out,
reserved: 0,
sector: block,
- })
- .unwrap();
+ }) {
+ Ok(req) => req,
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate write request DMA: {err}");
+ return 0;
+ }
+ };
let mut result = unsafe {
- Dma::<[u8]>::zeroed_slice(target.len())
- .unwrap()
- .assume_init()
+ match Dma::<[u8]>::zeroed_slice(target.len()) {
+ Ok(dma) => dma.assume_init(),
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate write buffer DMA: {err}");
+ return 0;
+ }
+ }
};
result.copy_from_slice(target.as_ref());
- let status = Dma::new(u8::MAX).unwrap();
+ let status = match Dma::new(u8::MAX) {
+ Ok(s) => s,
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate write status DMA: {err}");
+ return 0;
+ }
+ };
let chain = ChainBuilder::new()
.chain(Buffer::new(&req))
@@ -67,7 +100,10 @@ impl BlkExtension for Queue<'_> {
.build();
self.send(chain).await as usize;
- assert_eq!(*status, 0);
+ if *status != 0 {
+ log::error!("virtio-blkd: write failed with status {}", *status);
+ return 0;
+ }
target.len()
}
@@ -0,0 +1,158 @@
# P2-usb-pm-and-drivers.patch
#
# USB power management and driver interface improvements:
# suspend/resume commands, SCSI driver enablement, PortPmState type,
# IRQ reactor staged port state fallback.
#
# Covers:
# - usbctl/main.rs: pm-state, suspend, resume subcommands
# - xhcid/drivers.toml: enable SCSI over USB driver (was commented out)
# - xhcid/driver_interface.rs: PortPmState enum, suspend/resume/port_pm_state methods
# - xhcid/irq_reactor.rs: staged_port_states fallback in with_ring/with_ring_mut
#
diff --git a/drivers/usb/usbctl/src/main.rs b/drivers/usb/usbctl/src/main.rs
index 9b5773d9..232f7cfc 100644
--- a/drivers/usb/usbctl/src/main.rs
+++ b/drivers/usb/usbctl/src/main.rs
@@ -15,6 +15,9 @@ fn main() {
Command::new("port")
.arg(Arg::new("PORT").num_args(1).required(true))
.subcommand(Command::new("status"))
+ .subcommand(Command::new("pm-state"))
+ .subcommand(Command::new("suspend"))
+ .subcommand(Command::new("resume"))
.subcommand(
Command::new("endpoint")
.arg(Arg::new("ENDPOINT_NUM").num_args(1).required(true))
@@ -38,7 +41,16 @@ fn main() {
if let Some(_status_scmd_matches) = port_scmd_matches.subcommand_matches("status") {
let state = handle.port_state().expect("Failed to get port state");
println!("{}", state.as_str());
+ } else if let Some(_pm_state_scmd_matches) = port_scmd_matches.subcommand_matches("pm-state") {
+ let state = handle
+ .port_pm_state()
+ .expect("Failed to get port power-management state");
+ println!("{}", state.as_str());
+ } else if let Some(_suspend_scmd_matches) = port_scmd_matches.subcommand_matches("suspend") {
+ handle.suspend_device().expect("Failed to suspend device");
+ } else if let Some(_resume_scmd_matches) = port_scmd_matches.subcommand_matches("resume") {
+ handle.resume_device().expect("Failed to resume device");
} else if let Some(endp_scmd_matches) = port_scmd_matches.subcommand_matches("endpoint") {
let endp_num = endp_scmd_matches
.get_one::<String>("ENDPOINT_NUM")
diff --git a/drivers/usb/xhcid/drivers.toml b/drivers/usb/xhcid/drivers.toml
index 83c90e23..470ec063 100644
--- a/drivers/usb/xhcid/drivers.toml
+++ b/drivers/usb/xhcid/drivers.toml
@@ -1,10 +1,9 @@
-#TODO: causes XHCI errors
-#[[drivers]]
-#name = "SCSI over USB"
-#class = 8 # Mass Storage class
-#subclass = 6 # SCSI transparent command set
-#command = ["usbscsid", "$SCHEME", "$PORT", "$IF_PROTO"]
+[[drivers]]
+name = "SCSI over USB"
+class = 8 # Mass Storage class
+subclass = 6 # SCSI transparent command set
+command = ["usbscsid", "$SCHEME", "$PORT", "$IF_PROTO"]
[[drivers]]
name = "USB HUB"
diff --git a/drivers/usb/xhcid/src/driver_interface.rs b/drivers/usb/xhcid/src/driver_interface.rs
index 727f8d7e..82f839ae 100644
--- a/drivers/usb/xhcid/src/driver_interface.rs
+++ b/drivers/usb/xhcid/src/driver_interface.rs
@@ -444,6 +444,33 @@ impl str::FromStr for PortState {
}
}
+#[repr(u8)]
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
+pub enum PortPmState {
+ Active,
+ Suspended,
+}
+impl PortPmState {
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ Self::Active => "active",
+ Self::Suspended => "suspended",
+ }
+ }
+}
+
+impl str::FromStr for PortPmState {
+ type Err = Invalid;
+
+ fn from_str(s: &str) -> result::Result<Self, Self::Err> {
+ Ok(match s {
+ "active" => Self::Active,
+ "suspended" => Self::Suspended,
+ _ => return Err(Invalid("read reserved port PM state")),
+ })
+ }
+}
+
#[repr(u8)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub enum EndpointStatus {
@@ -560,6 +587,16 @@ impl XhciClientHandle {
let _bytes_written = file.write(&[])?;
Ok(())
}
+ pub fn suspend_device(&self) -> result::Result<(), XhciClientHandleError> {
+ let file = self.fd.openat("suspend", libredox::flag::O_WRONLY, 0)?;
+ let _bytes_written = file.write(&[])?;
+ Ok(())
+ }
+ pub fn resume_device(&self) -> result::Result<(), XhciClientHandleError> {
+ let file = self.fd.openat("resume", libredox::flag::O_WRONLY, 0)?;
+ let _bytes_written = file.write(&[])?;
+ Ok(())
+ }
pub fn get_standard_descs(&self) -> result::Result<DevDesc, XhciClientHandleError> {
let json = self.read("descriptors")?;
Ok(serde_json::from_slice(&json)?)
@@ -582,7 +619,11 @@ impl XhciClientHandle {
let string = self.read_to_string("state")?;
Ok(string.parse()?)
}
+ pub fn port_pm_state(&self) -> result::Result<PortPmState, XhciClientHandleError> {
+ let string = self.read_to_string("pm_state")?;
+ Ok(string.parse()?)
+ }
pub fn open_endpoint_ctl(&self, num: u8) -> result::Result<File, XhciClientHandleError> {
let path = format!("endpoints/{}/ctl", num);
let fd = self.fd.openat(&path, libredox::flag::O_RDWR, 0)?;
diff --git a/drivers/usb/xhcid/src/xhci/irq_reactor.rs b/drivers/usb/xhcid/src/xhci/irq_reactor.rs
index ac492d5b..310fe51f 100644
--- a/drivers/usb/xhcid/src/xhci/irq_reactor.rs
+++ b/drivers/usb/xhcid/src/xhci/irq_reactor.rs
@@ -633,7 +633,10 @@ impl<const N: usize> Xhci<N> {
pub fn with_ring<T, F: FnOnce(&Ring) -> T>(&self, id: RingId, function: F) -> Option<T> {
use super::RingOrStreams;
- let slot_state = self.port_states.get(&id.port)?;
+ let slot_state = self
+ .port_states
+ .get(&id.port)
+ .or_else(|| self.staged_port_states.get(&id.port))?;
let endpoint_state = slot_state.endpoint_states.get(&id.endpoint_num)?;
let ring_ref = match endpoint_state.transfer {
@@ -650,7 +653,10 @@ impl<const N: usize> Xhci<N> {
) -> Option<T> {
use super::RingOrStreams;
- let mut slot_state = self.port_states.get_mut(&id.port)?;
+ let mut slot_state = self
+ .port_states
+ .get_mut(&id.port)
+ .or_else(|| self.staged_port_states.get_mut(&id.port))?;
let mut endpoint_state = slot_state.endpoint_states.get_mut(&id.endpoint_num)?;
let ring_ref = match endpoint_state.transfer {
@@ -0,0 +1,398 @@
--- a/drivers/pcid/src/cfg_access/mod.rs
+++ b/drivers/pcid/src/cfg_access/mod.rs
@@ -349,7 +349,11 @@
let bus_addr = self.bus_addr(address.segment(), address.bus())?;
Some(unsafe { bus_addr.add(Self::bus_addr_offset_in_dwords(address, offset)) })
}
+
+ pub fn has_extended_config(&self, address: PciAddress) -> bool {
+ self.mmio_addr(address, 0x100).is_some()
+ }
}
impl ConfigRegionAccess for Pcie {
--- a/drivers/pcid/src/scheme.rs
+++ b/drivers/pcid/src/scheme.rs
@@ -5,12 +5,61 @@
use redox_scheme::{CallerCtx, OpenResult};
use scheme_utils::HandleMap;
use syscall::dirent::{DirEntry, DirentBuf, DirentKind};
-use syscall::error::{Error, Result, EACCES, EBADF, EINVAL, EIO, EISDIR, ENOENT, ENOTDIR, EALREADY};
+use syscall::error::{
+ Error, Result, EACCES, EALREADY, EBADF, EINVAL, EIO, EISDIR, ENOENT, ENOTDIR, EROFS,
+};
use syscall::flag::{MODE_CHR, MODE_DIR, O_DIRECTORY, O_STAT};
use syscall::schemev2::NewFdFlags;
use syscall::ENOLCK;
use crate::cfg_access::Pcie;
+
+const PCIE_EXTENDED_CAPABILITY_AER: u16 = 0x0001;
+
+#[derive(Clone, Copy)]
+enum AerRegisterName {
+ UncorStatus,
+ UncorMask,
+ UncorSeverity,
+ CorStatus,
+ CorMask,
+ Cap,
+ HeaderLog,
+}
+
+impl AerRegisterName {
+ fn from_path(path: &str) -> Option<Self> {
+ Some(match path {
+ "uncor_status" => Self::UncorStatus,
+ "uncor_mask" => Self::UncorMask,
+ "uncor_severity" => Self::UncorSeverity,
+ "cor_status" => Self::CorStatus,
+ "cor_mask" => Self::CorMask,
+ "cap" => Self::Cap,
+ "header_log" => Self::HeaderLog,
+ _ => return None,
+ })
+ }
+
+ const fn offset(self) -> u16 {
+ match self {
+ Self::UncorStatus => 0x00,
+ Self::UncorMask => 0x04,
+ Self::UncorSeverity => 0x08,
+ Self::CorStatus => 0x0C,
+ Self::CorMask => 0x10,
+ Self::Cap => 0x14,
+ Self::HeaderLog => 0x18,
+ }
+ }
+
+ const fn len(self) -> usize {
+ match self {
+ Self::HeaderLog => 16,
+ _ => 4,
+ }
+ }
+}
pub struct PciScheme {
handles: HandleMap<HandleWrapper>,
@@ -20,13 +69,27 @@
binds: HashMap<String, u32>,
}
enum Handle {
- TopLevel { entries: Vec<String> },
+ TopLevel {
+ entries: Vec<String>,
+ },
Access,
- Device,
- Channel { addr: PciAddress, st: ChannelState },
+ Device {
+ addr: PciAddress,
+ },
+ Channel {
+ addr: PciAddress,
+ st: ChannelState,
+ },
SchemeRoot,
/// Represents an open handle to a device's bind endpoint
- Bind { addr: PciAddress },
+ Bind {
+ addr: PciAddress,
+ },
+ AerDir,
+ Aer {
+ addr: PciAddress,
+ register: AerRegisterName,
+ },
/// Uevent surface for hotplug consumers. Opening uevent returns an object
/// from which device add/remove events can be read. Since pcid currently
/// only scans at startup, this surface is ready for hotplug polling consumers.
@@ -38,13 +101,23 @@
}
impl Handle {
fn is_file(&self) -> bool {
- matches!(self, Self::Access | Self::Channel { .. } | Self::Bind { .. } | Self::Uevent)
+ matches!(
+ self,
+ Self::Access
+ | Self::Channel { .. }
+ | Self::Bind { .. }
+ | Self::Aer { .. }
+ | Self::Uevent
+ )
}
fn is_dir(&self) -> bool {
!self.is_file()
}
fn requires_root(&self) -> bool {
- matches!(self, Self::Access | Self::Channel { .. } | Self::Bind { .. })
+ matches!(
+ self,
+ Self::Access | Self::Channel { .. } | Self::Bind { .. }
+ )
}
fn is_scheme_root(&self) -> bool {
matches!(self, Self::SchemeRoot)
@@ -57,6 +130,16 @@
}
const DEVICE_CONTENTS: &[&str] = &["channel", "bind"];
+const DEVICE_AER_CONTENTS: &[&str] = &["channel", "bind", "aer"];
+const AER_CONTENTS: &[&str] = &[
+ "uncor_status",
+ "uncor_mask",
+ "uncor_severity",
+ "cor_status",
+ "cor_mask",
+ "cap",
+ "header_log",
+];
impl PciScheme {
pub fn access(&mut self) -> usize {
@@ -141,7 +224,12 @@
let (len, mode) = match handle.inner {
Handle::TopLevel { ref entries } => (entries.len(), MODE_DIR | 0o755),
- Handle::Device => (DEVICE_CONTENTS.len(), MODE_DIR | 0o755),
+ Handle::Device { addr } => (
+ Self::device_entries(&self.pcie, addr).len(),
+ MODE_DIR | 0o755,
+ ),
+ Handle::AerDir => (AER_CONTENTS.len(), MODE_DIR | 0o755),
+ Handle::Aer { register, .. } => (register.len(), MODE_CHR | 0o444),
Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } => (0, MODE_CHR | 0o600),
Handle::Uevent => (0, MODE_CHR | 0o644),
Handle::SchemeRoot => return Err(Error::new(EBADF)),
@@ -154,7 +242,7 @@
&mut self,
id: usize,
buf: &mut [u8],
- _offset: u64,
+ offset: u64,
_fcntl_flags: u32,
_ctx: &CallerCtx,
) -> Result<usize> {
@@ -166,11 +254,14 @@
match handle.inner {
Handle::TopLevel { .. } => Err(Error::new(EISDIR)),
- Handle::Device => Err(Error::new(EISDIR)),
+ Handle::Device { .. } | Handle::AerDir => Err(Error::new(EISDIR)),
Handle::Channel {
addr: _,
ref mut st,
} => Self::read_channel(st, buf),
+ Handle::Aer { addr, register } => {
+ Self::read_aer_register(&self.pcie, addr, register, buf, offset)
+ }
Handle::Uevent => {
// Uevent surface is ready for hotplug polling consumers.
// pcid currently only scans at startup, so return empty (EAGAIN would indicate no data available).
@@ -209,8 +300,15 @@
}
return Ok(buf);
}
- Handle::Device => DEVICE_CONTENTS,
- Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } | Handle::Uevent => return Err(Error::new(ENOTDIR)),
+ Handle::Device { addr } => Self::device_entries(&self.pcie, addr),
+ Handle::AerDir => AER_CONTENTS,
+ Handle::Access
+ | Handle::Channel { .. }
+ | Handle::Bind { .. }
+ | Handle::Aer { .. }
+ | Handle::Uevent => {
+ return Err(Error::new(ENOTDIR));
+ }
Handle::SchemeRoot => return Err(Error::new(EBADF)),
};
@@ -243,6 +341,7 @@
Handle::Channel { addr, ref mut st } => {
Self::write_channel(&self.pcie, &mut self.tree, addr, st, buf)
}
+ Handle::Aer { .. } => Err(Error::new(EROFS)),
_ => Err(Error::new(EBADF)),
}
@@ -357,45 +456,151 @@
binds: HashMap::new(),
}
}
- fn parse_after_pci_addr(&mut self, addr: PciAddress, after: &str, ctx: &CallerCtx) -> Result<Handle> {
+ fn device_entries(pcie: &Pcie, addr: PciAddress) -> &'static [&'static str] {
+ if Self::find_pcie_extended_capability(pcie, addr, PCIE_EXTENDED_CAPABILITY_AER).is_some() {
+ DEVICE_AER_CONTENTS
+ } else {
+ DEVICE_CONTENTS
+ }
+ }
+ fn find_pcie_extended_capability(
+ pcie: &Pcie,
+ addr: PciAddress,
+ capability_id: u16,
+ ) -> Option<u16> {
+ if !pcie.has_extended_config(addr) {
+ return None;
+ }
+
+ let mut offset = 0x100_u16;
+
+ while offset <= 0xFFC {
+ let header = unsafe { pcie.read(addr, offset) };
+ if header == 0 || header == u32::MAX {
+ return None;
+ }
+
+ if (header & 0xFFFF) as u16 == capability_id {
+ return Some(offset);
+ }
+
+ let next = ((header >> 20) & 0xFFF) as u16;
+ if next < 0x100 || next <= offset || next > 0xFFC || next % 4 != 0 {
+ return None;
+ }
+ offset = next;
+ }
+
+ None
+ }
+ fn read_file_bytes(data: &[u8], buf: &mut [u8], offset: u64) -> Result<usize> {
+ let Ok(offset) = usize::try_from(offset) else {
+ return Ok(0);
+ };
+ if offset >= data.len() {
+ return Ok(0);
+ }
+
+ let count = std::cmp::min(buf.len(), data.len() - offset);
+ buf[..count].copy_from_slice(&data[offset..offset + count]);
+ Ok(count)
+ }
+ fn read_aer_register(
+ pcie: &Pcie,
+ addr: PciAddress,
+ register: AerRegisterName,
+ buf: &mut [u8],
+ offset: u64,
+ ) -> Result<usize> {
+ let Some(aer_base) =
+ Self::find_pcie_extended_capability(pcie, addr, PCIE_EXTENDED_CAPABILITY_AER)
+ else {
+ return Err(Error::new(ENOENT));
+ };
+
+ let mut data = [0_u8; 16];
+ for (index, chunk) in data[..register.len()].chunks_exact_mut(4).enumerate() {
+ let index = u16::try_from(index).map_err(|_| Error::new(EIO))?;
+ let value = unsafe { pcie.read(addr, aer_base + register.offset() + index * 4) };
+ chunk.copy_from_slice(&value.to_le_bytes());
+ }
+
+ Self::read_file_bytes(&data[..register.len()], buf, offset)
+ }
+ fn parse_after_pci_addr(
+ &mut self,
+ addr: PciAddress,
+ after: &str,
+ ctx: &CallerCtx,
+ ) -> Result<Handle> {
if after.chars().next().map_or(false, |c| c != '/') {
return Err(Error::new(ENOENT));
}
let func = self.tree.get_mut(&addr).ok_or(Error::new(ENOENT))?;
Ok(if after.is_empty() {
- Handle::Device
+ Handle::Device { addr }
} else {
let path = &after[1..];
- match path {
- "channel" => {
- if func.enabled {
- return Err(Error::new(ENOLCK));
+ if path == "aer" {
+ if Self::find_pcie_extended_capability(
+ &self.pcie,
+ addr,
+ PCIE_EXTENDED_CAPABILITY_AER,
+ )
+ .is_none()
+ {
+ return Err(Error::new(ENOENT));
+ }
+ Handle::AerDir
+ } else if let Some(register_name) = path.strip_prefix("aer/") {
+ let register =
+ AerRegisterName::from_path(register_name).ok_or(Error::new(ENOENT))?;
+ if Self::find_pcie_extended_capability(
+ &self.pcie,
+ addr,
+ PCIE_EXTENDED_CAPABILITY_AER,
+ )
+ .is_none()
+ {
+ return Err(Error::new(ENOENT));
+ }
+ Handle::Aer { addr, register }
+ } else {
+ match path {
+ "channel" => {
+ if func.enabled {
+ return Err(Error::new(ENOLCK));
+ }
+ func.inner.legacy_interrupt_line = crate::enable_function(
+ &self.pcie,
+ &mut func.endpoint_header,
+ &mut func.capabilities,
+ );
+ func.enabled = true;
+ Handle::Channel {
+ addr,
+ st: ChannelState::AwaitingData,
+ }
}
- func.inner.legacy_interrupt_line = crate::enable_function(
- &self.pcie,
- &mut func.endpoint_header,
- &mut func.capabilities,
- );
- func.enabled = true;
- Handle::Channel {
- addr,
- st: ChannelState::AwaitingData,
+ "bind" => {
+ let addr_str = format!("{}", addr);
+ if let Some(&owner_pid) = self.binds.get(&addr_str) {
+ log::info!(
+ "pcid: device {} already bound by pid {}",
+ addr_str,
+ owner_pid
+ );
+ return Err(Error::new(EALREADY));
+ }
+ let caller_pid = u32::try_from(ctx.pid).map_err(|_| Error::new(EINVAL))?;
+ self.binds.insert(addr_str.clone(), caller_pid);
+ log::info!("pcid: device {} bound by pid {}", addr_str, caller_pid);
+ Handle::Bind { addr }
}
- }
- "bind" => {
- let addr_str = format!("{}", addr);
- if let Some(&owner_pid) = self.binds.get(&addr_str) {
- log::info!("pcid: device {} already bound by pid {}", addr_str, owner_pid);
- return Err(Error::new(EALREADY));
- }
- let caller_pid = ctx.pid;
- self.binds.insert(addr_str.clone(), caller_pid);
- log::info!("pcid: device {} bound by pid {}", addr_str, caller_pid);
- Handle::Bind { addr }
- }
- _ => return Err(Error::new(ENOENT)),
+ _ => return Err(Error::new(ENOENT)),
+ }
}
})
}
@@ -10625,7 +10625,7 @@ diff --git a/drivers/pcid-spawner/src/main.rs b/drivers/pcid-spawner/src/main.rs
index a968f4d4..bfff05c3 100644
--- a/drivers/pcid-spawner/src/main.rs
+++ b/drivers/pcid-spawner/src/main.rs
@@ -1,11 +1,40 @@
@@ -1,11 +1,41 @@
+use std::env;
use std::fs;
use std::process::Command;
@@ -10667,7 +10667,7 @@ index a968f4d4..bfff05c3 100644
fn main() -> Result<()> {
let mut args = pico_args::Arguments::from_env();
let initfs = args.contains("--initfs");
@@ -30,6 +59,12 @@ fn main() -> Result<()> {
@@ -30,12 +59,33 @@ fn main() -> Result<()> {
}
let config: Config = toml::from_str(&config_data)?;
File diff suppressed because it is too large Load Diff
@@ -2,7 +2,7 @@ diff --git a/src/main.rs b/src/main.rs
index b2e2736..a6a9474 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -500,33 +500,62 @@ pub extern "C" fn main() -> ! {
@@ -500,36 +500,63 @@ pub extern "C" fn main() -> ! {
print!("live: 0/{} MiB", size / MIBI as u64);
@@ -0,0 +1,97 @@
diff --git a/src/main.rs b/src/main.rs
index b2e2736..a6a9474 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -500,33 +500,62 @@ pub extern "C" fn main() -> ! {
print!("live: 0/{} MiB", size / MIBI as u64);
- let ptr = os.alloc_zeroed_page_aligned(size as usize);
- if ptr.is_null() {
- panic!("Failed to allocate memory for live");
- }
-
- let live = unsafe { slice::from_raw_parts_mut(ptr, size as usize) };
-
- let mut i = 0;
- for chunk in live.chunks_mut(MIBI) {
- print!("\rlive: {}/{} MiB", i / MIBI as u64, size / MIBI as u64);
- i += unsafe {
- fs.disk
- .read_at(fs.block + i / redoxfs::BLOCK_SIZE, chunk)
- .expect("Failed to read live disk") as u64
- };
- }
- println!("\rlive: {}/{} MiB", i / MIBI as u64, size / MIBI as u64);
-
- println!("Switching to live disk");
- unsafe {
- LIVE_OPT = Some((fs.block, slice::from_raw_parts_mut(ptr, size as usize)));
- }
+ let live_size = match usize::try_from(size) {
+ Ok(live_size) => live_size,
+ Err(_) => {
+ println!("\rlive: disabled (image too large for bootloader address space)");
+ live = false;
+ 0
+ }
+ };
- area_add(OsMemoryEntry {
- base: live.as_ptr() as u64,
- size: live.len() as u64,
- kind: OsMemoryKind::Reserved,
- });
+ let ptr = if live {
+ os.alloc_zeroed_page_aligned(live_size)
+ } else {
+ ptr::null_mut()
+ };
+
+ if live && ptr.is_null() {
+ println!(
+ "\rlive: disabled (unable to allocate {} MiB upfront)",
+ size / MIBI as u64
+ );
+ live = false;
+ }
+
+ let live = if live {
+ Some(unsafe { slice::from_raw_parts_mut(ptr, live_size) })
+ } else {
+ println!("Continuing without live preload");
+ None
+ };
+
+ if let Some(live) = live {
+ let mut i = 0;
+ for chunk in live.chunks_mut(MIBI) {
+ print!("\rlive: {}/{} MiB", i / MIBI as u64, size / MIBI as u64);
+ i += unsafe {
+ fs.disk
+ .read_at(fs.block + i / redoxfs::BLOCK_SIZE, chunk)
+ .expect("Failed to read live disk") as u64
+ };
+ }
+ println!("\rlive: {}/{} MiB", i / MIBI as u64, size / MIBI as u64);
+
+ println!("Switching to live disk");
+ unsafe {
+ LIVE_OPT = Some((fs.block, slice::from_raw_parts_mut(ptr, live_size)));
+ }
+
+ area_add(OsMemoryEntry {
+ base: live.as_ptr() as u64,
+ size: live.len() as u64,
+ kind: OsMemoryKind::Reserved,
+ });
+
+ Some(live)
+ } else {
+ None
+ }
-
- Some(live)
} else {
None
};
@@ -11,7 +11,7 @@ index 4b0bf31..90a97b8 100644
let mut esp_fs = match FileSystem::handle_protocol(esp_handle) {
Ok(esp_fs) => esp_fs,
Err(err) => {
@@ -87,9 +89,37 @@ fn esp_live_image(esp_handle: Handle, esp_device_path: &DevicePath) -> Option<V
@@ -87,8 +89,36 @@ fn esp_live_image(esp_handle: Handle, esp_device_path: &DevicePath) -> Option<V
};
let mut buffer = Vec::new();
@@ -0,0 +1,60 @@
diff --git a/src/os/uefi/device.rs b/src/os/uefi/device.rs
index 4b0bf31..90a97b8 100644
--- a/src/os/uefi/device.rs
+++ b/src/os/uefi/device.rs
@@ -46,6 +46,8 @@ fn device_path_relation(a_path: &DevicePath, b_path: &DevicePath) -> DevicePath
}
fn esp_live_image(esp_handle: Handle, esp_device_path: &DevicePath) -> Option<Vec<u8>> {
+ const MAX_LIVE_IMAGE_PRELOAD: usize = 128 * 1024 * 1024;
+
let mut esp_fs = match FileSystem::handle_protocol(esp_handle) {
Ok(esp_fs) => esp_fs,
Err(err) => {
@@ -87,9 +89,37 @@ fn esp_live_image(esp_handle: Handle, esp_device_path: &DevicePath) -> Option<V
};
let mut buffer = Vec::new();
+ let mut chunk = [0_u8; 64 * 1024];
+
+ loop {
+ let read = match live_image.read(&mut chunk) {
+ Ok(read) => read,
+ Err(err) => {
+ log::warn!(
+ "Failed while reading {}\\redox-live.iso: {:?}",
+ device_path_to_string(esp_device_path),
+ err
+ );
+ return None;
+ }
+ };
+
+ if read == 0 {
+ break;
+ }
- live_image.read_to_end(&mut buffer).unwrap();
+ if buffer.len().saturating_add(read) > MAX_LIVE_IMAGE_PRELOAD {
+ log::warn!(
+ "Skipping {}\\redox-live.iso preload: file exceeds {} MiB safety limit",
+ device_path_to_string(esp_device_path),
+ MAX_LIVE_IMAGE_PRELOAD / 1024 / 1024
+ );
+ return None;
+ }
+
+ buffer.extend_from_slice(&chunk[..read]);
+ }
Some(buffer)
}
@@ -130,7 +160,7 @@ pub fn disk_device_priority() -> Vec<DiskDevice> {
return vec![DiskDevice {
handle: esp_handle,
// Support both a copy of livedisk.iso and a standalone redoxfs partition
- partition_offset: if &buffer[512..520] == b"EFI PART" {
+ partition_offset: if buffer.len() >= 520 && &buffer[512..520] == b"EFI PART" {
//TODO: get block from partition table
2 * crate::MIBI as u64
} else {
@@ -113,7 +113,7 @@ diff --git a/src/os/uefi/device.rs b/src/os/uefi/device.rs
index 0b7991f..554d88e 100644
--- a/src/os/uefi/device.rs
+++ b/src/os/uefi/device.rs
@@ -13,6 +13,160 @@ use uefi_std::{fs::FileSystem, loaded_image::LoadedImage, proto::Protocol};
@@ -13,6 +13,154 @@ use uefi_std::{fs::FileSystem, loaded_image::LoadedImage, proto::Protocol};
use super::disk::{DiskEfi, DiskOrFileEfi};
@@ -0,0 +1,392 @@
diff --git a/src/arch/x86/mod.rs b/src/arch/x86/mod.rs
index bda3f5d..55889df 100644
--- a/src/arch/x86/mod.rs
+++ b/src/arch/x86/mod.rs
@@ -3,10 +3,15 @@ use crate::os::Os;
pub(crate) mod x32;
pub(crate) mod x64;
-pub unsafe fn paging_create(os: &impl Os, kernel_phys: u64, kernel_size: u64) -> Option<usize> {
+pub unsafe fn paging_create(
+ os: &impl Os,
+ kernel_phys: u64,
+ kernel_size: u64,
+ identity_map_end: u64,
+) -> Option<usize> {
unsafe {
if crate::KERNEL_64BIT {
- x64::paging_create(os, kernel_phys, kernel_size)
+ x64::paging_create(os, kernel_phys, kernel_size, identity_map_end)
} else {
x32::paging_create(os, kernel_phys, kernel_size)
}
diff --git a/src/arch/x86/x64.rs b/src/arch/x86/x64.rs
index a0a275a..fcf309d 100644
--- a/src/arch/x86/x64.rs
+++ b/src/arch/x86/x64.rs
@@ -29,7 +29,12 @@ const PRESENT: u64 = 1;
const WRITABLE: u64 = 1 << 1;
const LARGE: u64 = 1 << 7;
-pub unsafe fn paging_create(os: &impl Os, kernel_phys: u64, kernel_size: u64) -> Option<usize> {
+pub unsafe fn paging_create(
+ os: &impl Os,
+ kernel_phys: u64,
+ kernel_size: u64,
+ identity_map_end: u64,
+) -> Option<usize> {
unsafe {
// Create PML4
let pml4 = paging_allocate(os)?;
@@ -42,8 +47,14 @@ pub unsafe fn paging_create(os: &impl Os, kernel_phys: u64, kernel_size: u64) -
pml4[0] = pdp.as_ptr() as u64 | WRITABLE | PRESENT;
pml4[256] = pdp.as_ptr() as u64 | WRITABLE | PRESENT;
- // Identity map 8 GiB using 2 MiB pages
- for pdp_i in 0..8 {
+ let mut needed_pdp = identity_map_end.div_ceil(0x4000_0000);
+ if needed_pdp == 0 {
+ needed_pdp = 1;
+ }
+ assert!(needed_pdp <= pdp.len() as u64, "identity map end exceeds paging span");
+
+ // Identity map required physical range using 2 MiB pages
+ for pdp_i in 0..needed_pdp as usize {
let pd = paging_allocate(os)?;
pdp[pdp_i] = pd.as_ptr() as u64 | WRITABLE | PRESENT;
for pd_i in 0..pd.len() {
diff --git a/src/main.rs b/src/main.rs
index 78dabb0..fd8eb81 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -62,6 +62,10 @@ pub static mut KERNEL_64BIT: bool = false;
pub static mut LIVE_OPT: Option<(u64, &'static [u8])> = None;
+fn region_end(base: u64, size: u64) -> u64 {
+ base.saturating_add(size).next_multiple_of(0x1000)
+}
+
struct SliceWriter<'a> {
slice: &'a mut [u8],
i: usize,
@@ -645,9 +649,6 @@ fn main(os: &impl Os) -> (usize, u64, KernelArgs) {
(memory.len() as u64, memory.as_mut_ptr() as u64)
};
- let page_phys = unsafe { paging_create(os, kernel.as_ptr() as u64, kernel.len() as u64) }
- .expect("Failed to set up paging");
-
let max_env_size = 64 * KIBI;
let mut env_size = max_env_size;
let env_base = os.alloc_zeroed_page_aligned(env_size);
@@ -655,6 +656,28 @@ fn main(os: &impl Os) -> (usize, u64, KernelArgs) {
panic!("Failed to allocate memory for stack");
}
+ let mut identity_map_end = region_end(kernel.as_ptr() as u64, kernel.len() as u64)
+ .max(region_end(stack_base as u64, stack_size as u64))
+ .max(region_end(bootstrap_base, bootstrap_size))
+ .max(region_end(env_base as u64, max_env_size as u64));
+
+ if let Some(ref live) = live_opt {
+ identity_map_end = identity_map_end.max(region_end(
+ live.as_ptr() as u64,
+ live.len() as u64,
+ ));
+ }
+
+ let page_phys = unsafe {
+ paging_create(
+ os,
+ kernel.as_ptr() as u64,
+ kernel.len() as u64,
+ identity_map_end,
+ )
+ }
+ .expect("Failed to set up paging");
+
{
let mut w = SliceWriter {
slice: unsafe { slice::from_raw_parts_mut(env_base, max_env_size) },
diff --git a/src/os/uefi/device.rs b/src/os/uefi/device.rs
index 0b7991f..554d88e 100644
--- a/src/os/uefi/device.rs
+++ b/src/os/uefi/device.rs
@@ -13,6 +13,160 @@ use uefi_std::{fs::FileSystem, loaded_image::LoadedImage, proto::Protocol};
use super::disk::{DiskEfi, DiskOrFileEfi};
+#[derive(Clone, Copy)]
+struct GptPartitionInfo {
+ first_lba: u64,
+ last_lba: u64,
+}
+
+fn read_u32_le(bytes: &[u8]) -> Option<u32> {
+ Some(u32::from_le_bytes(bytes.get(..4)?.try_into().ok()?))
+}
+
+fn read_u64_le(bytes: &[u8]) -> Option<u64> {
+ Some(u64::from_le_bytes(bytes.get(..8)?.try_into().ok()?))
+}
+
+fn decode_utf16_name(bytes: &[u8]) -> Option<String> {
+ let mut units = Vec::new();
+ for chunk in bytes.chunks_exact(2) {
+ let unit = u16::from_le_bytes([chunk[0], chunk[1]]);
+ if unit == 0 {
+ break;
+ }
+ units.push(unit);
+ }
+ String::from_utf16(&units).ok()
+}
+
+fn select_partition(best: &mut Option<GptPartitionInfo>, candidate: GptPartitionInfo) {
+ match best {
+ Some(current) if current.last_lba.saturating_sub(current.first_lba) >= candidate.last_lba.saturating_sub(candidate.first_lba) => {}
+ _ => *best = Some(candidate),
+ }
+}
+
+fn parse_gpt_partition_offset_from_bytes(data: &[u8], block_size: usize) -> Option<u64> {
+ let header_offset = block_size;
+ let header = data.get(header_offset..header_offset + 92)?;
+ if header.get(..8)? != b"EFI PART" {
+ return None;
+ }
+
+ let entries_lba = read_u64_le(header.get(72..80)?)?;
+ let entry_count = read_u32_le(header.get(80..84)?)? as usize;
+ let entry_size = read_u32_le(header.get(84..88)?)? as usize;
+ if entry_size < 128 {
+ return None;
+ }
+
+ let entries_offset = entries_lba.checked_mul(block_size as u64)? as usize;
+ let mut redox_partition = None;
+ let mut fallback_partition = None;
+
+ for index in 0..entry_count {
+ let entry_offset = entries_offset.checked_add(index.checked_mul(entry_size)?)?;
+ let entry = data.get(entry_offset..entry_offset + entry_size)?;
+ if entry.get(..16)?.iter().all(|byte| *byte == 0) {
+ continue;
+ }
+
+ let first_lba = read_u64_le(entry.get(32..40)?)?;
+ let last_lba = read_u64_le(entry.get(40..48)?)?;
+ if first_lba == 0 || last_lba < first_lba {
+ continue;
+ }
+
+ let partition = GptPartitionInfo { first_lba, last_lba };
+ let name = decode_utf16_name(entry.get(56..128)?).unwrap_or_default();
+ if name == "REDOX" {
+ redox_partition = Some(partition);
+ break;
+ }
+
+ select_partition(&mut fallback_partition, partition);
+ }
+
+ redox_partition
+ .or(fallback_partition)
+ .map(|partition| partition.first_lba * block_size as u64)
+}
+
+fn parse_gpt_partition_offset_from_parts(
+ entries: &[u8],
+ entry_count: usize,
+ entry_size: usize,
+ block_size: usize,
+) -> Option<u64> {
+ let mut redox_partition = None;
+ let mut fallback_partition = None;
+
+ for index in 0..entry_count {
+ let entry_offset = index.checked_mul(entry_size)?;
+ let entry = entries.get(entry_offset..entry_offset + entry_size)?;
+ if entry.get(..16)?.iter().all(|byte| *byte == 0) {
+ continue;
+ }
+
+ let first_lba = read_u64_le(entry.get(32..40)?)?;
+ let last_lba = read_u64_le(entry.get(40..48)?)?;
+ if first_lba == 0 || last_lba < first_lba {
+ continue;
+ }
+
+ let partition = GptPartitionInfo { first_lba, last_lba };
+ let name = decode_utf16_name(entry.get(56..128)?).unwrap_or_default();
+ if name == "REDOX" {
+ redox_partition = Some(partition);
+ break;
+ }
+
+ select_partition(&mut fallback_partition, partition);
+ }
+ redox_partition
+ .or(fallback_partition)
+ .map(|partition| partition.first_lba * block_size as u64)
+}
+
+fn gpt_partition_offset_from_buffer(data: &[u8]) -> Option<u64> {
+ parse_gpt_partition_offset_from_bytes(data, 512)
+}
+
+fn gpt_partition_offset_from_disk(disk: &mut DiskEfi) -> Option<u64> {
+ const GPT_SECTOR_SIZE: usize = 512;
+
+ if disk.media_block_size() == 0 {
+ return None;
+ }
+
+ let mut boot_region = vec![0_u8; 2048];
+ disk.read_bytes(0, &mut boot_region).ok()?;
+ let header = boot_region.get(GPT_SECTOR_SIZE..GPT_SECTOR_SIZE + 92)?;
+ if header.get(..8)? != b"EFI PART" {
+ return None;
+ }
+
+ let entries_lba = read_u64_le(header.get(72..80)?)?;
+ let entry_count = read_u32_le(header.get(80..84)?)? as usize;
+ let entry_size = read_u32_le(header.get(84..88)?)? as usize;
+ if entry_size < 128 {
+ return None;
+ }
+
+ let entries_bytes = entry_count.checked_mul(entry_size)?;
+ let entries_offset = entries_lba.checked_mul(GPT_SECTOR_SIZE as u64)?;
+ let mut entries = vec![0_u8; entries_bytes];
+ disk.read_bytes(entries_offset, &mut entries).ok()?;
+
+ parse_gpt_partition_offset_from_parts(&entries, entry_count, entry_size, GPT_SECTOR_SIZE)
+}
+
#[derive(Debug)]
enum DevicePathRelation {
This,
@@ -131,12 +285,7 @@ pub fn disk_device_priority() -> Vec<DiskDevice> {
return vec![DiskDevice {
handle: esp_handle,
// Support both a copy of livedisk.iso and a standalone redoxfs partition
- partition_offset: if &buffer[512..520] == b"EFI PART" {
- //TODO: get block from partition table
- 2 * crate::MIBI as u64
- } else {
- 0
- },
+ partition_offset: gpt_partition_offset_from_buffer(&buffer).unwrap_or(0),
disk: DiskOrFileEfi::File(buffer),
device_path: esp_device_path,
file_path: Some("redox-live.iso"),
@@ -154,7 +303,7 @@ pub fn disk_device_priority() -> Vec<DiskDevice> {
};
let mut devices = Vec::with_capacity(handles.len());
for handle in handles {
- let disk = match DiskEfi::handle_protocol(handle) {
+ let mut disk = match DiskEfi::handle_protocol(handle) {
Ok(ok) => ok,
Err(err) => {
log::warn!(
@@ -182,14 +331,15 @@ pub fn disk_device_priority() -> Vec<DiskDevice> {
}
};
+ let partition_offset = if disk.0.Media.LogicalPartition {
+ 0
+ } else {
+ gpt_partition_offset_from_disk(&mut disk).unwrap_or(2 * crate::MIBI as u64)
+ };
+
devices.push(DiskDevice {
handle,
- partition_offset: if disk.0.Media.LogicalPartition {
- 0
- } else {
- //TODO: get block from partition table
- 2 * crate::MIBI as u64
- },
+ partition_offset,
disk: DiskOrFileEfi::Disk(disk),
device_path,
file_path: None,
diff --git a/src/os/uefi/disk.rs b/src/os/uefi/disk.rs
index 3f920bb..4d109f8 100644
--- a/src/os/uefi/disk.rs
+++ b/src/os/uefi/disk.rs
@@ -117,3 +117,43 @@ impl Disk for DiskEfi {
Err(Error::new(EIO))
}
}
+
+impl DiskEfi {
+ pub fn media_block_size(&self) -> usize {
+ self.0.Media.BlockSize as usize
+ }
+
+ pub fn read_bytes(&mut self, offset: u64, buffer: &mut [u8]) -> Result<()> {
+ let block_size = self.media_block_size();
+ if block_size == 0 || block_size > self.1.len() {
+ return Err(Error::new(EINVAL));
+ }
+
+ let scratch = &mut self.1[..block_size];
+ let mut copied = 0usize;
+
+ while copied < buffer.len() {
+ let absolute = offset as usize + copied;
+ let lba = (absolute / block_size) as u64;
+ let in_block = absolute % block_size;
+
+ match (self.0.ReadBlocks)(
+ self.0,
+ self.0.Media.MediaId,
+ lba,
+ block_size,
+ scratch.as_mut_ptr(),
+ ) {
+ status if status.is_success() => {
+ let chunk_len = core::cmp::min(block_size - in_block, buffer.len() - copied);
+ buffer[copied..copied + chunk_len]
+ .copy_from_slice(&scratch[in_block..in_block + chunk_len]);
+ copied += chunk_len;
+ }
+ _ => return Err(Error::new(EIO)),
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/src/os/uefi/mod.rs b/src/os/uefi/mod.rs
index c79266e..86235a4 100644
--- a/src/os/uefi/mod.rs
+++ b/src/os/uefi/mod.rs
@@ -47,17 +47,19 @@ pub(crate) fn alloc_zeroed_page_aligned(size: usize) -> *mut u8 {
let ptr = {
// Max address mapped by src/arch paging code (8 GiB)
let mut ptr = 0x2_0000_0000;
- status_to_result((std::system_table().BootServices.AllocatePages)(
- 1, // AllocateMaxAddress
- MemoryType::EfiRuntimeServicesData, // Keeps this memory out of free space list
+ if status_to_result((std::system_table().BootServices.AllocatePages)(
+ 0, // AllocateAnyPages
+ MemoryType::EfiLoaderData,
pages,
&mut ptr,
))
- .unwrap();
+ .is_err()
+ {
+ return ptr::null_mut();
+ }
ptr as *mut u8
};
- assert!(!ptr.is_null());
unsafe { ptr::write_bytes(ptr, 0, pages * page_size) };
ptr
}
@@ -41,7 +41,7 @@ index 7a7c0ae8..62f9523c 100644
args.print();
// Set up GDT
@@ -127,16 +127,21 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! {
@@ -127,17 +127,21 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! {
// Initialize devices
device::init();
@@ -0,0 +1,65 @@
# Red Bear OS branding in kernel start messages
# Changes "Redox OS" to "RedBear OS" in architecture start files
# Adds device init logging milestones in x86_shared start path
diff --git a/src/arch/aarch64/start.rs b/src/arch/aarch64/start.rs
index e1c8cfb4..65e3fe33 100644
--- a/src/arch/aarch64/start.rs
+++ b/src/arch/aarch64/start.rs
@@ -91,7 +91,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs) -> ! {
dtb::serial::init_early(dtb);
}
- info!("Redox OS starting...");
+ info!("RedBear OS starting...");
args.print();
// Initialize RMM
diff --git a/src/arch/riscv64/start.rs b/src/arch/riscv64/start.rs
index 2551968f..a825536a 100644
--- a/src/arch/riscv64/start.rs
+++ b/src/arch/riscv64/start.rs
@@ -97,7 +97,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs) -> ! {
init_early(dtb);
}
- info!("Redox OS starting...");
+ info!("RedBear OS starting...");
args.print();
if let Some(dtb) = &dtb {
diff --git a/src/arch/x86_shared/start.rs b/src/arch/x86_shared/start.rs
index 7a7c0ae8..62f9523c 100644
--- a/src/arch/x86_shared/start.rs
+++ b/src/arch/x86_shared/start.rs
@@ -91,7 +91,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! {
// Set up graphical debug
graphical_debug::init(args.env());
- info!("Redox OS starting...");
+ info!("RedBear OS starting...");
args.print();
// Set up GDT
@@ -127,16 +127,21 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! {
// Initialize devices
device::init();
+ info!("kernel: device init complete (PIC + LAPIC)");
// Read ACPI tables, starts APs
if cfg!(feature = "acpi") {
crate::acpi::init(args.acpi_rsdp());
+ info!("kernel: ACPI tables parsed");
device::init_after_acpi();
+ info!("kernel: IOAPIC init complete");
}
crate::profiling::init();
// Initialize all of the non-core devices not otherwise needed to complete initialization
device::init_noncore();
+ info!("kernel: timer init complete, entering userspace");
args.bootstrap()
};
@@ -524,7 +524,7 @@ index b901302..dfbf66b 100644
}
// invalid state
@@ -368,7 +401,67 @@ impl UserInner {
@@ -368,6 +401,68 @@ impl UserInner {
}
},
}
+2 -2
View File
@@ -9,7 +9,7 @@ diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
MmapMinAddr(Arc<AddrSpaceWrapper>),
}
@@ -267,6 +268,7 @@ impl ProcScheme {
@@ -267,7 +268,8 @@ impl ProcScheme {
"sched-affinity" => (ContextHandle::SchedAffinity, true),
// TODO: Switch this kernel-local proc handle over to a stable upstream
// redox_syscall ProcCall::SetSchedPolicy opcode once that lands.
@@ -18,7 +18,7 @@ diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
"status" => (ContextHandle::Status { privileged: false }, false),
_ if path.starts_with("auth-") => {
let nonprefix = &path["auth-".len()..];
@@ -1218,6 +1220,16 @@ impl ContextHandle {
@@ -1218,5 +1220,15 @@ impl ContextHandle {
Ok(2)
}
+ ContextHandle::Name => {
@@ -0,0 +1,47 @@
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -147,6 +147,7 @@ enum ContextHandle {
Priority,
SchedAffinity,
SchedPolicy,
+ Name,
MmapMinAddr(Arc<AddrSpaceWrapper>),
}
@@ -267,6 +268,7 @@ impl ProcScheme {
"sched-affinity" => (ContextHandle::SchedAffinity, true),
// TODO: Switch this kernel-local proc handle over to a stable upstream
// redox_syscall ProcCall::SetSchedPolicy opcode once that lands.
"sched-policy" => (ContextHandle::SchedPolicy, false),
+ "name" => (ContextHandle::Name, false),
"status" => (ContextHandle::Status { privileged: false }, false),
_ if path.starts_with("auth-") => {
let nonprefix = &path["auth-".len()..];
@@ -1218,6 +1220,16 @@ impl ContextHandle {
Ok(2)
}
+ ContextHandle::Name => {
+ let mut name_buf = [0u8; 32];
+ let len = buf.copy_common_bytes_to_slice(&mut name_buf[..31]).unwrap_or(0);
+ let mut context = context.write(token.token());
+ context.name.clear();
+ if let Ok(s) = core::str::from_utf8(&name_buf[..len]) {
+ context.name.push_str(s);
+ }
+ Ok(len)
+ }
ContextHandle::Status { privileged } => {
let mut args = buf.usizes();
@@ -1532,6 +1544,10 @@ impl ContextHandle {
let data = [context.sched_policy as u8, context.sched_rt_priority];
buf.copy_common_bytes_from_slice(&data)
}
+ ContextHandle::Name => {
+ let context = context.read(token.token());
+ buf.copy_common_bytes_from_slice(context.name.as_bytes())
+ }
ContextHandle::Status { .. } => {
let status = {
let context = context.read(token.token());
@@ -11,7 +11,7 @@ diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
Name,
MmapMinAddr(Arc<AddrSpaceWrapper>),
@@ -160,6 +161,17 @@ pub struct ProcScheme;
@@ -160,7 +161,18 @@ pub struct ProcScheme;
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
static HANDLES: RwLock<L1, HashMap<usize, Handle>> =
RwLock::new(HashMap::with_hasher(DefaultHashBuilder::new()));
@@ -38,7 +38,7 @@ diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
"mmap-min-addr" => (
ContextHandle::MmapMinAddr(Arc::clone(
context
@@ -1191,6 +1204,17 @@ impl ContextHandle {
@@ -1191,6 +1204,18 @@ impl ContextHandle {
Ok(size_of_val(&mask))
}
@@ -0,0 +1,70 @@
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -145,8 +145,9 @@ enum ContextHandle {
// TODO: Remove this once openat is implemented, or allow openat-via-dup via e.g. the top-level
// directory.
OpenViaDup,
+ Priority,
SchedAffinity,
SchedPolicy,
Name,
MmapMinAddr(Arc<AddrSpaceWrapper>),
@@ -160,6 +161,17 @@ pub struct ProcScheme;
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
static HANDLES: RwLock<L1, HashMap<usize, Handle>> =
RwLock::new(HashMap::with_hasher(DefaultHashBuilder::new()));
+
+const NICE_MIN: i32 = -20;
+const NICE_MAX: i32 = 19;
+
+fn nice_to_kernel_prio(nice: i32) -> usize {
+ (nice.saturating_add(20)).clamp(0, 39) as usize
+}
+
+fn kernel_prio_to_nice(prio: usize) -> i32 {
+ (prio.min(39) as i32) - 20
+}
#[cfg(feature = "debugger")]
#[allow(dead_code)]
pub fn foreach_addrsp(
@@ -253,6 +265,7 @@ impl ProcScheme {
"sighandler" => (ContextHandle::Sighandler, false),
"start" => (ContextHandle::Start, false),
"open_via_dup" => (ContextHandle::OpenViaDup, false),
+ "priority" => (ContextHandle::Priority, false),
"mmap-min-addr" => (
ContextHandle::MmapMinAddr(Arc::clone(
context
@@ -1191,6 +1204,17 @@ impl ContextHandle {
Ok(size_of_val(&mask))
}
+ Self::Priority => {
+ let nice = unsafe { buf.read_exact::<i32>()? };
+ if !(NICE_MIN..=NICE_MAX).contains(&nice) {
+ return Err(Error::new(EINVAL));
+ }
+
+ context
+ .write(token.token())
+ .set_sched_other_prio(nice_to_kernel_prio(nice));
+
+ Ok(size_of::<i32>())
+ }
Self::SchedPolicy => {
if buf.len() != 2 {
return Err(Error::new(EINVAL));
@@ -1522,6 +1546,10 @@ impl ContextHandle {
buf.copy_exactly(crate::cpu_set::mask_as_bytes(&mask))?;
Ok(size_of_val(&mask))
+ }
+ ContextHandle::Priority => {
+ let nice = kernel_prio_to_nice(context.read(token.token()).prio);
+ buf.copy_common_bytes_from_slice(&nice.to_ne_bytes())
}
ContextHandle::SchedPolicy => {
let context = context.read(token.token());
+3 -3
View File
@@ -1,7 +1,7 @@
diff --git a/src/percpu.rs b/src/percpu.rs
--- a/src/percpu.rs
+++ b/src/percpu.rs
@@ -29,12 +29,14 @@ pub struct PerCpuSched {
@@ -29,15 +29,17 @@ pub struct PerCpuSched {
pub run_queues_lock: AtomicBool,
pub balance: Cell<[usize; RUN_QUEUE_COUNT]>,
pub last_queue: Cell<usize>,
@@ -22,7 +22,7 @@ diff --git a/src/percpu.rs b/src/percpu.rs
diff --git a/src/context/switch.rs b/src/context/switch.rs
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -33,6 +33,8 @@ const SCHED_PRIO_TO_WEIGHT: [usize; 40] = [
@@ -33,4 +33,6 @@ const SCHED_PRIO_TO_WEIGHT: [usize; 40] = [
70, 56, 45, 36, 29, 23, 18, 15,
];
@@ -39,7 +39,7 @@ diff --git a/src/context/switch.rs b/src/context/switch.rs
// Trigger a context switch after every 3 ticks.
if new_ticks >= 3 {
switch(token);
@@ -427,6 +432,92 @@ fn steal_work(
@@ -427,3 +432,104 @@ fn steal_work(
None
}
@@ -0,0 +1,146 @@
diff --git a/src/percpu.rs b/src/percpu.rs
--- a/src/percpu.rs
+++ b/src/percpu.rs
@@ -29,12 +29,14 @@ pub struct PerCpuSched {
pub run_queues_lock: AtomicBool,
pub balance: Cell<[usize; RUN_QUEUE_COUNT]>,
pub last_queue: Cell<usize>,
+ pub last_balance_time: Cell<u128>,
}
impl PerCpuSched {
pub const fn new() -> Self {
const EMPTY: VecDeque<WeakContextRef> = VecDeque::new();
Self {
run_queues: SyncUnsafeCell::new([EMPTY; RUN_QUEUE_COUNT]),
run_queues_lock: AtomicBool::new(false),
balance: Cell::new([0; RUN_QUEUE_COUNT]),
last_queue: Cell::new(0),
+ last_balance_time: Cell::new(0),
}
}
diff --git a/src/context/switch.rs b/src/context/switch.rs
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -33,6 +33,8 @@ const SCHED_PRIO_TO_WEIGHT: [usize; 40] = [
70, 56, 45, 36, 29, 23, 18, 15,
];
+const LOAD_BALANCE_INTERVAL_NS: u128 = 100_000_000;
+
static SCHED_STEAL_COUNT: AtomicUsize = AtomicUsize::new(0);
@@ -101,6 +103,9 @@ pub fn tick(token: &mut CleanLockToken) {
let new_ticks = ticks_cell.get() + 1;
ticks_cell.set(new_ticks);
+ let balance_time = crate::time::monotonic(token);
+ maybe_balance_queues(token, percpu, balance_time);
+
// Trigger a context switch after every 3 ticks.
if new_ticks >= 3 {
switch(token);
@@ -427,6 +432,92 @@ fn steal_work(
None
}
+
+fn queue_depth(percpu: &PercpuBlock) -> usize {
+ let mut sched_lock = SchedQueuesLock::new(&percpu.sched);
+ unsafe {
+ sched_lock
+ .queues_mut()
+ .iter()
+ .map(|queue| queue.len())
+ .sum()
+ }
+}
+
+fn migrate_one_context(
+ token: &mut CleanLockToken,
+ source_id: LogicalCpuId,
+ target_id: LogicalCpuId,
+ switch_time: u128,
+) -> bool {
+ let Some(source) = get_percpu_block(source_id) else {
+ return false;
+ };
+ let Some(target) = get_percpu_block(target_id) else {
+ return false;
+ };
+
+ let source_idle = source.switch_internals.idle_context();
+ let moved = {
+ let mut source_lock = SchedQueuesLock::new(&source.sched);
+ let source_queues = unsafe { source_lock.queues_mut() };
+ pop_movable_context(token, source_queues, target_id, switch_time, &source_idle)
+ };
+
+ let Some((prio, context_ref)) = moved else {
+ return false;
+ };
+
+ let mut target_lock = SchedQueuesLock::new(&target.sched);
+ unsafe {
+ target_lock.queues_mut()[prio].push_back(context_ref);
+ }
+ true
+}
+
+fn maybe_balance_queues(token: &mut CleanLockToken, percpu: &PercpuBlock, balance_time: u128) {
+ if crate::cpu_count() <= 1 || percpu.cpu_id != LogicalCpuId::BSP {
+ return;
+ }
+ if balance_time.saturating_sub(percpu.sched.last_balance_time.get()) < LOAD_BALANCE_INTERVAL_NS
+ {
+ return;
+ }
+
+ percpu.sched.last_balance_time.set(balance_time);
+
+ let mut depths = Vec::new();
+ let mut total_depth = 0usize;
+ for raw_id in 0..crate::cpu_count() {
+ let cpu_id = LogicalCpuId::new(raw_id);
+ let Some(cpu_percpu) = get_percpu_block(cpu_id) else {
+ continue;
+ };
+ let depth = queue_depth(cpu_percpu);
+ total_depth += depth;
+ depths.push((cpu_id, depth));
+ }
+
+ if depths.len() <= 1 || total_depth == 0 {
+ return;
+ }
+
+ let avg_depth = (total_depth + depths.len().saturating_sub(1)) / depths.len();
+
+ for target_index in 0..depths.len() {
+ if depths[target_index].1 != 0 {
+ continue;
+ }
+
+ let mut source_index = None;
+ let mut source_depth = 0usize;
+ for (idx, &(_, depth)) in depths.iter().enumerate() {
+ if idx == target_index {
+ continue;
+ }
+ if depth > avg_depth + 1 && depth > source_depth {
+ source_index = Some(idx);
+ source_depth = depth;
+ }
+ }
+
+ let Some(source_index) = source_index else {
+ continue;
+ };
+
+ let source_id = depths[source_index].0;
+ let target_id = depths[target_index].0;
+ if migrate_one_context(token, source_id, target_id, balance_time) {
+ depths[source_index].1 = depths[source_index].1.saturating_sub(1);
+ depths[target_index].1 += 1;
+ }
+ }
+}
+3 -3
View File
@@ -1,7 +1,7 @@
diff --git a/src/percpu.rs b/src/percpu.rs
--- a/src/percpu.rs
+++ b/src/percpu.rs
@@ -100,6 +100,14 @@ static ALL_PERCPU_BLOCKS: [AtomicPtr<PercpuBlock>; MAX_CPU_COUNT as usize] =
@@ -100,5 +100,13 @@ static ALL_PERCPU_BLOCKS: [AtomicPtr<PercpuBlock>; MAX_CPU_COUNT as usize] =
pub unsafe fn init_tlb_shootdown(id: LogicalCpuId, block: *mut PercpuBlock) {
ALL_PERCPU_BLOCKS[id.get() as usize].store(block, Ordering::Release)
}
@@ -18,7 +18,7 @@ diff --git a/src/percpu.rs b/src/percpu.rs
diff --git a/src/context/switch.rs b/src/context/switch.rs
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -7,15 +7,15 @@ use crate::{
@@ -7,15 +7,135 @@ use crate::{
self, arch, idle_contexts, idle_contexts_try, run_contexts, ArcContextLockWriteGuard,
Context, ContextLock, SchedPolicy, WeakContextRef, RUN_QUEUE_COUNT,
},
@@ -170,7 +170,7 @@ diff --git a/src/context/switch.rs b/src/context/switch.rs
run_queues[prio].push_back(context_ref);
}
}
@@ -559,6 +672,16 @@ fn select_next_context(
@@ -559,6 +672,17 @@ fn select_next_context(
);
return Ok(Some(next_context_guard));
}
@@ -0,0 +1,190 @@
diff --git a/src/percpu.rs b/src/percpu.rs
--- a/src/percpu.rs
+++ b/src/percpu.rs
@@ -100,6 +100,14 @@ static ALL_PERCPU_BLOCKS: [AtomicPtr<PercpuBlock>; MAX_CPU_COUNT as usize] =
pub unsafe fn init_tlb_shootdown(id: LogicalCpuId, block: *mut PercpuBlock) {
ALL_PERCPU_BLOCKS[id.get() as usize].store(block, Ordering::Release)
}
+
+pub fn get_percpu_block(id: LogicalCpuId) -> Option<&'static PercpuBlock> {
+ unsafe {
+ ALL_PERCPU_BLOCKS[id.get() as usize]
+ .load(Ordering::Acquire)
+ .as_ref()
+ }
+}
pub fn get_all_stats() -> Vec<(LogicalCpuId, CpuStatsData)> {
diff --git a/src/context/switch.rs b/src/context/switch.rs
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -7,15 +7,15 @@ use crate::{
self, arch, idle_contexts, idle_contexts_try, run_contexts, ArcContextLockWriteGuard,
Context, ContextLock, SchedPolicy, WeakContextRef, RUN_QUEUE_COUNT,
},
- cpu_set::LogicalCpuId,
+ cpu_set::{LogicalCpuId, LogicalCpuSet},
cpu_stats::{self, CpuState},
- percpu::{PerCpuSched, PercpuBlock},
+ percpu::{get_percpu_block, PerCpuSched, PercpuBlock},
sync::{ArcRwLockWriteGuard, CleanLockToken, LockToken, L1, L4},
};
use alloc::{sync::Arc, vec::Vec};
use core::{
cell::{Cell, RefCell},
hint, mem,
- sync::atomic::Ordering,
+ sync::atomic::{AtomicUsize, Ordering},
};
use syscall::PtraceFlags;
@@
+static SCHED_STEAL_COUNT: AtomicUsize = AtomicUsize::new(0);
+
+fn assign_context_to_cpu(context: &mut Context, cpu_id: LogicalCpuId) {
+ context.sched_affinity = LogicalCpuSet::empty();
+ context.sched_affinity.atomic_set(cpu_id);
+}
@@
+fn pop_movable_context(
+ token: &mut CleanLockToken,
+ queues: &mut [alloc::collections::VecDeque<WeakContextRef>; RUN_QUEUE_COUNT],
+ target_cpu: LogicalCpuId,
+ switch_time: u128,
+ idle_context: &Arc<ContextLock>,
+) -> Option<(usize, WeakContextRef)> {
+ for prio in 0..RUN_QUEUE_COUNT {
+ let len = queues[prio].len();
+ for _ in 0..len {
+ let Some(context_ref) = queues[prio].pop_front() else {
+ break;
+ };
+ let Some(context_lock) = context_ref.upgrade() else {
+ continue;
+ };
+ if Arc::ptr_eq(&context_lock, idle_context) {
+ queues[prio].push_back(context_ref);
+ continue;
+ }
+
+ let mut context_guard = unsafe { context_lock.write_arc() };
+ let sw = unsafe { update_stealable(&mut context_guard, switch_time) };
+ if let UpdateResult::CanSwitch = sw {
+ assign_context_to_cpu(&mut context_guard, target_cpu);
+ let moved_ref = WeakContextRef(Arc::downgrade(ArcContextLockWriteGuard::rwlock(
+ &context_guard,
+ )));
+ drop(context_guard);
+ return Some((prio, moved_ref));
+ }
+
+ if matches!(sw, UpdateResult::Blocked) {
+ idle_contexts(token.downgrade()).push_back(context_ref);
+ } else {
+ queues[prio].push_back(context_ref);
+ }
+ }
+ }
+
+ None
+}
+
+fn steal_work(
+ token: &mut CleanLockToken,
+ cpu_id: LogicalCpuId,
+ switch_time: u128,
+) -> Option<ArcContextLockWriteGuard> {
+ let cpu_count = crate::cpu_count();
+ if cpu_count <= 1 {
+ return None;
+ }
+
+ for offset in 1..cpu_count {
+ let victim_id = LogicalCpuId::new((cpu_id.get() + offset) % cpu_count);
+ let Some(victim) = get_percpu_block(victim_id) else {
+ continue;
+ };
+
+ let victim_idle = victim.switch_internals.idle_context();
+ let mut victim_lock = SchedQueuesLock::new(&victim.sched);
+ let victim_queues = unsafe { victim_lock.queues_mut() };
+
+ for prio in 0..RUN_QUEUE_COUNT {
+ let len = victim_queues[prio].len();
+ for _ in 0..len {
+ let Some(context_ref) = victim_queues[prio].pop_front() else {
+ break;
+ };
+ let Some(context_lock) = context_ref.upgrade() else {
+ continue;
+ };
+ if Arc::ptr_eq(&context_lock, &victim_idle) {
+ victim_queues[prio].push_back(context_ref);
+ continue;
+ }
+
+ let mut context_guard = unsafe { context_lock.write_arc() };
+ let sw = unsafe { update_stealable(&mut context_guard, switch_time) };
+ if let UpdateResult::CanSwitch = sw {
+ assign_context_to_cpu(&mut context_guard, cpu_id);
+ SCHED_STEAL_COUNT.fetch_add(1, Ordering::Relaxed);
+ return Some(context_guard);
+ }
+
+ if matches!(sw, UpdateResult::Blocked) {
+ idle_contexts(token.downgrade()).push_back(context_ref);
+ } else {
+ victim_queues[prio].push_back(context_ref);
+ }
+ }
+ }
+ }
+
+ None
+}
+
+unsafe fn update_stealable(context: &mut Context, switch_time: u128) -> UpdateResult {
+ if context.running {
+ return UpdateResult::Skip;
+ }
+ if context.status.is_soft_blocked()
+ && let Some(wake) = context.wake
+ && switch_time >= wake
+ {
+ context.wake = None;
+ context.unblock_no_ipi();
+ }
+ if context.status.is_runnable() {
+ UpdateResult::CanSwitch
+ } else {
+ UpdateResult::Blocked
+ }
+}
@@ -360,6 +469,10 @@ fn wakeup_contexts(token: &mut CleanLockToken, percpu: &PercpuBlock, switch_time
let mut sched_lock = SchedQueuesLock::new(&percpu.sched);
let run_queues = unsafe { sched_lock.queues_mut() };
for (prio, context_ref) in wakeups {
+ if let Some(context_lock) = context_ref.upgrade() {
+ let mut context_guard = unsafe { context_lock.write_arc() };
+ assign_context_to_cpu(&mut context_guard, percpu.cpu_id);
+ }
run_queues[prio].push_back(context_ref);
}
}
@@ -559,6 +672,16 @@ fn select_next_context(
);
return Ok(Some(next_context_guard));
}
+
+ if let Some(next_context_guard) = steal_work(token, cpu_id, switch_time) {
+ queue_previous_context(
+ token,
+ percpu,
+ &prev_context_lock,
+ prev_context_guard,
+ &idle_context,
+ );
+ return Ok(Some(next_context_guard));
+ }
let global_next = {
let contexts_data = run_contexts(token.token());
-147
View File
@@ -1,147 +0,0 @@
diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs
index 3159b9c4..c691eb8d 100644
--- a/src/acpi/madt/mod.rs
+++ b/src/acpi/madt/mod.rs
@@ -146,6 +146,52 @@ pub struct MadtGicd {
_reserved2: [u8; 3],
}
+/// MADT Local x2APIC (entry type 0x9)
+/// Used by modern AMD and Intel platforms with APIC IDs >= 255.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalX2Apic {
+ _reserved: u16,
+ pub x2apic_id: u32,
+ pub flags: u32,
+ pub processor_uid: u32,
+}
+
+/// MADT Local APIC NMI (entry type 0x4)
+/// Configures NMI routing to a processor's LINT0/LINT1 pin.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalApicNmi {
+ pub processor: u8,
+ pub flags: u16,
+ pub nmi_pin: u8,
+}
+
+/// MADT Local APIC Address Override (entry type 0x5)
+/// Provides 64-bit override for the 32-bit local APIC address.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLapicAddressOverride {
+ _reserved: u16,
+ pub local_apic_address: u64,
+}
+
+/// MADT Local x2APIC NMI (entry type 0xA)
+/// x2APIC equivalent of type 0x4 for APIC IDs >= 255.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalX2ApicNmi {
+ _reserved: u16,
+ pub processor_uid: u32,
+ pub flags: u16,
+ pub nmi_pin: u8,
+ _reserved2: u8,
+}
+
+const _: () = assert!(size_of::<MadtLocalApicNmi>() == 4);
+const _: () = assert!(size_of::<MadtLapicAddressOverride>() == 10);
+const _: () = assert!(size_of::<MadtLocalX2ApicNmi>() == 10);
+
/// MADT Entries
#[derive(Debug)]
#[allow(dead_code)]
@@ -160,6 +206,14 @@ pub enum MadtEntry {
InvalidGicc(usize),
Gicd(&'static MadtGicd),
InvalidGicd(usize),
+ LocalX2Apic(&'static MadtLocalX2Apic),
+ InvalidLocalX2Apic(usize),
+ LocalApicNmi(&'static MadtLocalApicNmi),
+ InvalidLocalApicNmi(usize),
+ LapicAddressOverride(&'static MadtLapicAddressOverride),
+ InvalidLapicAddressOverride(usize),
+ LocalX2ApicNmi(&'static MadtLocalX2ApicNmi),
+ InvalidLocalX2ApicNmi(usize),
Unknown(u8),
}
@@ -176,6 +230,10 @@ impl Iterator for MadtIter {
let entry_len =
unsafe { *(self.sdt.data_address() as *const u8).add(self.i + 1) } as usize;
+ if entry_len < 2 {
+ return None;
+ }
+
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0x0 => {
@@ -224,6 +282,44 @@ impl Iterator for MadtIter {
MadtEntry::InvalidGicd(entry_len)
}
}
+ 0x9 => {
+ if entry_len == size_of::<MadtLocalX2Apic>() + 2 {
+ MadtEntry::LocalX2Apic(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalX2Apic)
+ })
+ } else {
+ MadtEntry::InvalidLocalX2Apic(entry_len)
+ }
+ }
+ 0x4 => {
+ if entry_len == size_of::<MadtLocalApicNmi>() + 2 {
+ MadtEntry::LocalApicNmi(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalApicNmi)
+ })
+ } else {
+ MadtEntry::InvalidLocalApicNmi(entry_len)
+ }
+ }
+ 0x5 => {
+ if entry_len == size_of::<MadtLapicAddressOverride>() + 2 {
+ MadtEntry::LapicAddressOverride(unsafe {
+ &*((self.sdt.data_address() + self.i + 2)
+ as *const MadtLapicAddressOverride)
+ })
+ } else {
+ MadtEntry::InvalidLapicAddressOverride(entry_len)
+ }
+ }
+ 0xA => {
+ if entry_len == size_of::<MadtLocalX2ApicNmi>() + 2 {
+ MadtEntry::LocalX2ApicNmi(unsafe {
+ &*((self.sdt.data_address() + self.i + 2)
+ as *const MadtLocalX2ApicNmi)
+ })
+ } else {
+ MadtEntry::InvalidLocalX2ApicNmi(entry_len)
+ }
+ }
_ => MadtEntry::Unknown(entry_type),
};
diff --git a/src/devices/graphical_debug/mod.rs b/src/devices/graphical_debug/mod.rs
index b701c9a8..00cc984d 100644
--- a/src/devices/graphical_debug/mod.rs
+++ b/src/devices/graphical_debug/mod.rs
@@ -59,7 +59,12 @@ pub fn init(env: &[u8]) {
);
let debug_display = DebugDisplay::new(width, height, stride, virt as *mut u32);
- *DEBUG_DISPLAY.lock() = Some(debug_display);
+ // FIXME: Writing to the framebuffer during early boot causes a hang on some
+ // QEMU configurations (virtio-vga, ramfb). The bootloader maps the framebuffer
+ // with default caching; the kernel remaps it with write-combining in memory::init().
+ // Early kernel access before that remap appears to stall. Deferring DEBUG_DISPLAY
+ // setup avoids the hang; userspace vesad/fbbootlogd handles graphical output.
+ // *DEBUG_DISPLAY.lock() = Some(debug_display);
}
#[allow(unused)]
+147
View File
@@ -0,0 +1,147 @@
diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs
index 3159b9c4..c691eb8d 100644
--- a/src/acpi/madt/mod.rs
+++ b/src/acpi/madt/mod.rs
@@ -146,6 +146,52 @@ pub struct MadtGicd {
_reserved2: [u8; 3],
}
+/// MADT Local x2APIC (entry type 0x9)
+/// Used by modern AMD and Intel platforms with APIC IDs >= 255.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalX2Apic {
+ _reserved: u16,
+ pub x2apic_id: u32,
+ pub flags: u32,
+ pub processor_uid: u32,
+}
+
+/// MADT Local APIC NMI (entry type 0x4)
+/// Configures NMI routing to a processor's LINT0/LINT1 pin.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalApicNmi {
+ pub processor: u8,
+ pub flags: u16,
+ pub nmi_pin: u8,
+}
+
+/// MADT Local APIC Address Override (entry type 0x5)
+/// Provides 64-bit override for the 32-bit local APIC address.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLapicAddressOverride {
+ _reserved: u16,
+ pub local_apic_address: u64,
+}
+
+/// MADT Local x2APIC NMI (entry type 0xA)
+/// x2APIC equivalent of type 0x4 for APIC IDs >= 255.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalX2ApicNmi {
+ _reserved: u16,
+ pub processor_uid: u32,
+ pub flags: u16,
+ pub nmi_pin: u8,
+ _reserved2: u8,
+}
+
+const _: () = assert!(size_of::<MadtLocalApicNmi>() == 4);
+const _: () = assert!(size_of::<MadtLapicAddressOverride>() == 10);
+const _: () = assert!(size_of::<MadtLocalX2ApicNmi>() == 10);
+
/// MADT Entries
#[derive(Debug)]
#[allow(dead_code)]
@@ -160,6 +206,14 @@ pub enum MadtEntry {
InvalidGicc(usize),
Gicd(&'static MadtGicd),
InvalidGicd(usize),
+ LocalX2Apic(&'static MadtLocalX2Apic),
+ InvalidLocalX2Apic(usize),
+ LocalApicNmi(&'static MadtLocalApicNmi),
+ InvalidLocalApicNmi(usize),
+ LapicAddressOverride(&'static MadtLapicAddressOverride),
+ InvalidLapicAddressOverride(usize),
+ LocalX2ApicNmi(&'static MadtLocalX2ApicNmi),
+ InvalidLocalX2ApicNmi(usize),
Unknown(u8),
}
@@ -176,6 +230,10 @@ impl Iterator for MadtIter {
let entry_len =
unsafe { *(self.sdt.data_address() as *const u8).add(self.i + 1) } as usize;
+ if entry_len < 2 {
+ return None;
+ }
+
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0x0 => {
@@ -224,6 +282,44 @@ impl Iterator for MadtIter {
MadtEntry::InvalidGicd(entry_len)
}
}
+ 0x9 => {
+ if entry_len == size_of::<MadtLocalX2Apic>() + 2 {
+ MadtEntry::LocalX2Apic(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalX2Apic)
+ })
+ } else {
+ MadtEntry::InvalidLocalX2Apic(entry_len)
+ }
+ }
+ 0x4 => {
+ if entry_len == size_of::<MadtLocalApicNmi>() + 2 {
+ MadtEntry::LocalApicNmi(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalApicNmi)
+ })
+ } else {
+ MadtEntry::InvalidLocalApicNmi(entry_len)
+ }
+ }
+ 0x5 => {
+ if entry_len == size_of::<MadtLapicAddressOverride>() + 2 {
+ MadtEntry::LapicAddressOverride(unsafe {
+ &*((self.sdt.data_address() + self.i + 2)
+ as *const MadtLapicAddressOverride)
+ })
+ } else {
+ MadtEntry::InvalidLapicAddressOverride(entry_len)
+ }
+ }
+ 0xA => {
+ if entry_len == size_of::<MadtLocalX2ApicNmi>() + 2 {
+ MadtEntry::LocalX2ApicNmi(unsafe {
+ &*((self.sdt.data_address() + self.i + 2)
+ as *const MadtLocalX2ApicNmi)
+ })
+ } else {
+ MadtEntry::InvalidLocalX2ApicNmi(entry_len)
+ }
+ }
_ => MadtEntry::Unknown(entry_type),
};
diff --git a/src/devices/graphical_debug/mod.rs b/src/devices/graphical_debug/mod.rs
index b701c9a8..00cc984d 100644
--- a/src/devices/graphical_debug/mod.rs
+++ b/src/devices/graphical_debug/mod.rs
@@ -59,7 +59,12 @@ pub fn init(env: &[u8]) {
);
let debug_display = DebugDisplay::new(width, height, stride, virt as *mut u32);
- *DEBUG_DISPLAY.lock() = Some(debug_display);
+ // FIXME: Writing to the framebuffer during early boot causes a hang on some
+ // QEMU configurations (virtio-vga, ramfb). The bootloader maps the framebuffer
+ // with default caching; the kernel remaps it with write-combining in memory::init().
+ // Early kernel access before that remap appears to stall. Deferring DEBUG_DISPLAY
+ // setup avoids the hang; userspace vesad/fbbootlogd handles graphical output.
+ // *DEBUG_DISPLAY.lock() = Some(debug_display);
}
#[allow(unused)]
+6 -54
View File
@@ -1,5 +1,5 @@
--- a/b/src/connection.c 2025-07-06 13:11:26.000000000 +0100
+++ b/src/connection.c 2026-05-01 00:15:42.778777823 +0100
--- a/src/connection.c
+++ b/src/connection.c
@@ -40,6 +40,12 @@
#include <time.h>
#include <ffi.h>
@@ -13,56 +13,8 @@
#include "wayland-util.h"
#include "wayland-private.h"
#include "wayland-os.h"
--- a/b/src/event-loop.c 2025-07-06 13:11:26.000000000 +0100
+++ b/src/event-loop.c 2026-05-01 00:15:42.778845239 +0100
@@ -35,9 +35,43 @@
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/epoll.h>
-#include <sys/signalfd.h>
-#include <sys/timerfd.h>
#include <unistd.h>
+/* Redox: relibc declares signalfd/timerfd in headers but has no implementation.
+ Provide inline implementations via Redox schemes. */
+#define SFD_CLOEXEC O_CLOEXEC
+#define SFD_NONBLOCK O_NONBLOCK
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+#define TFD_TIMER_ABSTIME 0x1
+struct signalfd_siginfo { uint8_t pad[128]; };
+static int signalfd(int fd, const sigset_t *mask, int flags) {
+ int oflag = O_RDWR;
+ if (flags & SFD_CLOEXEC) oflag |= O_CLOEXEC;
+ if (flags & SFD_NONBLOCK) oflag |= O_NONBLOCK;
+ if (fd == -1) { fd = open("/scheme/event", oflag); if (fd < 0) return -1; }
+ else { if (flags & SFD_CLOEXEC) fcntl(fd, F_SETFD, FD_CLOEXEC); }
+ sigprocmask(SIG_BLOCK, mask, NULL);
+ return fd;
+}
+static int timerfd_create(int clockid, int flags) {
+ int oflag = O_RDWR;
+ if (flags & TFD_CLOEXEC) oflag |= O_CLOEXEC;
+ if (flags & TFD_NONBLOCK) oflag |= O_NONBLOCK;
+ char path[64];
+ snprintf(path, sizeof(path), "/scheme/time/%d", clockid);
+ return open(path, oflag);
+}
+static int timerfd_settime(int fd, int flags, const struct itimerspec *new_value, struct itimerspec *old_value) {
+ if (new_value == NULL) { errno = EFAULT; return -1; }
+ ssize_t r = write(fd, &new_value->it_value, sizeof(struct timespec));
+ return (r == sizeof(struct timespec)) ? 0 : -1;
+}
+static int timerfd_gettime(int fd, struct itimerspec *curr) {
+ if (curr == NULL) { errno = EFAULT; return -1; }
+ curr->it_interval = (struct timespec){0};
+ ssize_t r = read(fd, &curr->it_value, sizeof(struct timespec));
+ return (r == sizeof(struct timespec)) ? 0 : -1;
+}
#include "timespec-util.h"
#include "wayland-util.h"
#include "wayland-private.h"
--- a/b/src/meson.build 2025-07-06 13:11:26.000000000 +0100
+++ b/src/meson.build 2026-05-01 00:15:42.778925799 +0100
--- a/src/meson.build
+++ b/src/meson.build
@@ -81,8 +81,7 @@
endif
@@ -73,8 +25,8 @@
else
wayland_scanner_for_build = wayland_scanner
endif
--- a/b/src/wayland-server.c 2025-07-06 13:11:26.000000000 +0100
+++ b/src/wayland-server.c 2026-05-01 00:15:42.779083803 +0100
--- a/src/wayland-server.c
+++ b/src/wayland-server.c
@@ -39,7 +39,23 @@
#include <dlfcn.h>
#include <sys/time.h>
+102
View File
@@ -0,0 +1,102 @@
--- a/b/src/connection.c 2025-07-06 13:11:26.000000000 +0100
+++ b/src/connection.c 2026-05-01 00:15:42.778777823 +0100
@@ -40,6 +40,12 @@
#include <time.h>
#include <ffi.h>
+#ifndef MSG_NOSIGNAL
+#define MSG_NOSIGNAL 0
+#endif
+
+extern FILE *open_memstream(char **bufp, size_t *sizep);
+
#include "wayland-util.h"
#include "wayland-private.h"
#include "wayland-os.h"
--- a/b/src/event-loop.c 2025-07-06 13:11:26.000000000 +0100
+++ b/src/event-loop.c 2026-05-01 00:15:42.778845239 +0100
@@ -35,9 +35,43 @@
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/epoll.h>
-#include <sys/signalfd.h>
-#include <sys/timerfd.h>
#include <unistd.h>
+/* Redox: relibc declares signalfd/timerfd in headers but has no implementation.
+ Provide inline implementations via Redox schemes. */
+#define SFD_CLOEXEC O_CLOEXEC
+#define SFD_NONBLOCK O_NONBLOCK
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+#define TFD_TIMER_ABSTIME 0x1
+struct signalfd_siginfo { uint8_t pad[128]; };
+static int signalfd(int fd, const sigset_t *mask, int flags) {
+ int oflag = O_RDWR;
+ if (flags & SFD_CLOEXEC) oflag |= O_CLOEXEC;
+ if (flags & SFD_NONBLOCK) oflag |= O_NONBLOCK;
+ if (fd == -1) { fd = open("/scheme/event", oflag); if (fd < 0) return -1; }
+ else { if (flags & SFD_CLOEXEC) fcntl(fd, F_SETFD, FD_CLOEXEC); }
+ sigprocmask(SIG_BLOCK, mask, NULL);
+ return fd;
+}
+static int timerfd_create(int clockid, int flags) {
+ int oflag = O_RDWR;
+ if (flags & TFD_CLOEXEC) oflag |= O_CLOEXEC;
+ if (flags & TFD_NONBLOCK) oflag |= O_NONBLOCK;
+ char path[64];
+ snprintf(path, sizeof(path), "/scheme/time/%d", clockid);
+ return open(path, oflag);
+}
+static int timerfd_settime(int fd, int flags, const struct itimerspec *new_value, struct itimerspec *old_value) {
+ if (new_value == NULL) { errno = EFAULT; return -1; }
+ ssize_t r = write(fd, &new_value->it_value, sizeof(struct timespec));
+ return (r == sizeof(struct timespec)) ? 0 : -1;
+}
+static int timerfd_gettime(int fd, struct itimerspec *curr) {
+ if (curr == NULL) { errno = EFAULT; return -1; }
+ curr->it_interval = (struct timespec){0};
+ ssize_t r = read(fd, &curr->it_value, sizeof(struct timespec));
+ return (r == sizeof(struct timespec)) ? 0 : -1;
+}
#include "timespec-util.h"
#include "wayland-util.h"
#include "wayland-private.h"
--- a/b/src/meson.build 2025-07-06 13:11:26.000000000 +0100
+++ b/src/meson.build 2026-05-01 00:15:42.778925799 +0100
@@ -81,8 +81,7 @@
endif
if meson.is_cross_build() or not get_option('scanner')
- scanner_dep = dependency('wayland-scanner', native: true, version: meson.project_version())
- wayland_scanner_for_build = find_program(scanner_dep.get_variable(pkgconfig: 'wayland_scanner'))
+ wayland_scanner_for_build = find_program('wayland-scanner', native: true)
else
wayland_scanner_for_build = wayland_scanner
endif
--- a/b/src/wayland-server.c 2025-07-06 13:11:26.000000000 +0100
+++ b/src/wayland-server.c 2026-05-01 00:15:42.779083803 +0100
@@ -39,7 +39,23 @@
#include <dlfcn.h>
#include <sys/time.h>
#include <fcntl.h>
-#include <sys/eventfd.h>
+#ifndef EFD_CLOEXEC
+#define EFD_CLOEXEC O_CLOEXEC
+#endif
+#ifndef EFD_NONBLOCK
+#define EFD_NONBLOCK O_NONBLOCK
+#endif
+#ifndef EFD_SEMAPHORE
+#define EFD_SEMAPHORE 0x1
+#endif
+static int eventfd(unsigned int initval, int flags) {
+ int oflag = O_RDWR;
+ if (flags & EFD_CLOEXEC) oflag |= O_CLOEXEC;
+ if (flags & EFD_NONBLOCK) oflag |= O_NONBLOCK;
+ char path[64];
+ snprintf(path, sizeof(path), "/scheme/event/eventfd/%u/%d", initval, (flags & EFD_SEMAPHORE) ? 1 : 0);
+ return open(path, oflag);
+}
#include <sys/file.h>
#include <sys/stat.h>
@@ -2,7 +2,7 @@ diff --git a/src/corelib/io/qfilesystemengine_unix.cpp b/src/corelib/io/qfilesys
index e857f1a..f4f7f4a 100644
--- a/src/corelib/io/qfilesystemengine_unix.cpp
+++ b/src/corelib/io/qfilesystemengine_unix.cpp
@@ -27,23 +27,6 @@
@@ -27,22 +27,5 @@
#include <stdio.h>
#include <errno.h>
@@ -0,0 +1,27 @@
diff --git a/src/corelib/io/qfilesystemengine_unix.cpp b/src/corelib/io/qfilesystemengine_unix.cpp
index e857f1a..f4f7f4a 100644
--- a/src/corelib/io/qfilesystemengine_unix.cpp
+++ b/src/corelib/io/qfilesystemengine_unix.cpp
@@ -27,23 +27,6 @@
#include <stdio.h>
#include <errno.h>
-#ifdef Q_OS_REDOX
-// relibc does not provide unlinkat/linkat yet (POSIX.1-2008 *at functions).
-// Provide inline stubs that work for AT_FDCWD only - sufficient for
-// FreeDesktop trash operations in this file.
-#include <fcntl.h>
-static inline int unlinkat(int dirfd, const char *pathname, int flags)
-{
- if (dirfd != AT_FDCWD || flags != 0) { errno = ENOTSUP; return -1; }
- return unlink(pathname);
-}
-static inline int linkat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath, int flags)
-{
- if (olddirfd != AT_FDCWD || newdirfd != AT_FDCWD || flags != 0) { errno = ENOTSUP; return -1; }
- return link(oldpath, newpath);
-}
-#endif
-
#include <chrono>
#include <memory> // for std::unique_ptr

Some files were not shown because too many files have changed in this diff Show More