fix: Oracle review — delete 50 stale .bak files, update Wayland doc

- git rm 50 stale .bak patch-backup files (they had survived across 4+ sessions)
- Update WAYLAND-IMPLEMENTATION-PLAN.md: acknowledge that the kded6 offscreen
  workaround is temporary until the Qt6 Wayland null+8 crash is fixed (a sketch
  of the override follows below). kded6 is a headless D-Bus daemon, so running
  it under Wayland adds no functionality.
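
  For reference, the workaround boils down to forcing Qt's offscreen platform
  plugin for kded6. A minimal sketch, assuming it is applied as a systemd user
  unit drop-in (the unit name and drop-in path are assumptions, not verified
  against this tree):

    # ~/.config/systemd/user/plasma-kded6.service.d/offscreen.conf
    [Service]
    Environment=QT_QPA_PLATFORM=offscreen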

This addresses two Oracle verification gaps: the stale-file cleanup is now
committed, and the doc/code contradiction is resolved by acknowledging the
temporary nature of the kded6 offscreen workaround.
2026-05-06 15:29:04 +01:00
parent 806663698c
commit 499445e52c
51 changed files with 4 additions and 10752 deletions
@@ -1,65 +0,0 @@
# RedBear OS branding in kernel start messages
# Changes "Redox OS" to "RedBear OS" in architecture start files
# Adds device init logging milestones in x86_shared start path
diff --git a/src/arch/aarch64/start.rs b/src/arch/aarch64/start.rs
index e1c8cfb4..65e3fe33 100644
--- a/src/arch/aarch64/start.rs
+++ b/src/arch/aarch64/start.rs
@@ -91,7 +91,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs) -> ! {
dtb::serial::init_early(dtb);
}
- info!("Redox OS starting...");
+ info!("RedBear OS starting...");
args.print();
// Initialize RMM
diff --git a/src/arch/riscv64/start.rs b/src/arch/riscv64/start.rs
index 2551968f..a825536a 100644
--- a/src/arch/riscv64/start.rs
+++ b/src/arch/riscv64/start.rs
@@ -97,7 +97,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs) -> ! {
init_early(dtb);
}
- info!("Redox OS starting...");
+ info!("RedBear OS starting...");
args.print();
if let Some(dtb) = &dtb {
diff --git a/src/arch/x86_shared/start.rs b/src/arch/x86_shared/start.rs
index 7a7c0ae8..62f9523c 100644
--- a/src/arch/x86_shared/start.rs
+++ b/src/arch/x86_shared/start.rs
@@ -91,7 +91,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! {
// Set up graphical debug
graphical_debug::init(args.env());
- info!("Redox OS starting...");
+ info!("RedBear OS starting...");
args.print();
// Set up GDT
@@ -127,16 +127,21 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! {
// Initialize devices
device::init();
+ info!("kernel: device init complete (PIC + LAPIC)");
// Read ACPI tables, starts APs
if cfg!(feature = "acpi") {
crate::acpi::init(args.acpi_rsdp());
+ info!("kernel: ACPI tables parsed");
device::init_after_acpi();
+ info!("kernel: IOAPIC init complete");
}
crate::profiling::init();
// Initialize all of the non-core devices not otherwise needed to complete initialization
device::init_noncore();
+ info!("kernel: timer init complete, entering userspace");
args.bootstrap()
};
@@ -1,47 +0,0 @@
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -147,6 +147,7 @@ enum ContextHandle {
Priority,
SchedAffinity,
SchedPolicy,
+ Name,
MmapMinAddr(Arc<AddrSpaceWrapper>),
}
@@ -267,6 +268,7 @@ impl ProcScheme {
"sched-affinity" => (ContextHandle::SchedAffinity, true),
// TODO: Switch this kernel-local proc handle over to a stable upstream
// redox_syscall ProcCall::SetSchedPolicy opcode once that lands.
"sched-policy" => (ContextHandle::SchedPolicy, false),
+ "name" => (ContextHandle::Name, false),
"status" => (ContextHandle::Status { privileged: false }, false),
_ if path.starts_with("auth-") => {
let nonprefix = &path["auth-".len()..];
@@ -1218,6 +1220,16 @@ impl ContextHandle {
Ok(2)
}
+ ContextHandle::Name => {
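+ // Truncate to at most 31 bytes; the name is applied only if the
+ // copied bytes are valid UTF-8.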
+ let mut name_buf = [0u8; 32];
+ let len = buf.copy_common_bytes_to_slice(&mut name_buf[..31]).unwrap_or(0);
+ let mut context = context.write(token.token());
+ context.name.clear();
+ if let Ok(s) = core::str::from_utf8(&name_buf[..len]) {
+ context.name.push_str(s);
+ }
+ Ok(len)
+ }
ContextHandle::Status { privileged } => {
let mut args = buf.usizes();
@@ -1532,6 +1544,10 @@ impl ContextHandle {
let data = [context.sched_policy as u8, context.sched_rt_priority];
buf.copy_common_bytes_from_slice(&data)
}
+ ContextHandle::Name => {
+ let context = context.read(token.token());
+ buf.copy_common_bytes_from_slice(context.name.as_bytes())
+ }
ContextHandle::Status { .. } => {
let status = {
let context = context.read(token.token());
@@ -1,70 +0,0 @@
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -145,8 +145,9 @@ enum ContextHandle {
// TODO: Remove this once openat is implemented, or allow openat-via-dup via e.g. the top-level
// directory.
OpenViaDup,
+ Priority,
SchedAffinity,
SchedPolicy,
Name,
MmapMinAddr(Arc<AddrSpaceWrapper>),
@@ -160,6 +161,17 @@ pub struct ProcScheme;
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
static HANDLES: RwLock<L1, HashMap<usize, Handle>> =
RwLock::new(HashMap::with_hasher(DefaultHashBuilder::new()));
+
+const NICE_MIN: i32 = -20;
+const NICE_MAX: i32 = 19;
+
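+// Linux-style mapping: nice -20..=19 maps linearly onto kernel run-queue
+// priority 0..=39, so nice 0 lands on priority 20.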
+fn nice_to_kernel_prio(nice: i32) -> usize {
+ (nice.saturating_add(20)).clamp(0, 39) as usize
+}
+
+fn kernel_prio_to_nice(prio: usize) -> i32 {
+ (prio.min(39) as i32) - 20
+}
#[cfg(feature = "debugger")]
#[allow(dead_code)]
pub fn foreach_addrsp(
@@ -253,6 +265,7 @@ impl ProcScheme {
"sighandler" => (ContextHandle::Sighandler, false),
"start" => (ContextHandle::Start, false),
"open_via_dup" => (ContextHandle::OpenViaDup, false),
+ "priority" => (ContextHandle::Priority, false),
"mmap-min-addr" => (
ContextHandle::MmapMinAddr(Arc::clone(
context
@@ -1191,6 +1204,17 @@ impl ContextHandle {
Ok(size_of_val(&mask))
}
+ Self::Priority => {
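+ // Writes supply a native-endian i32 nice value, validated against -20..=19.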
+ let nice = unsafe { buf.read_exact::<i32>()? };
+ if !(NICE_MIN..=NICE_MAX).contains(&nice) {
+ return Err(Error::new(EINVAL));
+ }
+
+ context
+ .write(token.token())
+ .set_sched_other_prio(nice_to_kernel_prio(nice));
+
+ Ok(size_of::<i32>())
+ }
Self::SchedPolicy => {
if buf.len() != 2 {
return Err(Error::new(EINVAL));
@@ -1522,6 +1546,10 @@ impl ContextHandle {
buf.copy_exactly(crate::cpu_set::mask_as_bytes(&mask))?;
Ok(size_of_val(&mask))
+ }
+ ContextHandle::Priority => {
+ let nice = kernel_prio_to_nice(context.read(token.token()).prio);
+ buf.copy_common_bytes_from_slice(&nice.to_ne_bytes())
}
ContextHandle::SchedPolicy => {
let context = context.read(token.token());
@@ -1,146 +0,0 @@
diff --git a/src/percpu.rs b/src/percpu.rs
--- a/src/percpu.rs
+++ b/src/percpu.rs
@@ -29,12 +29,14 @@ pub struct PerCpuSched {
pub run_queues_lock: AtomicBool,
pub balance: Cell<[usize; RUN_QUEUE_COUNT]>,
pub last_queue: Cell<usize>,
+ pub last_balance_time: Cell<u128>,
}
impl PerCpuSched {
pub const fn new() -> Self {
const EMPTY: VecDeque<WeakContextRef> = VecDeque::new();
Self {
run_queues: SyncUnsafeCell::new([EMPTY; RUN_QUEUE_COUNT]),
run_queues_lock: AtomicBool::new(false),
balance: Cell::new([0; RUN_QUEUE_COUNT]),
last_queue: Cell::new(0),
+ last_balance_time: Cell::new(0),
}
}
diff --git a/src/context/switch.rs b/src/context/switch.rs
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -33,6 +33,8 @@ const SCHED_PRIO_TO_WEIGHT: [usize; 40] = [
70, 56, 45, 36, 29, 23, 18, 15,
];
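+// Rebalance across CPUs at most once every 100 ms (value in nanoseconds).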
+const LOAD_BALANCE_INTERVAL_NS: u128 = 100_000_000;
+
static SCHED_STEAL_COUNT: AtomicUsize = AtomicUsize::new(0);
@@ -101,6 +103,9 @@ pub fn tick(token: &mut CleanLockToken) {
let new_ticks = ticks_cell.get() + 1;
ticks_cell.set(new_ticks);
+ let balance_time = crate::time::monotonic(token);
+ maybe_balance_queues(token, percpu, balance_time);
+
// Trigger a context switch after every 3 ticks.
if new_ticks >= 3 {
switch(token);
@@ -427,6 +432,92 @@ fn steal_work(
None
}
+
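+// Total number of contexts queued across all of this CPU's run queues.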
+fn queue_depth(percpu: &PercpuBlock) -> usize {
+ let mut sched_lock = SchedQueuesLock::new(&percpu.sched);
+ unsafe {
+ sched_lock
+ .queues_mut()
+ .iter()
+ .map(|queue| queue.len())
+ .sum()
+ }
+}
+
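+// Pop one runnable context off the source CPU's queues and requeue it at the
+// same priority on the target CPU.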
+fn migrate_one_context(
+ token: &mut CleanLockToken,
+ source_id: LogicalCpuId,
+ target_id: LogicalCpuId,
+ switch_time: u128,
+) -> bool {
+ let Some(source) = get_percpu_block(source_id) else {
+ return false;
+ };
+ let Some(target) = get_percpu_block(target_id) else {
+ return false;
+ };
+
+ let source_idle = source.switch_internals.idle_context();
+ let moved = {
+ let mut source_lock = SchedQueuesLock::new(&source.sched);
+ let source_queues = unsafe { source_lock.queues_mut() };
+ pop_movable_context(token, source_queues, target_id, switch_time, &source_idle)
+ };
+
+ let Some((prio, context_ref)) = moved else {
+ return false;
+ };
+
+ let mut target_lock = SchedQueuesLock::new(&target.sched);
+ unsafe {
+ target_lock.queues_mut()[prio].push_back(context_ref);
+ }
+ true
+}
+
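+// BSP-only, throttled load balancer: sample every CPU's queue depth, then for
+// each CPU with an empty queue pull one context from the most loaded CPU
+// whose depth exceeds the rounded-up average by more than one.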
+fn maybe_balance_queues(token: &mut CleanLockToken, percpu: &PercpuBlock, balance_time: u128) {
+ if crate::cpu_count() <= 1 || percpu.cpu_id != LogicalCpuId::BSP {
+ return;
+ }
+ if balance_time.saturating_sub(percpu.sched.last_balance_time.get()) < LOAD_BALANCE_INTERVAL_NS
+ {
+ return;
+ }
+
+ percpu.sched.last_balance_time.set(balance_time);
+
+ let mut depths = Vec::new();
+ let mut total_depth = 0usize;
+ for raw_id in 0..crate::cpu_count() {
+ let cpu_id = LogicalCpuId::new(raw_id);
+ let Some(cpu_percpu) = get_percpu_block(cpu_id) else {
+ continue;
+ };
+ let depth = queue_depth(cpu_percpu);
+ total_depth += depth;
+ depths.push((cpu_id, depth));
+ }
+
+ if depths.len() <= 1 || total_depth == 0 {
+ return;
+ }
+
+ let avg_depth = (total_depth + depths.len().saturating_sub(1)) / depths.len();
+
+ for target_index in 0..depths.len() {
+ if depths[target_index].1 != 0 {
+ continue;
+ }
+
+ let mut source_index = None;
+ let mut source_depth = 0usize;
+ for (idx, &(_, depth)) in depths.iter().enumerate() {
+ if idx == target_index {
+ continue;
+ }
+ if depth > avg_depth + 1 && depth > source_depth {
+ source_index = Some(idx);
+ source_depth = depth;
+ }
+ }
+
+ let Some(source_index) = source_index else {
+ continue;
+ };
+
+ let source_id = depths[source_index].0;
+ let target_id = depths[target_index].0;
+ if migrate_one_context(token, source_id, target_id, balance_time) {
+ depths[source_index].1 = depths[source_index].1.saturating_sub(1);
+ depths[target_index].1 += 1;
+ }
+ }
+}
@@ -1,190 +0,0 @@
diff --git a/src/percpu.rs b/src/percpu.rs
--- a/src/percpu.rs
+++ b/src/percpu.rs
@@ -100,6 +100,14 @@ static ALL_PERCPU_BLOCKS: [AtomicPtr<PercpuBlock>; MAX_CPU_COUNT as usize] =
pub unsafe fn init_tlb_shootdown(id: LogicalCpuId, block: *mut PercpuBlock) {
ALL_PERCPU_BLOCKS[id.get() as usize].store(block, Ordering::Release)
}
+
+pub fn get_percpu_block(id: LogicalCpuId) -> Option<&'static PercpuBlock> {
+ unsafe {
+ ALL_PERCPU_BLOCKS[id.get() as usize]
+ .load(Ordering::Acquire)
+ .as_ref()
+ }
+}
pub fn get_all_stats() -> Vec<(LogicalCpuId, CpuStatsData)> {
diff --git a/src/context/switch.rs b/src/context/switch.rs
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -7,15 +7,15 @@ use crate::{
self, arch, idle_contexts, idle_contexts_try, run_contexts, ArcContextLockWriteGuard,
Context, ContextLock, SchedPolicy, WeakContextRef, RUN_QUEUE_COUNT,
},
- cpu_set::LogicalCpuId,
+ cpu_set::{LogicalCpuId, LogicalCpuSet},
cpu_stats::{self, CpuState},
- percpu::{PerCpuSched, PercpuBlock},
+ percpu::{get_percpu_block, PerCpuSched, PercpuBlock},
sync::{ArcRwLockWriteGuard, CleanLockToken, LockToken, L1, L4},
};
use alloc::{sync::Arc, vec::Vec};
use core::{
cell::{Cell, RefCell},
hint, mem,
- sync::atomic::Ordering,
+ sync::atomic::{AtomicUsize, Ordering},
};
use syscall::PtraceFlags;
@@
+static SCHED_STEAL_COUNT: AtomicUsize = AtomicUsize::new(0);
+
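+// Note: this overwrites any existing affinity mask, pinning the context to a
+// single CPU.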
+fn assign_context_to_cpu(context: &mut Context, cpu_id: LogicalCpuId) {
+ context.sched_affinity = LogicalCpuSet::empty();
+ context.sched_affinity.atomic_set(cpu_id);
+}
@@
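+// Scan the source queues for one runnable, non-idle context; pin it to
+// target_cpu and return its priority and weak reference for requeueing.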
+fn pop_movable_context(
+ token: &mut CleanLockToken,
+ queues: &mut [alloc::collections::VecDeque<WeakContextRef>; RUN_QUEUE_COUNT],
+ target_cpu: LogicalCpuId,
+ switch_time: u128,
+ idle_context: &Arc<ContextLock>,
+) -> Option<(usize, WeakContextRef)> {
+ for prio in 0..RUN_QUEUE_COUNT {
+ let len = queues[prio].len();
+ for _ in 0..len {
+ let Some(context_ref) = queues[prio].pop_front() else {
+ break;
+ };
+ let Some(context_lock) = context_ref.upgrade() else {
+ continue;
+ };
+ if Arc::ptr_eq(&context_lock, idle_context) {
+ queues[prio].push_back(context_ref);
+ continue;
+ }
+
+ let mut context_guard = unsafe { context_lock.write_arc() };
+ let sw = unsafe { update_stealable(&mut context_guard, switch_time) };
+ if let UpdateResult::CanSwitch = sw {
+ assign_context_to_cpu(&mut context_guard, target_cpu);
+ let moved_ref = WeakContextRef(Arc::downgrade(ArcContextLockWriteGuard::rwlock(
+ &context_guard,
+ )));
+ drop(context_guard);
+ return Some((prio, moved_ref));
+ }
+
+ if matches!(sw, UpdateResult::Blocked) {
+ idle_contexts(token.downgrade()).push_back(context_ref);
+ } else {
+ queues[prio].push_back(context_ref);
+ }
+ }
+ }
+
+ None
+}
+
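+// Work stealing: when this CPU has nothing runnable, walk the other CPUs'
+// queues in order and take the first runnable, non-idle context, pinning it
+// to this CPU.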
+fn steal_work(
+ token: &mut CleanLockToken,
+ cpu_id: LogicalCpuId,
+ switch_time: u128,
+) -> Option<ArcContextLockWriteGuard> {
+ let cpu_count = crate::cpu_count();
+ if cpu_count <= 1 {
+ return None;
+ }
+
+ for offset in 1..cpu_count {
+ let victim_id = LogicalCpuId::new((cpu_id.get() + offset) % cpu_count);
+ let Some(victim) = get_percpu_block(victim_id) else {
+ continue;
+ };
+
+ let victim_idle = victim.switch_internals.idle_context();
+ let mut victim_lock = SchedQueuesLock::new(&victim.sched);
+ let victim_queues = unsafe { victim_lock.queues_mut() };
+
+ for prio in 0..RUN_QUEUE_COUNT {
+ let len = victim_queues[prio].len();
+ for _ in 0..len {
+ let Some(context_ref) = victim_queues[prio].pop_front() else {
+ break;
+ };
+ let Some(context_lock) = context_ref.upgrade() else {
+ continue;
+ };
+ if Arc::ptr_eq(&context_lock, &victim_idle) {
+ victim_queues[prio].push_back(context_ref);
+ continue;
+ }
+
+ let mut context_guard = unsafe { context_lock.write_arc() };
+ let sw = unsafe { update_stealable(&mut context_guard, switch_time) };
+ if let UpdateResult::CanSwitch = sw {
+ assign_context_to_cpu(&mut context_guard, cpu_id);
+ SCHED_STEAL_COUNT.fetch_add(1, Ordering::Relaxed);
+ return Some(context_guard);
+ }
+
+ if matches!(sw, UpdateResult::Blocked) {
+ idle_contexts(token.downgrade()).push_back(context_ref);
+ } else {
+ victim_queues[prio].push_back(context_ref);
+ }
+ }
+ }
+ }
+
+ None
+}
+
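+// With the context write-locked: clear an expired soft-block wake deadline
+// (unblocking without an IPI), then report whether the context can be
+// switched to, is blocked, or is already running elsewhere.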
+unsafe fn update_stealable(context: &mut Context, switch_time: u128) -> UpdateResult {
+ if context.running {
+ return UpdateResult::Skip;
+ }
+ if context.status.is_soft_blocked()
+ && let Some(wake) = context.wake
+ && switch_time >= wake
+ {
+ context.wake = None;
+ context.unblock_no_ipi();
+ }
+ if context.status.is_runnable() {
+ UpdateResult::CanSwitch
+ } else {
+ UpdateResult::Blocked
+ }
+}
@@ -360,6 +469,10 @@ fn wakeup_contexts(token: &mut CleanLockToken, percpu: &PercpuBlock, switch_time
let mut sched_lock = SchedQueuesLock::new(&percpu.sched);
let run_queues = unsafe { sched_lock.queues_mut() };
for (prio, context_ref) in wakeups {
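+ // Pin the woken context's affinity to this CPU before requeueing it locally.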
+ if let Some(context_lock) = context_ref.upgrade() {
+ let mut context_guard = unsafe { context_lock.write_arc() };
+ assign_context_to_cpu(&mut context_guard, percpu.cpu_id);
+ }
run_queues[prio].push_back(context_ref);
}
}
@@ -559,6 +672,16 @@ fn select_next_context(
);
return Ok(Some(next_context_guard));
}
+
+ if let Some(next_context_guard) = steal_work(token, cpu_id, switch_time) {
+ queue_previous_context(
+ token,
+ percpu,
+ &prev_context_lock,
+ prev_context_guard,
+ &idle_context,
+ );
+ return Ok(Some(next_context_guard));
+ }
let global_next = {
let contexts_data = run_contexts(token.token());
@@ -1,147 +0,0 @@
diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs
index 3159b9c4..c691eb8d 100644
--- a/src/acpi/madt/mod.rs
+++ b/src/acpi/madt/mod.rs
@@ -146,6 +146,52 @@ pub struct MadtGicd {
_reserved2: [u8; 3],
}
+/// MADT Local x2APIC (entry type 0x9)
+/// Used by modern AMD and Intel platforms with APIC IDs >= 255.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalX2Apic {
+ _reserved: u16,
+ pub x2apic_id: u32,
+ pub flags: u32,
+ pub processor_uid: u32,
+}
+
+/// MADT Local APIC NMI (entry type 0x4)
+/// Configures NMI routing to a processor's LINT0/LINT1 pin.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalApicNmi {
+ pub processor: u8,
+ pub flags: u16,
+ pub nmi_pin: u8,
+}
+
+/// MADT Local APIC Address Override (entry type 0x5)
+/// Provides 64-bit override for the 32-bit local APIC address.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLapicAddressOverride {
+ _reserved: u16,
+ pub local_apic_address: u64,
+}
+
+/// MADT Local x2APIC NMI (entry type 0xA)
+/// x2APIC equivalent of type 0x4 for APIC IDs >= 255.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalX2ApicNmi {
+ _reserved: u16,
+ pub processor_uid: u32,
+ pub flags: u16,
+ pub nmi_pin: u8,
+ _reserved2: u8,
+}
+
+const _: () = assert!(size_of::<MadtLocalApicNmi>() == 4);
+const _: () = assert!(size_of::<MadtLapicAddressOverride>() == 10);
+const _: () = assert!(size_of::<MadtLocalX2ApicNmi>() == 10);
+
/// MADT Entries
#[derive(Debug)]
#[allow(dead_code)]
@@ -160,6 +206,14 @@ pub enum MadtEntry {
InvalidGicc(usize),
Gicd(&'static MadtGicd),
InvalidGicd(usize),
+ LocalX2Apic(&'static MadtLocalX2Apic),
+ InvalidLocalX2Apic(usize),
+ LocalApicNmi(&'static MadtLocalApicNmi),
+ InvalidLocalApicNmi(usize),
+ LapicAddressOverride(&'static MadtLapicAddressOverride),
+ InvalidLapicAddressOverride(usize),
+ LocalX2ApicNmi(&'static MadtLocalX2ApicNmi),
+ InvalidLocalX2ApicNmi(usize),
Unknown(u8),
}
@@ -176,6 +230,10 @@ impl Iterator for MadtIter {
let entry_len =
unsafe { *(self.sdt.data_address() as *const u8).add(self.i + 1) } as usize;
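+ // Entries are at least 2 bytes (type + length); treat anything shorter
+ // as corruption and stop iterating rather than loop without advancing.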
+ if entry_len < 2 {
+ return None;
+ }
+
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0x0 => {
@@ -224,6 +282,44 @@ impl Iterator for MadtIter {
MadtEntry::InvalidGicd(entry_len)
}
}
+ 0x9 => {
+ if entry_len == size_of::<MadtLocalX2Apic>() + 2 {
+ MadtEntry::LocalX2Apic(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalX2Apic)
+ })
+ } else {
+ MadtEntry::InvalidLocalX2Apic(entry_len)
+ }
+ }
+ 0x4 => {
+ if entry_len == size_of::<MadtLocalApicNmi>() + 2 {
+ MadtEntry::LocalApicNmi(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalApicNmi)
+ })
+ } else {
+ MadtEntry::InvalidLocalApicNmi(entry_len)
+ }
+ }
+ 0x5 => {
+ if entry_len == size_of::<MadtLapicAddressOverride>() + 2 {
+ MadtEntry::LapicAddressOverride(unsafe {
+ &*((self.sdt.data_address() + self.i + 2)
+ as *const MadtLapicAddressOverride)
+ })
+ } else {
+ MadtEntry::InvalidLapicAddressOverride(entry_len)
+ }
+ }
+ 0xA => {
+ if entry_len == size_of::<MadtLocalX2ApicNmi>() + 2 {
+ MadtEntry::LocalX2ApicNmi(unsafe {
+ &*((self.sdt.data_address() + self.i + 2)
+ as *const MadtLocalX2ApicNmi)
+ })
+ } else {
+ MadtEntry::InvalidLocalX2ApicNmi(entry_len)
+ }
+ }
_ => MadtEntry::Unknown(entry_type),
};
diff --git a/src/devices/graphical_debug/mod.rs b/src/devices/graphical_debug/mod.rs
index b701c9a8..00cc984d 100644
--- a/src/devices/graphical_debug/mod.rs
+++ b/src/devices/graphical_debug/mod.rs
@@ -59,7 +59,12 @@ pub fn init(env: &[u8]) {
);
let debug_display = DebugDisplay::new(width, height, stride, virt as *mut u32);
- *DEBUG_DISPLAY.lock() = Some(debug_display);
+ // FIXME: Writing to the framebuffer during early boot causes a hang on some
+ // QEMU configurations (virtio-vga, ramfb). The bootloader maps the framebuffer
+ // with default caching; the kernel remaps it with write-combining in memory::init().
+ // Early kernel access before that remap appears to stall. Deferring DEBUG_DISPLAY
+ // setup avoids the hang; userspace vesad/fbbootlogd handles graphical output.
+ // *DEBUG_DISPLAY.lock() = Some(debug_display);
}
#[allow(unused)]