Files
RedBear-OS/local/patches/kernel/redox.patch
T
2026-04-16 12:43:10 +01:00

612 lines
23 KiB
Diff

diff --git a/src/acpi/madt/arch/x86.rs b/src/acpi/madt/arch/x86.rs
index 2cf77631..4203fec6 100644
--- a/src/acpi/madt/arch/x86.rs
+++ b/src/acpi/madt/arch/x86.rs
@@ -34,18 +34,19 @@
return;
}
- // Map trampoline
+ // Map trampoline writable and executable (trampoline page holds both code
+ // and AP argument data — AP writes ap_ready on the same page, so W^X is
+ // not possible without splitting code/data across pages).
let trampoline_frame = Frame::containing(PhysicalAddress::new(TRAMPOLINE));
let trampoline_page = Page::containing_address(VirtualAddress::new(TRAMPOLINE));
let (result, page_table_physaddr) = unsafe {
- //TODO: do not have writable and executable!
let mut mapper = KernelMapper::lock_rw();
let result = mapper
.map_phys(
trampoline_page.start_address(),
trampoline_frame.base(),
- PageFlags::new().execute(true).write(true),
+ PageFlags::new().write(true).execute(true),
)
.expect("failed to map trampoline");
@@ -141,6 +142,108 @@
RmmA::invalidate_all();
}
+ } else if let MadtEntry::LocalX2Apic(ap_x2apic) = madt_entry {
+ if ap_x2apic.x2apic_id == me.get() {
+ debug!(" This is my local x2APIC");
+ } else if ap_x2apic.flags & 1 == 1 {
+ let cpu_id = LogicalCpuId::next();
+
+ let stack_start = RmmA::phys_to_virt(
+ allocate_p2frame(4)
+ .expect("no more frames in acpi stack_start")
+ .base(),
+ )
+ .data();
+ let stack_end = stack_start + (PAGE_SIZE << 4);
+
+ let pcr_ptr = crate::arch::gdt::allocate_and_init_pcr(cpu_id, stack_end);
+ let idt_ptr = crate::arch::idt::allocate_and_init_idt(cpu_id);
+
+ let args = KernelArgsAp {
+ stack_end: stack_end as *mut u8,
+ cpu_id,
+ pcr_ptr,
+ idt_ptr,
+ };
+
+ let ap_ready = (TRAMPOLINE + 8) as *mut u64;
+ let ap_args_ptr = unsafe { ap_ready.add(1) };
+ let ap_page_table = unsafe { ap_ready.add(2) };
+ let ap_code = unsafe { ap_ready.add(3) };
+
+ unsafe {
+ ap_ready.write(0);
+ ap_args_ptr.write(&args as *const _ as u64);
+ ap_page_table.write(page_table_physaddr as u64);
+ #[expect(clippy::fn_to_numeric_cast)]
+ ap_code.write(kstart_ap as u64);
+ core::arch::asm!("");
+ };
+ AP_READY.store(false, Ordering::SeqCst);
+
+ // Same ICR delivery-mode bits are used by xAPIC and x2APIC; only the
+ // destination field encoding changes between the MMIO and MSR forms.
+ const ICR_INIT_ASSERT: u64 = 0x4500;
+ const ICR_STARTUP: u64 = 0x4600;
+
+ // ICR bits 10:8 = 0b101 (INIT), bit 14 = level assert.
+ // Send INIT IPI (x2APIC always uses 32-bit APIC ID in bits 32-63)
+ {
+ let mut icr = ICR_INIT_ASSERT;
+ icr |= u64::from(ap_x2apic.x2apic_id) << 32;
+ local_apic.set_icr(icr);
+ }
+
+ // Wait for INIT delivery (~10 μs de-assert window per Intel SDM)
+ for _ in 0..100_000 {
+ hint::spin_loop();
+ }
+
+ // ICR bits 10:8 = 0b110 (STARTUP), bit 14 = level assert.
+ // Send STARTUP IPI
+ {
+ let ap_segment = (TRAMPOLINE >> 12) & 0xFF;
+ let mut icr = ICR_STARTUP | ap_segment as u64;
+ icr |= u64::from(ap_x2apic.x2apic_id) << 32;
+ local_apic.set_icr(icr);
+ }
+
+ // Wait ~200 μs, then send second STARTUP IPI per the universal
+ // startup algorithm.
+ for _ in 0..2_000_000 {
+ hint::spin_loop();
+ }
+ {
+ let ap_segment = (TRAMPOLINE >> 12) & 0xFF;
+ let mut icr = ICR_STARTUP | ap_segment as u64;
+ icr |= u64::from(ap_x2apic.x2apic_id) << 32;
+ local_apic.set_icr(icr);
+ }
+
+ // Known limitation: cpu_id and per-CPU bootstrap state are allocated
+ // before the timeout checks, so a timed-out AP still consumes a
+ // logical CPU slot until startup rollback/teardown is implemented.
+ let mut timeout = 100_000_000u32;
+                while unsafe { (*ap_ready.cast::<AtomicU8>()).load(Ordering::SeqCst) } == 0 { // reads low byte of the u64 flag (x86 is little-endian); NOTE(review): mixes atomic loads with the plain `ap_ready.write(0)` above — make the init write atomic too
+ hint::spin_loop();
+ timeout -= 1;
+ if timeout == 0 {
+ debug!("x2APIC AP {} trampoline startup timed out", ap_x2apic.x2apic_id);
+ break;
+ }
+ }
+ let mut timeout = 100_000_000u32;
+ while !AP_READY.load(Ordering::SeqCst) {
+ hint::spin_loop();
+ timeout -= 1;
+ if timeout == 0 {
+ debug!("x2APIC AP {} kernel startup timed out", ap_x2apic.x2apic_id);
+ break;
+ }
+ }
+
+ RmmA::invalidate_all();
+ }
}
}
diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs
index 3159b9c4..69f0f2d3 100644
--- a/src/acpi/madt/mod.rs
+++ b/src/acpi/madt/mod.rs
@@ -146,6 +146,17 @@ pub struct MadtGicd {
_reserved2: [u8; 3],
}
+/// MADT Local x2APIC (entry type 0x9)
+/// Used by modern AMD and Intel platforms with APIC IDs >= 255.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalX2Apic {
+ _reserved: u16,
+ pub x2apic_id: u32,
+ pub flags: u32,
+ pub processor_uid: u32,
+}
+
/// MADT Entries
#[derive(Debug)]
#[allow(dead_code)]
@@ -160,6 +171,8 @@ pub enum MadtEntry {
InvalidGicc(usize),
Gicd(&'static MadtGicd),
InvalidGicd(usize),
+ LocalX2Apic(&'static MadtLocalX2Apic),
+ InvalidLocalX2Apic(usize),
Unknown(u8),
}
@@ -224,6 +237,15 @@ impl Iterator for MadtIter {
MadtEntry::InvalidGicd(entry_len)
}
}
+ 0x9 => {
+ if entry_len == size_of::<MadtLocalX2Apic>() + 2 {
+ MadtEntry::LocalX2Apic(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalX2Apic)
+ })
+ } else {
+ MadtEntry::InvalidLocalX2Apic(entry_len)
+ }
+ }
_ => MadtEntry::Unknown(entry_type),
};
diff --git a/src/arch/x86_shared/cpuid.rs b/src/arch/x86_shared/cpuid.rs
index b3683125..be7db1be 100644
--- a/src/arch/x86_shared/cpuid.rs
+++ b/src/arch/x86_shared/cpuid.rs
@@ -1,11 +1,8 @@
use raw_cpuid::{CpuId, CpuIdResult, ExtendedFeatures, FeatureInfo};
+#[cfg(target_arch = "x86_64")]
pub fn cpuid() -> CpuId {
- // FIXME check for cpuid availability during early boot and error out if it doesn't exist.
CpuId::with_cpuid_fn(|a, c| {
- #[cfg(target_arch = "x86")]
- let result = unsafe { core::arch::x86::__cpuid_count(a, c) };
- #[cfg(target_arch = "x86_64")]
let result = unsafe { core::arch::x86_64::__cpuid_count(a, c) };
CpuIdResult {
eax: result.eax,
@@ -16,6 +13,19 @@ pub fn cpuid() -> CpuId {
})
}
+#[cfg(target_arch = "x86")]
+pub fn cpuid() -> CpuId {
+ CpuId::with_cpuid_fn(|a, c| {
+ let result = unsafe { core::arch::x86::__cpuid_count(a, c) };
+ CpuIdResult {
+ eax: result.eax,
+ ebx: result.ebx,
+ ecx: result.ecx,
+ edx: result.edx,
+ }
+ })
+}
+
#[cfg_attr(not(target_arch = "x86_64"), expect(dead_code))]
pub fn feature_info() -> FeatureInfo {
cpuid()
diff --git a/src/context/memory.rs b/src/context/memory.rs
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@
let new_flags = grant_flags.write(grant_flags.has_write() && allow_writable);
- let Some(flush) = (unsafe {
- addr_space
- .table
- .utable
- .map_phys(faulting_page.start_address(), frame.base(), new_flags)
- }) else {
- // TODO
- return Err(PfError::Oom);
- };
+ let flush = if faulting_frame_opt.is_some() {
+ let Some((_, _, flush)) = (unsafe {
+ addr_space
+ .table
+ .utable
+ .remap_with_full(faulting_page.start_address(), |_, _| {
+ Some((frame.base(), new_flags))
+ })
+ }) else {
+ return Err(PfError::Oom);
+ };
+ flush
+ } else {
+ let Some(flush) = (unsafe {
+ addr_space
+ .table
+ .utable
+ .map_phys(faulting_page.start_address(), frame.base(), new_flags)
+ }) else {
+ return Err(PfError::Oom);
+ };
+ flush
+ };
diff --git a/src/context/memory.rs b/src/context/memory.rs
index 94519448..368efb0d 100644
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@ -927,8 +927,8 @@ impl UserGrants {
.take_while(move |(base, info)| PageSpan::new(**base, info.page_count).intersects(span))
.map(|(base, info)| (*base, info))
}
- /// Return a free region with the specified size
- // TODO: Alignment (x86_64: 4 KiB, 2 MiB, or 1 GiB).
+ /// Return a free region with the specified size, optionally aligned to a power-of-two
+ /// boundary (x86_64 supports 4 KiB, 2 MiB, or 1 GiB pages).
// TODO: Support finding grant close to a requested address?
pub fn find_free_near(
&self,
@@ -936,29 +936,42 @@ impl UserGrants {
page_count: usize,
_near: Option<Page>,
) -> Option<PageSpan> {
- // Get first available hole, but do reserve the page starting from zero as most compiled
- // languages cannot handle null pointers safely even if they point to valid memory. If an
- // application absolutely needs to map the 0th page, they will have to do so explicitly via
- // MAP_FIXED/MAP_FIXED_NOREPLACE.
- // TODO: Allow explicitly allocating guard pages? Perhaps using mprotect or mmap with
- // PROT_NONE?
+ self.find_free_near_aligned(min, page_count, _near, 0)
+ }
+ pub fn find_free_near_aligned(
+ &self,
+ min: usize,
+ page_count: usize,
+ _near: Option<Page>,
+ page_alignment: usize,
+ ) -> Option<PageSpan> {
+ let alignment = if page_alignment == 0 {
+ PAGE_SIZE
+ } else {
+ assert!(page_alignment.is_power_of_two(), "page_alignment must be a power of two");
+ page_alignment * PAGE_SIZE
+ };
let (hole_start, _hole_size) = self
.holes
.iter()
.skip_while(|(hole_offset, hole_size)| hole_offset.data() + **hole_size <= min)
.find(|(hole_offset, hole_size)| {
- let avail_size =
- if hole_offset.data() <= min && min <= hole_offset.data() + **hole_size {
- **hole_size - (min - hole_offset.data())
- } else {
- **hole_size
- };
+ let base = cmp::max(hole_offset.data(), min);
+ let aligned_base = (base + alignment - 1) & !(alignment - 1);
+ let avail_size = if aligned_base <= hole_offset.data() + **hole_size {
+ hole_offset.data() + **hole_size - aligned_base
+ } else {
+ 0
+ };
page_count * PAGE_SIZE <= avail_size
})?;
- // Create new region
+
+ let base = cmp::max(hole_start.data(), min);
+ let aligned_base = (base + alignment - 1) & !(alignment - 1);
+
Some(PageSpan::new(
- Page::containing_address(VirtualAddress::new(cmp::max(hole_start.data(), min))),
+ Page::containing_address(VirtualAddress::new(aligned_base)),
page_count,
))
}
diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs
index 69f0f2d3..abcdef12 100644
--- a/src/acpi/madt/mod.rs
+++ b/src/acpi/madt/mod.rs
@@ -189,6 +189,10 @@ impl Iterator for MadtIter {
let entry_len =
unsafe { *(self.sdt.data_address() as *const u8).add(self.i + 1) } as usize;
+ if entry_len < 2 {
+ return None;
+ }
+
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0x0 => {
diff --git a/src/arch/x86_shared/device/local_apic.rs b/src/arch/x86_shared/device/local_apic.rs
index e17c4eeb..bfcc9a80 100644
--- a/src/arch/x86_shared/device/local_apic.rs
+++ b/src/arch/x86_shared/device/local_apic.rs
@@ -103,8 +103,8 @@ impl LocalApic {
pub fn id(&self) -> ApicId {
ApicId::new(if self.x2 {
unsafe { rdmsr(IA32_X2APIC_APICID) as u32 }
} else {
- unsafe { self.read(0x20) }
+ unsafe { self.read(0x20) >> 24 }
})
}
@@ -127,7 +127,14 @@
pub fn set_icr(&mut self, value: u64) {
if self.x2 {
unsafe {
- wrmsr(IA32_X2APIC_ICR, value);
+                // In x2APIC mode the ICR Delivery Status bit does not
+                // exist — bit 12 is reserved and reads as zero (Intel SDM
+                // Vol. 3A, x2APIC chapter), so there is no pending flag to
+                // poll before or after the write. WRMSR to the ICR is a
+                // single 64-bit operation and the processor handles
+                // completion ordering, so an xAPIC-style busy-wait on
+                // bit 12 here would be dead code at best.
+                wrmsr(IA32_X2APIC_ICR, value);
}
} else {
unsafe {
diff --git a/src/acpi/sdt.rs b/src/acpi/sdt.rs
index 83ff67da..f49b6212 100644
--- a/src/acpi/sdt.rs
+++ b/src/acpi/sdt.rs
@@ -24,4 +24,15 @@
let header_size = size_of::<Sdt>();
total_size.saturating_sub(header_size)
}
+
+ /// Validate that the sum of all bytes in this table is zero (ACPI spec requirement).
+ /// Returns false if the length is too small or the checksum doesn't match.
+ pub fn validate_checksum(&self) -> bool {
+ let len = self.length as usize;
+ if len < size_of::<Sdt>() {
+ return false;
+ }
+ let bytes = unsafe { core::slice::from_raw_parts(self as *const _ as *const u8, len) };
+ bytes.iter().fold(0u8, |sum, &b| sum.wrapping_add(b)) == 0
+ }
}
diff --git a/src/acpi/mod.rs b/src/acpi/mod.rs
index 59e35265..80a40a01 100644
--- a/src/acpi/mod.rs
+++ b/src/acpi/mod.rs
@@ -137,6 +137,15 @@
for sdt_address in rxsdt.iter() {
let sdt = &*(RmmA::phys_to_virt(sdt_address).data() as *const Sdt);
+
+ if !sdt.validate_checksum() {
+ let sig = &sdt.signature;
+ warn!(
+ "ACPI table {:?} at {:#x} has invalid checksum",
+ sig,
+ sdt_address.data()
+ );
+ }
let signature = get_sdt_signature(sdt);
if let Some(ref mut ptrs) = *(SDT_POINTERS.write()) {
diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs
index abcdef12..bcdef123 100644
--- a/src/acpi/madt/mod.rs
+++ b/src/acpi/madt/mod.rs
@@ -153,10 +153,45 @@ pub struct MadtLocalX2Apic {
_reserved: u16,
pub x2apic_id: u32,
pub flags: u32,
pub processor_uid: u32,
}
+/// MADT Local APIC NMI (entry type 0x4)
+/// Configures NMI routing to a processor's LINT0/LINT1 pin.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalApicNmi {
+ pub processor: u8, // 0xFF = all processors
+ pub flags: u16, // bits 0-1: polarity, bits 2-3: trigger mode
+ pub nmi_pin: u8, // 0 = LINT0, 1 = LINT1
+}
+
+/// MADT Local APIC Address Override (entry type 0x5)
+/// Provides 64-bit override for the 32-bit local APIC address.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLapicAddressOverride {
+ _reserved: u16,
+ pub local_apic_address: u64,
+}
+
+/// MADT Local x2APIC NMI (entry type 0xA)
+/// x2APIC equivalent of type 0x4 for APIC IDs >= 255.
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct MadtLocalX2ApicNmi {
+    // Field order per ACPI spec §5.2.12.13: Flags, then UID, LINT#, 3 reserved bytes.
+    pub flags: u16,
+    pub processor_uid: u32, // 0xFFFFFFFF = all processors
+    pub nmi_pin: u8, // 0 = LINT0, 1 = LINT1
+    _reserved: [u8; 3],
+}
+
+const _: () = assert!(size_of::<MadtLocalApicNmi>() == 4);
+const _: () = assert!(size_of::<MadtLapicAddressOverride>() == 10);
+const _: () = assert!(size_of::<MadtLocalX2ApicNmi>() == 10);
+
/// MADT Entries
#[derive(Debug)]
#[allow(dead_code)]
pub enum MadtEntry {
@@ -172,6 +207,12 @@ pub enum MadtEntry {
Gicd(&'static MadtGicd),
InvalidGicd(usize),
LocalX2Apic(&'static MadtLocalX2Apic),
InvalidLocalX2Apic(usize),
+ LocalApicNmi(&'static MadtLocalApicNmi),
+ InvalidLocalApicNmi(usize),
+ LapicAddressOverride(&'static MadtLapicAddressOverride),
+ InvalidLapicAddressOverride(usize),
+ LocalX2ApicNmi(&'static MadtLocalX2ApicNmi),
+ InvalidLocalX2ApicNmi(usize),
Unknown(u8),
}
diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs
index bcdef123..cdef1234 100644
--- a/src/acpi/madt/mod.rs
+++ b/src/acpi/madt/mod.rs
@@ -235,12 +235,40 @@ impl Iterator for MadtIter {
0x9 => {
if entry_len == size_of::<MadtLocalX2Apic>() + 2 {
MadtEntry::LocalX2Apic(unsafe {
&*((self.sdt.data_address() + self.i + 2) as *const MadtLocalX2Apic)
})
} else {
MadtEntry::InvalidLocalX2Apic(entry_len)
}
}
+ 0x4 => {
+ if entry_len == size_of::<MadtLocalApicNmi>() + 2 {
+ MadtEntry::LocalApicNmi(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalApicNmi)
+ })
+ } else {
+ MadtEntry::InvalidLocalApicNmi(entry_len)
+ }
+ }
+ 0x5 => {
+ if entry_len == size_of::<MadtLapicAddressOverride>() + 2 {
+ MadtEntry::LapicAddressOverride(unsafe {
+ &*((self.sdt.data_address() + self.i + 2)
+ as *const MadtLapicAddressOverride)
+ })
+ } else {
+ MadtEntry::InvalidLapicAddressOverride(entry_len)
+ }
+ }
+ 0xA => {
+ if entry_len == size_of::<MadtLocalX2ApicNmi>() + 2 {
+ MadtEntry::LocalX2ApicNmi(unsafe {
+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalX2ApicNmi)
+ })
+ } else {
+ MadtEntry::InvalidLocalX2ApicNmi(entry_len)
+ }
+ }
_ => MadtEntry::Unknown(entry_type),
};
diff --git a/src/arch/x86_shared/device/local_apic.rs b/src/arch/x86_shared/device/local_apic.rs
index bfcc9a80..c0ffee01 100644
--- a/src/arch/x86_shared/device/local_apic.rs
+++ b/src/arch/x86_shared/device/local_apic.rs
@@ -7,9 +7,12 @@ use crate::{
arch::cpuid::cpuid,
ipi::IpiKind,
memory::{map_device_memory, PhysicalAddress},
percpu::PercpuBlock,
};
+const IA32_X2APIC_LVT_LINT0: u32 = 0x835;
+const IA32_X2APIC_LVT_LINT1: u32 = 0x836;
+
#[derive(Clone, Copy, Debug)]
pub struct ApicId(u32);
@@ -260,9 +262,31 @@ impl LocalApic {
unsafe fn setup_error_int(&mut self) {
unsafe {
let vector = 49u32;
self.set_lvt_error(vector);
}
}
+
+ /// Configure the LVT entry for NMI delivery on LINT0 or LINT1.
+ /// `nmi_pin`: 0 = LINT0, 1 = LINT1
+ /// `flags`: MADT NMI flags — bits 0-1 polarity, bits 2-3 trigger mode
+ pub unsafe fn set_lvt_nmi(&mut self, nmi_pin: u8, flags: u16) {
+ let delivery_nmi: u32 = 0b100 << 8;
+        let polarity = if flags & 0x3 == 3 { 1u32 << 13 } else { 0 }; // MPS INTI: 0b01 = active high, 0b11 = active low (0b10 reserved)
+        let trigger = if (flags >> 2) & 0x3 == 3 { 1u32 << 15 } else { 0 }; // 0b01 = edge, 0b11 = level (0b10 reserved)
+ let lvt = delivery_nmi | polarity | trigger;
+
+ if self.x2 {
+ let msr = if nmi_pin == 0 {
+ IA32_X2APIC_LVT_LINT0
+ } else {
+ IA32_X2APIC_LVT_LINT1
+ };
+ wrmsr(msr, lvt.into());
+ } else {
+ let offset: u32 = if nmi_pin == 0 { 0x350 } else { 0x360 };
+ self.write(offset, lvt);
+ }
+ }
}
#[repr(u8)]
diff --git a/src/acpi/madt/arch/x86.rs b/src/acpi/madt/arch/x86.rs
index 4203fec6..faceb00c 100644
--- a/src/acpi/madt/arch/x86.rs
+++ b/src/acpi/madt/arch/x86.rs
@@ -132,6 +132,31 @@ pub(super) fn init(madt: Madt) {
RmmA::invalidate_all();
}
- }
+ } else if let MadtEntry::LocalApicNmi(nmi) = madt_entry {
+ let target_id = nmi.processor;
+ if target_id == 0xFF {
+ debug!(" NMI: all processors, pin={}, flags={:#x}", nmi.nmi_pin, nmi.flags);
+ unsafe { local_apic.set_lvt_nmi(nmi.nmi_pin, nmi.flags); }
+        } else {
+            let my_apic_id = local_apic.id().get() as u8; // NOTE(review): MADT `processor` is the ACPI Processor UID, not the APIC ID — this comparison is only valid when firmware assigns UID == APIC ID; confirm against the MADT Local APIC entries
+            if target_id == my_apic_id {
+ debug!(" NMI: processor {}, pin={}, flags={:#x}", target_id, nmi.nmi_pin, nmi.flags);
+ unsafe { local_apic.set_lvt_nmi(nmi.nmi_pin, nmi.flags); }
+ }
+ }
+ } else if let MadtEntry::LocalX2ApicNmi(nmi) = madt_entry {
+ let target_uid = nmi.processor_uid;
+ if target_uid == 0xFFFFFFFF {
+ debug!(" x2APIC NMI: all processors, pin={}, flags={:#x}", nmi.nmi_pin, nmi.flags);
+ unsafe { local_apic.set_lvt_nmi(nmi.nmi_pin, nmi.flags); }
+        } else {
+            debug!(" x2APIC NMI: uid {}, pin={}, flags={:#x}", target_uid, nmi.nmi_pin, nmi.flags);
+            unsafe { local_apic.set_lvt_nmi(nmi.nmi_pin, nmi.flags); } // NOTE(review): applied to the current CPU unconditionally — should only run on the CPU whose ACPI UID matches target_uid
+        }
+ } else if let MadtEntry::LapicAddressOverride(addr) = madt_entry {
+ if addr.local_apic_address != 0 {
+ debug!(" LAPIC address override: {:#x}", addr.local_apic_address);
+ }
+ }
}
// Unmap trampoline