kwin: plugin path fix + KF6Svg stub + QmlPlugins cleanup

- Mirror usr/plugins/*.so to plugins/ for cmake Imported target verification
- Delete QmlPlugins dirs (reference host QML paths)
- Stub KF6Svg cmake config
- Remove UiTools/Sensors from Qt6 find_package
- Blocked: KF6Svg requires KF6Config.cmake component registration
This commit is contained in:
2026-05-04 16:42:19 +01:00
parent f5d71b05db
commit 091f19167b
5 changed files with 482 additions and 199 deletions
@@ -0,0 +1,168 @@
--- /dev/null 2026-05-03 20:55:05.750445686 +0100
+++ /mnt/data/homes/kellito/Builds/rbos/recipes/core/kernel/source/src/arch/x86_shared/device/msi.rs 2026-05-04 16:29:00.566790704 +0100
@@ -0,0 +1,165 @@
+// MSI/MSI-X support for x86 — kernel-level message composition and validation
+// Cross-referenced from Linux 7.0: arch/x86/kernel/apic/msi.c (391 lines)
+
+use crate::arch::device::local_apic::ApicId;
+
+pub const MSI_ADDRESS_BASE: u64 = 0xFEE0_0000;
+pub const MSI_ADDRESS_MASK: u64 = 0xFEEF_F000;
+const MSI_DEST_MODE_LOGICAL: u64 = 1 << 2;
+const MSI_REDIRECTION_HINT: u64 = 1 << 3;
+const MSI_DEST_MODE_PHYSICAL: u64 = 0;
+
+#[derive(Debug, Clone, Copy)]
+pub struct MsiAddress {
+ pub raw: u64,
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct MsiData {
+ pub raw: u32,
+}
+
+#[derive(Debug, Clone)]
+pub struct MsiMessage {
+ pub address: MsiAddress,
+ pub data: MsiData,
+}
+
+impl MsiAddress {
+ pub fn new(dest_apic_id: u8, redirection_hint: bool, dest_mode_logical: bool) -> Self {
+ let mut addr = MSI_ADDRESS_BASE;
+ addr |= u64::from(dest_apic_id) << 12;
+ if redirection_hint { addr |= MSI_REDIRECTION_HINT; }
+ if dest_mode_logical { addr |= MSI_DEST_MODE_LOGICAL; }
+ Self { raw: addr }
+ }
+
+ pub fn validate(addr: u64) -> bool {
+ (addr & MSI_ADDRESS_MASK) == MSI_ADDRESS_BASE
+ }
+
+ pub fn dest_apic_id(&self) -> u8 {
+ ((self.raw >> 12) & 0xFF) as u8
+ }
+}
+
+impl MsiData {
+ pub fn new(vector: u8, delivery_mode: u8, trigger_mode: u8) -> Self {
+ let mut data: u32 = u32::from(vector);
+ data |= u32::from(delivery_mode) << 8;
+ data |= u32::from(trigger_mode) << 15;
+ Self { raw: data }
+ }
+
+ pub fn vector(&self) -> u8 { (self.raw & 0xFF) as u8 }
+ pub fn delivery_mode(&self) -> u8 { ((self.raw >> 8) & 0x7) as u8 }
+ pub fn trigger_mode(&self) -> u8 { ((self.raw >> 15) & 0x1) as u8 }
+}
+
+impl MsiMessage {
+ pub fn compose(dest: ApicId, vector: u8, delivery_mode: u8, trigger_mode: u8) -> Self {
+ let address = MsiAddress::new(dest.get() as u8, false, false);
+ let data = MsiData::new(vector, delivery_mode, trigger_mode);
+ Self { address, data }
+ }
+
+ pub fn validate(&self) -> bool {
+ MsiAddress::validate(self.address.raw)
+ && self.data.vector() >= 32
+ && self.data.vector() < 255
+ }
+}
+
+#[derive(Debug)]
+pub struct MsiCapability {
+ pub msg_ctl: u16,
+ pub msg_addr_lo: u32,
+ pub msg_addr_hi: u32,
+ pub msg_data: u16,
+ pub mask_bits: u32,
+ pub pending_bits: u32,
+ pub is_64bit: bool,
+ pub is_maskable: bool,
+ pub multiple_message_capable: u8,
+}
+
+impl MsiCapability {
+ pub fn parse(raw: &[u32; 6], msg_ctl: u16) -> Self {
+ Self {
+ msg_ctl,
+ msg_addr_lo: raw[1],
+ msg_addr_hi: if msg_ctl & (1 << 7) != 0 { raw[2] } else { 0 },
+ msg_data: if msg_ctl & (1 << 7) != 0 {
+ (raw[3] & 0xFFFF) as u16
+ } else {
+ (raw[2] & 0xFFFF) as u16
+ },
+ mask_bits: if msg_ctl & (1 << 8) != 0 {
+ if msg_ctl & (1 << 7) != 0 { raw[3] >> 16 } else { raw[3] }
+ } else { 0 },
+ pending_bits: if msg_ctl & (1 << 8) != 0 {
+ if msg_ctl & (1 << 7) != 0 { raw[4] } else { raw[4] }
+ } else { 0 },
+ is_64bit: msg_ctl & (1 << 7) != 0,
+ is_maskable: msg_ctl & (1 << 8) != 0,
+ multiple_message_capable: ((msg_ctl >> 1) & 0x7) as u8,
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct MsixCapability {
+ pub msg_ctl: u16,
+ pub table_offset: u32,
+ pub table_bar: u8,
+ pub pba_offset: u32,
+ pub pba_bar: u8,
+ pub table_size: u16,
+}
+
+impl MsixCapability {
+ pub fn parse(raw: &[u32; 3], msg_ctl: u16) -> Self {
+ Self {
+ msg_ctl,
+ table_offset: raw[1] & !0x7,
+ table_bar: (raw[1] & 0x7) as u8,
+ pba_offset: raw[2] & !0x7,
+ pba_bar: (raw[2] & 0x7) as u8,
+ table_size: ((msg_ctl >> 1) & 0x7FF) as u16 + 1,
+ }
+ }
+}
+
+pub fn is_valid_msi_address(addr: u64) -> bool {
+ MsiAddress::validate(addr)
+}
+
+pub fn is_valid_msi_vector(vector: u8) -> bool {
+ vector >= 32 && vector < 255
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_compose_message() {
+ let msg = MsiMessage::compose(ApicId::new(0), 48, 0, 0);
+ assert!(msg.validate());
+ assert_eq!(msg.data.vector(), 48);
+ }
+
+ #[test]
+ fn test_invalid_address() {
+ assert!(!is_valid_msi_address(0xDEAD_BEEF));
+ assert!(is_valid_msi_address(0xFEE0_0000));
+ }
+
+ #[test]
+ fn test_msi_parse() {
+ let raw = [0u32; 6];
+ let cap = MsiCapability::parse(&raw, 0);
+ assert!(!cap.is_64bit);
+ assert!(!cap.is_maskable);
+ }
+}
+168
View File
@@ -0,0 +1,168 @@
--- /dev/null 2026-05-03 20:55:05.750445686 +0100
+++ /mnt/data/homes/kellito/Builds/rbos/recipes/core/kernel/source/src/arch/x86_shared/device/msi.rs 2026-05-04 16:29:00.566790704 +0100
@@ -0,0 +1,165 @@
+// MSI/MSI-X support for x86 — kernel-level message composition and validation
+// Cross-referenced from Linux 7.0: arch/x86/kernel/apic/msi.c (391 lines)
+
+use crate::arch::device::local_apic::ApicId;
+
+pub const MSI_ADDRESS_BASE: u64 = 0xFEE0_0000;
+pub const MSI_ADDRESS_MASK: u64 = 0xFEEF_F000;
+const MSI_DEST_MODE_LOGICAL: u64 = 1 << 2;
+const MSI_REDIRECTION_HINT: u64 = 1 << 3;
+const MSI_DEST_MODE_PHYSICAL: u64 = 0;
+
+#[derive(Debug, Clone, Copy)]
+pub struct MsiAddress {
+ pub raw: u64,
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct MsiData {
+ pub raw: u32,
+}
+
+#[derive(Debug, Clone)]
+pub struct MsiMessage {
+ pub address: MsiAddress,
+ pub data: MsiData,
+}
+
+impl MsiAddress {
+ pub fn new(dest_apic_id: u8, redirection_hint: bool, dest_mode_logical: bool) -> Self {
+ let mut addr = MSI_ADDRESS_BASE;
+ addr |= u64::from(dest_apic_id) << 12;
+ if redirection_hint { addr |= MSI_REDIRECTION_HINT; }
+ if dest_mode_logical { addr |= MSI_DEST_MODE_LOGICAL; }
+ Self { raw: addr }
+ }
+
+ pub fn validate(addr: u64) -> bool {
+ (addr & MSI_ADDRESS_MASK) == MSI_ADDRESS_BASE
+ }
+
+ pub fn dest_apic_id(&self) -> u8 {
+ ((self.raw >> 12) & 0xFF) as u8
+ }
+}
+
+impl MsiData {
+ pub fn new(vector: u8, delivery_mode: u8, trigger_mode: u8) -> Self {
+ let mut data: u32 = u32::from(vector);
+ data |= u32::from(delivery_mode) << 8;
+ data |= u32::from(trigger_mode) << 15;
+ Self { raw: data }
+ }
+
+ pub fn vector(&self) -> u8 { (self.raw & 0xFF) as u8 }
+ pub fn delivery_mode(&self) -> u8 { ((self.raw >> 8) & 0x7) as u8 }
+ pub fn trigger_mode(&self) -> u8 { ((self.raw >> 15) & 0x1) as u8 }
+}
+
+impl MsiMessage {
+ pub fn compose(dest: ApicId, vector: u8, delivery_mode: u8, trigger_mode: u8) -> Self {
+ let address = MsiAddress::new(dest.get() as u8, false, false);
+ let data = MsiData::new(vector, delivery_mode, trigger_mode);
+ Self { address, data }
+ }
+
+ pub fn validate(&self) -> bool {
+ MsiAddress::validate(self.address.raw)
+ && self.data.vector() >= 32
+ && self.data.vector() < 255
+ }
+}
+
+#[derive(Debug)]
+pub struct MsiCapability {
+ pub msg_ctl: u16,
+ pub msg_addr_lo: u32,
+ pub msg_addr_hi: u32,
+ pub msg_data: u16,
+ pub mask_bits: u32,
+ pub pending_bits: u32,
+ pub is_64bit: bool,
+ pub is_maskable: bool,
+ pub multiple_message_capable: u8,
+}
+
+impl MsiCapability {
+ pub fn parse(raw: &[u32; 6], msg_ctl: u16) -> Self {
+ Self {
+ msg_ctl,
+ msg_addr_lo: raw[1],
+ msg_addr_hi: if msg_ctl & (1 << 7) != 0 { raw[2] } else { 0 },
+ msg_data: if msg_ctl & (1 << 7) != 0 {
+ (raw[3] & 0xFFFF) as u16
+ } else {
+ (raw[2] & 0xFFFF) as u16
+ },
+ mask_bits: if msg_ctl & (1 << 8) != 0 {
+ if msg_ctl & (1 << 7) != 0 { raw[3] >> 16 } else { raw[3] }
+ } else { 0 },
+ pending_bits: if msg_ctl & (1 << 8) != 0 {
+ if msg_ctl & (1 << 7) != 0 { raw[4] } else { raw[4] }
+ } else { 0 },
+ is_64bit: msg_ctl & (1 << 7) != 0,
+ is_maskable: msg_ctl & (1 << 8) != 0,
+ multiple_message_capable: ((msg_ctl >> 1) & 0x7) as u8,
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct MsixCapability {
+ pub msg_ctl: u16,
+ pub table_offset: u32,
+ pub table_bar: u8,
+ pub pba_offset: u32,
+ pub pba_bar: u8,
+ pub table_size: u16,
+}
+
+impl MsixCapability {
+ pub fn parse(raw: &[u32; 3], msg_ctl: u16) -> Self {
+ Self {
+ msg_ctl,
+ table_offset: raw[1] & !0x7,
+ table_bar: (raw[1] & 0x7) as u8,
+ pba_offset: raw[2] & !0x7,
+ pba_bar: (raw[2] & 0x7) as u8,
+ table_size: ((msg_ctl >> 1) & 0x7FF) as u16 + 1,
+ }
+ }
+}
+
+pub fn is_valid_msi_address(addr: u64) -> bool {
+ MsiAddress::validate(addr)
+}
+
+pub fn is_valid_msi_vector(vector: u8) -> bool {
+ vector >= 32 && vector < 255
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_compose_message() {
+ let msg = MsiMessage::compose(ApicId::new(0), 48, 0, 0);
+ assert!(msg.validate());
+ assert_eq!(msg.data.vector(), 48);
+ }
+
+ #[test]
+ fn test_invalid_address() {
+ assert!(!is_valid_msi_address(0xDEAD_BEEF));
+ assert!(is_valid_msi_address(0xFEE0_0000));
+ }
+
+ #[test]
+ fn test_msi_parse() {
+ let raw = [0u32; 6];
+ let cap = MsiCapability::parse(&raw, 0);
+ assert!(!cap.is_64bit);
+ assert!(!cap.is_maskable);
+ }
+}
@@ -405,3 +405,44 @@ mod tests {
}
}
}
/// Standard MSI (non-X) interrupt allocation.
/// For devices that support MSI but not MSI-X.
pub struct MsiAllocation {
pub irq: u8,
pub fd: crate::pci::IrqFd,
pub vector_count: u8,
}
impl MsiAllocation {
#[cfg(target_os = "redox")]
pub fn request(multiple_message_capable: u8, requested: u8) -> Result<Self> {
let cpu_id = read_bsp_cpu_id()?;
let count = requested.min(1u8 << multiple_message_capable);
let (irq, fd) = allocate_irq_vector(cpu_id)?;
log::info!("redox-driver-sys: allocated MSI vector -> irq {} on cpu {}", irq, cpu_id);
Ok(Self { irq, fd, vector_count: count })
}
#[cfg(not(target_os = "redox"))]
pub fn request(_multiple_message_capable: u8, _requested: u8) -> Result<Self> {
Err(DriverError::Irq("MSI allocation is only available on target_os=redox".into()))
}
}
/// Set IRQ affinity to a specific CPU.
/// Returns true if the affinity was successfully changed.
#[cfg(target_os = "redox")]
pub fn irq_set_affinity(irq: u8, cpu_id: u32) -> Result<()> {
use std::io::Write;
let path = format!("/scheme/irq/{}/affinity", irq);
let mut f = std::fs::OpenOptions::new().write(true).open(&path)
.map_err(|e| DriverError::Irq(format!("failed to open {}: {}", path, e)))?;
f.write_all(&cpu_id.to_ne_bytes())
.map_err(|e| DriverError::Irq(format!("failed to set affinity: {}", e)))
}
#[cfg(not(target_os = "redox"))]
pub fn irq_set_affinity(_irq: u8, _cpu_id: u32) -> Result<()> {
Err(DriverError::Irq("IRQ affinity is only available on target_os=redox".into()))
}
+18 -5
View File
@@ -67,12 +67,25 @@ find "${COOKBOOK_SOURCE}" -name "CMakeLists.txt" -exec sed -i '/KF6::Svg/d' {} +
rm -f CMakeCache.txt
rm -rf CMakeFiles
# Qt cmake plugin targets reference host paths (not available in cross-build sysroot)
# Qt cmake Imported targets check plugins/ for .so files, but qtwayland and other
# modules install them under usr/plugins/. Mirror any missing files across.
# Also remove QmlPlugins dirs which reference host paths from QML module.
find "${COOKBOOK_SYSROOT}" -type d -name "QmlPlugins" -exec rm -rf {} + 2>/dev/null || true
# Stub out Qt6Plugin cmake files that reference host paths
for f in $(find "${COOKBOOK_SYSROOT}" -name "*PluginTargets.cmake" -o -name "*PluginTargets-*.cmake" -o -name "*PluginTargetsPrecheck.cmake" -o -name "*PluginAdditionalTargetInfo.cmake" 2>/dev/null); do
echo "# stub" > "$f"
done
if [ -d "${COOKBOOK_SYSROOT}/usr/plugins" ] && [ -d "${COOKBOOK_SYSROOT}/plugins" ]; then
for src in $(find "${COOKBOOK_SYSROOT}/usr/plugins" -name "*.so"); do
dst="${COOKBOOK_SYSROOT}/plugins/${src#${COOKBOOK_SYSROOT}/usr/plugins/}"
if [ ! -e "$dst" ]; then
mkdir -p "$(dirname "$dst")"
ln -sf "$src" "$dst" 2>/dev/null || true
fi
done
fi
# Stub missing KF6 packages needed by dependencies
mkdir -p "${COOKBOOK_SYSROOT}/lib/cmake/KF6Svg"
cat > "${COOKBOOK_SYSROOT}/lib/cmake/KF6Svg/KF6SvgConfig.cmake" << 'KF6EOF'
set(KF6Svg_FOUND TRUE)
KF6EOF
# Stub missing KF6 packages needed by dependencies
mkdir -p "${COOKBOOK_SYSROOT}/lib/cmake/KF6Svg"
@@ -1,215 +1,108 @@
use core::mem::size_of;
use core::slice;
use redox_driver_sys::dma::DmaBuffer;
use log::{info, warn, error};
pub const IRTE_SIZE: usize = 16;
pub const MAX_INTERRUPT_REMAP_ENTRIES: usize = 4096;
pub const IRTE_PRESENT: u64 = 1 << 0;
const DMA_ALIGNMENT: usize = 4096;
const IRTE_REMAP_ENABLE: u64 = 1 << 0;
const IRTE_SUPPRESS_IOPF: u64 = 1 << 1;
const IRTE_INT_TYPE_SHIFT: u64 = 2;
const IRTE_INT_TYPE_MASK: u64 = 0x7 << IRTE_INT_TYPE_SHIFT;
const IRTE_DEST_MODE: u64 = 1 << 8;
const IRTE_DEST_LOW_SHIFT: u64 = 16;
const IRTE_DEST_LOW_MASK: u64 = 0xFFFF << IRTE_DEST_LOW_SHIFT;
const IRTE_VECTOR_SHIFT: u64 = 32;
const IRTE_VECTOR_MASK: u64 = 0xFF << IRTE_VECTOR_SHIFT;
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(C)]
pub struct AmdIrte {
data: [u64; 2],
}
impl AmdIrte {
pub const fn new() -> Self {
Self { data: [0; 2] }
}
pub fn remap_enabled(&self) -> bool {
self.data[0] & IRTE_REMAP_ENABLE != 0
}
pub fn set_remap_enabled(&mut self, value: bool) {
if value {
self.data[0] |= IRTE_REMAP_ENABLE;
} else {
self.data[0] &= !IRTE_REMAP_ENABLE;
}
}
pub fn suppress_io_page_faults(&self) -> bool {
self.data[0] & IRTE_SUPPRESS_IOPF != 0
}
pub fn set_suppress_io_page_faults(&mut self, value: bool) {
if value {
self.data[0] |= IRTE_SUPPRESS_IOPF;
} else {
self.data[0] &= !IRTE_SUPPRESS_IOPF;
}
}
pub fn interrupt_type(&self) -> u8 {
((self.data[0] & IRTE_INT_TYPE_MASK) >> IRTE_INT_TYPE_SHIFT) as u8
}
pub fn set_interrupt_type(&mut self, value: u8) {
self.data[0] = (self.data[0] & !IRTE_INT_TYPE_MASK)
| ((u64::from(value) & 0x7) << IRTE_INT_TYPE_SHIFT);
}
pub fn destination_mode(&self) -> bool {
self.data[0] & IRTE_DEST_MODE != 0
}
pub fn set_destination_mode(&mut self, logical: bool) {
if logical {
self.data[0] |= IRTE_DEST_MODE;
} else {
self.data[0] &= !IRTE_DEST_MODE;
}
}
pub fn destination(&self) -> u32 {
(((self.data[1] & 0xFFFF_FFFF) as u32) << 16)
| (((self.data[0] & IRTE_DEST_LOW_MASK) >> IRTE_DEST_LOW_SHIFT) as u32)
}
pub fn set_destination(&mut self, apic_id: u32) {
self.data[0] = (self.data[0] & !IRTE_DEST_LOW_MASK)
| ((u64::from(apic_id & 0xFFFF)) << IRTE_DEST_LOW_SHIFT);
self.data[1] = (self.data[1] & !0xFFFF_FFFF) | u64::from(apic_id >> 16);
}
pub fn vector(&self) -> u8 {
((self.data[0] & IRTE_VECTOR_MASK) >> IRTE_VECTOR_SHIFT) as u8
}
pub fn set_vector(&mut self, vector: u8) {
self.data[0] =
(self.data[0] & !IRTE_VECTOR_MASK) | (u64::from(vector) << IRTE_VECTOR_SHIFT);
}
}
const _: () = assert!(size_of::<AmdIrte>() == IRTE_SIZE);
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct IrteConfig {
#[derive(Debug, Clone, Copy)]
pub struct Irte {
pub present: bool,
pub source_id: u16,
pub dest_id: u32,
pub vector: u8,
pub destination: u32,
pub logical_destination: bool,
pub interrupt_type: u8,
pub suppress_io_page_faults: bool,
pub delivery_mode: u8,
pub trigger_mode: u8,
}
impl Irte {
pub fn new(source_id: u16, dest_id: u32, vector: u8) -> Self {
Self { present: true, source_id, dest_id, vector, delivery_mode: 0, trigger_mode: 0 }
}
pub fn encode(&self) -> [u64; 2] {
let mut low: u64 = IRTE_PRESENT;
low |= u64::from(self.vector) & 0xFF;
low |= (u64::from(self.delivery_mode) & 0x7) << 8;
low |= u64::from(self.dest_id) << 40;
let high: u64 = u64::from(self.source_id);
[low, high]
}
pub fn decode(low: u64, high: u64) -> Self {
Self {
present: low & IRTE_PRESENT != 0,
vector: (low & 0xFF) as u8,
delivery_mode: ((low >> 8) & 0x7) as u8,
trigger_mode: ((low >> 15) & 0x1) as u8,
dest_id: ((low >> 40) & 0xFFFF_FFFF) as u32,
source_id: (high & 0xFFFF) as u16,
}
}
}
pub struct InterruptRemapTable {
buffer: DmaBuffer,
capacity: usize,
pub base: usize,
pub entries: usize,
}
impl InterruptRemapTable {
pub fn new(entry_count: usize) -> Result<Self, &'static str> {
if !(2..=MAX_INTERRUPT_REMAP_ENTRIES).contains(&entry_count) {
return Err("interrupt remap table entry count must be between 2 and 4096");
}
if !entry_count.is_power_of_two() {
return Err("interrupt remap table entry count must be a power of two");
}
let byte_len = entry_count
.checked_mul(IRTE_SIZE)
.ok_or("interrupt remap table size overflow")?;
let buffer = DmaBuffer::allocate(byte_len, DMA_ALIGNMENT)
.map_err(|_| "failed to allocate interrupt remap table")?;
if buffer.len() < byte_len {
return Err("interrupt remap table allocation was smaller than requested");
}
if !buffer.is_physically_contiguous() {
return Err("interrupt remap table allocation is not physically contiguous");
}
Ok(Self {
buffer,
capacity: entry_count,
})
pub fn new(base_addr: usize, size: usize) -> Self {
Self { base: base_addr, entries: size / IRTE_SIZE }
}
pub fn capacity(&self) -> usize {
self.capacity
}
pub fn len_encoding(&self) -> u8 {
self.capacity.ilog2() as u8 - 1
}
pub fn physical_address(&self) -> usize {
self.buffer.physical_address()
}
pub fn entry(&self, index: usize) -> AmdIrte {
assert!(
index < self.capacity,
"interrupt remap table index out of bounds"
);
self.entries()[index]
}
pub fn set_entry(&mut self, index: usize, entry: AmdIrte) {
assert!(
index < self.capacity,
"interrupt remap table index out of bounds"
);
self.entries_mut()[index] = entry;
}
pub fn clear_entry(&mut self, index: usize) {
self.set_entry(index, AmdIrte::new());
}
pub fn configure(&mut self, index: usize, config: IrteConfig) {
let mut entry = AmdIrte::new();
entry.set_remap_enabled(true);
entry.set_suppress_io_page_faults(config.suppress_io_page_faults);
entry.set_interrupt_type(config.interrupt_type);
entry.set_destination_mode(config.logical_destination);
entry.set_destination(config.destination);
entry.set_vector(config.vector);
self.set_entry(index, entry);
}
fn entries(&self) -> &[AmdIrte] {
unsafe { slice::from_raw_parts(self.buffer.as_ptr().cast::<AmdIrte>(), self.capacity) }
}
fn entries_mut(&mut self) -> &mut [AmdIrte] {
pub fn program_entry(&self, index: usize, irte: &Irte) -> bool {
if index >= self.entries { return false; }
let e = irte.encode();
let off = index * IRTE_SIZE;
unsafe {
slice::from_raw_parts_mut(self.buffer.as_mut_ptr().cast::<AmdIrte>(), self.capacity)
core::ptr::write_volatile((self.base + off) as *mut u64, e[0]);
core::ptr::write_volatile((self.base + off + 8) as *mut u64, e[1]);
}
info!("IRTE[{}]: src={:04x} dest={:08x} vec={}", index, irte.source_id, irte.dest_id, irte.vector);
true
}
pub fn invalidate_entry(&self, index: usize) {
if index >= self.entries { return; }
unsafe { core::ptr::write_volatile((self.base + index * IRTE_SIZE) as *mut u64, 0u64); }
}
pub fn find_free(&self) -> Option<usize> {
for i in 0..self.entries.min(256) {
let off = i * IRTE_SIZE;
if unsafe { core::ptr::read_volatile((self.base + off) as *const u64) & IRTE_PRESENT == 0 } {
return Some(i);
}
}
None
}
}
#[cfg(test)]
mod tests {
use super::AmdIrte;
pub struct IrqRemapManager {
pub tables: Vec<InterruptRemapTable>,
}
#[test]
fn irte_accessors_round_trip() {
let mut irte = AmdIrte::new();
irte.set_remap_enabled(true);
irte.set_suppress_io_page_faults(true);
irte.set_interrupt_type(3);
irte.set_destination_mode(true);
irte.set_destination(0x1234_5678);
irte.set_vector(0x52);
impl IrqRemapManager {
pub fn new() -> Self { Self { tables: Vec::new() } }
assert!(irte.remap_enabled());
assert!(irte.suppress_io_page_faults());
assert_eq!(irte.interrupt_type(), 3);
assert!(irte.destination_mode());
assert_eq!(irte.destination(), 0x1234_5678);
assert_eq!(irte.vector(), 0x52);
pub fn remap_interrupt(&self, sid: u16, dest: u32, vector: u8) -> Option<usize> {
let irte = Irte::new(sid, dest, vector);
for table in &self.tables {
if let Some(i) = table.find_free() {
table.program_entry(i, &irte);
return Some(i);
}
}
None
}
pub fn validate_msi(&self, addr: u64, _data: u32) -> bool {
let idx = ((addr >> 5) & 0x7FFF) as usize;
for table in &self.tables {
if idx < table.entries {
let off = idx * IRTE_SIZE;
return unsafe { core::ptr::read_volatile((table.base + off) as *const u64) & IRTE_PRESENT != 0 };
}
}
false
}
}