milestone: desktop path Phases 1-5

Phase 1 (Runtime Substrate): 4 check binaries, --probe, POSIX tests
Phase 2 (Wayland Compositor): bounded scaffold, zero warnings
Phase 3 (KWin Session): preflight checker (KWin stub, gated on Qt6Quick)
Phase 4 (KDE Plasma): 18 KF6 enabled, preflight checker
Phase 5 (Hardware GPU): DRM/firmware/Mesa preflight checker

Build: zero warnings, all scripts syntax-clean. Oracle-verified.
This commit is contained in:
2026-04-29 09:54:06 +01:00
parent b23714f542
commit 8acc73d774
508 changed files with 76526 additions and 396 deletions
@@ -0,0 +1,24 @@
# Crate manifest for `virtio-core`, the shared VirtIO driver library.
[package]
name = "virtio-core"
description = "VirtIO driver library"
version = "0.1.0"
edition = "2021"
authors = ["Anhad Singh <andypython@protonmail.com>"]

# Most dependencies inherit their versions from the workspace manifest.
[dependencies]
static_assertions.workspace = true
bitflags.workspace = true
redox_syscall.workspace = true
libredox.workspace = true
log.workspace = true
thiserror.workspace = true
# `executor` is needed for blocking on queue futures without an async runtime.
futures = { version = "0.3.28", features = ["executor"] }
crossbeam-queue = "0.3.8"
redox_event.workspace = true
# Sibling crates from this repository.
common = { path = "../common" }
pcid = { path = "../pcid" }

[lints]
workspace = true
@@ -0,0 +1,9 @@
use std::fs::File;
use pcid_interface::*;
use crate::{transport::Error, Device};
pub fn enable_msix(pcid_handle: &mut PciFunctionHandle) -> Result<File, Error> {
unimplemented!("virtio_core: aarch64 enable_msix")
}
@@ -0,0 +1,9 @@
use std::fs::File;
use pcid_interface::*;
use crate::{transport::Error, Device};
pub fn enable_msix(pcid_handle: &mut PciFunctionHandle) -> Result<File, Error> {
unimplemented!("virtio_core: enable_msix")
}
@@ -0,0 +1,37 @@
use crate::transport::Error;
use pcid_interface::irq_helpers::{allocate_single_interrupt_vector_for_msi, read_bsp_apic_id};
use std::fs::File;
use crate::MSIX_PRIMARY_VECTOR;
use pcid_interface::*;
/// Enables MSI-X on the PCI function and programs the primary interrupt vector.
///
/// Returns the interrupt handle (a `File`) obtained from the vector allocation,
/// which the caller uses to receive IRQ events.
pub fn enable_msix(pcid_handle: &mut PciFunctionHandle) -> Result<File, Error> {
    // Extended message signaled interrupts.
    let msix_info = match pcid_handle.feature_info(PciFeature::MsiX) {
        PciFeatureInfo::MsiX(capability) => capability,
        // Callers only invoke this after confirming MSI-X support
        // (see the `has_msix` assert in `probe_device`).
        _ => unreachable!(),
    };
    // Map the MSI-X table and mask all vectors until they are configured.
    let mut info = unsafe { msix_info.map_and_mask_all(pcid_handle) };

    // Allocate the primary MSI vector.
    // FIXME allow the driver to register multiple MSI-X vectors
    // FIXME move this MSI-X registering code into pcid_interface or pcid itself
    let interrupt_handle = {
        let table_entry_pointer = info.table_entry_pointer(MSIX_PRIMARY_VECTOR as usize);

        // Route the interrupt to the bootstrap processor's APIC.
        let destination_id = read_bsp_apic_id().expect("virtio_core: `read_bsp_apic_id()` failed");
        let (msg_addr_and_data, interrupt_handle) =
            allocate_single_interrupt_vector_for_msi(destination_id);

        // Program the table entry, then unmask just this vector.
        table_entry_pointer.write_addr_and_data(msg_addr_and_data);
        table_entry_pointer.unmask();

        interrupt_handle
    };

    pcid_handle.enable_feature(PciFeature::MsiX);
    log::debug!("virtio: using MSI-X (interrupt_handle={interrupt_handle:?})");

    Ok(interrupt_handle)
}
@@ -0,0 +1,19 @@
// Public building blocks of the VirtIO driver library.
pub mod spec;
pub mod transport;
pub mod utils;

mod probe;

// Architecture-specific MSI-X setup; exactly one `arch` module is
// compiled in, selected by the target architecture.
#[cfg(target_arch = "aarch64")]
#[path = "arch/aarch64.rs"]
mod arch;

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[path = "arch/x86.rs"]
mod arch;

#[cfg(target_arch = "riscv64")]
#[path = "arch/riscv64.rs"]
mod arch;

pub use probe::{probe_device, reinit, Device, MSIX_PRIMARY_VECTOR};
@@ -0,0 +1,158 @@
use std::fs::File;
use std::sync::Arc;
use pcid_interface::*;
use crate::spec::*;
use crate::transport::{Error, StandardTransport, Transport};
use crate::utils::align_down;
/// A probed VirtIO device: its transport, device-specific configuration
/// space, and the IRQ handle returned by the arch-specific MSI-X setup.
pub struct Device {
    /// Transport used to talk to the device (standard PCI transport).
    pub transport: Arc<dyn Transport>,
    /// Pointer to the mapped device-specific configuration region.
    pub device_space: *const u8,
    /// Handle delivering MSI-X interrupts for [`MSIX_PRIMARY_VECTOR`].
    pub irq_handle: File,
}

// FIXME(andypython): `device_space` should not be `Send` nor `Sync`. Take
// it out of `Device`.
//
// SAFETY(review): these impls are only sound if all accesses through
// `device_space` are externally synchronized — see the FIXME above.
unsafe impl Send for Device {}
unsafe impl Sync for Device {}

/// MSI-X table index of the single vector this crate allocates.
pub const MSIX_PRIMARY_VECTOR: u16 = 0;
/// VirtIO Device Probe
///
/// ## Device State
/// After this function, the device will have been successfully reset and is ready for use.
///
/// The caller is required to do the following:
/// * Negotiate the device and driver supported features (finalize via [`StandardTransport::finalize_features`])
/// * Create the device specific virtio queues (via [`StandardTransport::setup_queue`]). This is *required* to be done
///   before starting the device.
/// * Finally start the device (via [`StandardTransport::run_device`]). At this point, the device
///   is alive.
///
/// ## Panics
/// This function panics if the device is not a virtio device.
pub fn probe_device(pcid_handle: &mut PciFunctionHandle) -> Result<Device, Error> {
    let pci_config = pcid_handle.config();

    // 6900 == 0x1af4, the VirtIO PCI vendor ID.
    assert_eq!(
        pci_config.func.full_device_id.vendor_id, 6900,
        "virtio_core::probe_device: not a virtio device"
    );

    // Mapped addresses of the three capability regions this driver needs.
    let mut common_addr = None;
    let mut notify_addr = None;
    let mut device_addr = None;

    for raw_capability in pcid_handle.get_vendor_capabilities() {
        // SAFETY: We have verified that the length of the data is correct.
        let capability = unsafe { &*(raw_capability.data.as_ptr() as *const PciCapability) };

        // Only the Common, Notify and Device configuration structures are used.
        match capability.cfg_type {
            CfgType::Common | CfgType::Notify | CfgType::Device => {}
            _ => continue,
        }

        let (addr, _) = pci_config.func.bars[capability.bar as usize].expect_mem();
        let address = unsafe {
            let addr = addr + capability.offset as usize;
            // XXX: physmap() requires the address to be page aligned.
            let aligned_addr = align_down(addr);
            let offset = addr - aligned_addr;
            let size = offset + capability.length as usize;

            let addr = common::physmap(
                aligned_addr,
                size,
                common::Prot::RW,
                common::MemoryType::Uncacheable,
            )? as usize;
            // Re-apply the intra-page offset dropped for alignment above.
            addr + offset
        };

        match capability.cfg_type {
            CfgType::Common => {
                debug_assert!(common_addr.is_none());
                common_addr = Some(address);
            }
            CfgType::Notify => {
                debug_assert!(notify_addr.is_none());
                // SAFETY: The capability type is `Notify`, so it's safe to access
                // the `notify_multiplier` field.
                let multiplier = unsafe {
                    (&*(raw_capability.data.as_ptr() as *const PciCapability
                        as *const PciCapabilityNotify))
                        .notify_off_multiplier()
                };
                notify_addr = Some((address, multiplier));
            }
            CfgType::Device => {
                debug_assert!(device_addr.is_none());
                device_addr = Some(address);
            }
            // The filter `match` above only lets the three handled types through.
            _ => unreachable!(),
        }
    }

    let common_addr = common_addr.expect("virtio common capability missing");
    let device_addr = device_addr.expect("virtio device capability missing");
    let (notify_addr, notify_multiplier) = notify_addr.expect("virtio notify capability missing");

    // FIXME this is explicitly allowed by the virtio specification to happen
    assert!(
        notify_multiplier != 0,
        "virtio-core::device_probe: device uses the same Queue Notify addresses for all queues"
    );

    // SAFETY notes: both regions were mapped uncacheable above and stay
    // mapped for the lifetime of the process.
    let common = unsafe { &mut *(common_addr as *mut CommonCfg) };
    let device_space = unsafe { &mut *(device_addr as *mut u8) };

    let transport = StandardTransport::new(
        common,
        notify_addr as *const u8,
        notify_multiplier,
        device_space,
    );

    // Setup interrupts.
    let all_pci_features = pcid_handle.fetch_all_features();
    let has_msix = all_pci_features.iter().any(|feature| feature.is_msix());

    // According to the virtio specification, the device is REQUIRED to support MSI-X.
    assert!(has_msix, "virtio: device does not support MSI-X");
    let irq_handle = crate::arch::enable_msix(pcid_handle)?;
    log::debug!("virtio: using standard PCI transport");

    let device = Device {
        transport,
        device_space,
        irq_handle,
    };

    // Reset the device, then set ACKNOWLEDGE and DRIVER (see `reinit`).
    device.transport.reset();
    reinit(&device)?;

    Ok(device)
}
/// Performs the initial driver acknowledgement after a device reset by
/// setting the ACKNOWLEDGE and then the DRIVER status bits.
pub fn reinit(device: &Device) -> Result<(), Error> {
    // XXX: According to the virtio specification v1.2, setting the ACKNOWLEDGE
    // and DRIVER bits in `device_status` is required to be done in two steps.
    let transport = &device.transport;
    transport.insert_status(DeviceStatusFlags::ACKNOWLEDGE);
    transport.insert_status(DeviceStatusFlags::DRIVER);
    Ok(())
}
@@ -0,0 +1,56 @@
//! https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html
//!
//! This file contains comments copied from the VirtIO specification which are
//! licensed under the following conditions:
//!
//! Copyright © OASIS Open 2022. All Rights Reserved.
//!
//! All capitalized terms in the following text have the meanings assigned to them
//! in the OASIS Intellectual Property Rights Policy (the "OASIS IPR Policy"). The
//! full Policy may be found at the OASIS website.
//!
//! This document and translations of it may be copied and furnished to others,
//! and derivative works that comment on or otherwise explain it or assist in its
//! implementation may be prepared, copied, published, and distributed, in whole
//! or in part, without restriction of any kind, provided that the above copyright
//! notice and this section are included on all such copies and derivative works.
//! However, this document itself may not be modified in any way, including by
//! removing the copyright notice or references to OASIS, except as needed for the
//! purpose of developing any document or deliverable produced by an OASIS Technical
//! Committee (in which case the rules applicable to copyrights, as set forth in the
//! OASIS IPR Policy, must be followed) or as required to translate it into languages
//! other than English.
bitflags::bitflags! {
    /// [2.1 Device Status Field](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-110001)
    #[derive(Debug, Copy, Clone, PartialEq)]
    #[repr(transparent)]
    pub struct DeviceStatusFlags: u8 {
        /// Indicates that the guest OS has found the device and recognized it as a
        /// valid device.
        const ACKNOWLEDGE = 1;
        /// Indicates that the guest OS knows how to drive the device.
        const DRIVER = 2;
        /// Indicates that something went wrong in the guest and it has given up on
        /// the device.
        const FAILED = 128;
        /// Indicates that the driver has acknowledged all the features it understands
        /// and feature negotiation is complete.
        const FEATURES_OK = 8;
        /// Indicates that the driver is set up and ready to drive the device.
        const DRIVER_OK = 4;
        /// Indicates that the device has experienced an error from which it can't recover.
        const DEVICE_NEEDS_RESET = 64;
    }
}
// Split virtqueue layout (spec section 2.7).
mod split_virtqueue;
pub use split_virtqueue::*;
// FIXME add [2.8 Packed Virtqueues](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-720008)

// PCI transport structures (spec section 4.1).
mod transport_pci;
pub use transport_pci::*;

// Device-independent ("reserved") feature bits (spec section 6).
mod reserved_features;
pub use reserved_features::*;
@@ -0,0 +1,100 @@
//! [6 Reserved Feature Bits](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-6600006)
//!
//! This file contains comments copied from the VirtIO specification which are
//! licensed under the following conditions:
//!
//! Copyright © OASIS Open 2022. All Rights Reserved.
//!
//! All capitalized terms in the following text have the meanings assigned to them
//! in the OASIS Intellectual Property Rights Policy (the "OASIS IPR Policy"). The
//! full Policy may be found at the OASIS website.
//!
//! This document and translations of it may be copied and furnished to others,
//! and derivative works that comment on or otherwise explain it or assist in its
//! implementation may be prepared, copied, published, and distributed, in whole
//! or in part, without restriction of any kind, provided that the above copyright
//! notice and this section are included on all such copies and derivative works.
//! However, this document itself may not be modified in any way, including by
//! removing the copyright notice or references to OASIS, except as needed for the
//! purpose of developing any document or deliverable produced by an OASIS Technical
//! Committee (in which case the rules applicable to copyrights, as set forth in the
//! OASIS IPR Policy, must be followed) or as required to translate it into languages
//! other than English.
// NOTE: these constants are feature bit *numbers* (indices into the feature
// space), not bit masks — values of 32 and above could not be single-bit
// `u32` masks.

/// Negotiating this feature indicates that the driver can use descriptors
/// with the VIRTQ_DESC_F_INDIRECT flag set as described in 2.7.5.3 Indirect
/// Descriptors and 2.8.7 Indirect Flag: Scatter-Gather Support.
pub const VIRTIO_F_INDIRECT_DESC: u32 = 28;

/// This feature enables the used_event and the avail_event fields as
/// described in 2.7.7, 2.7.8 and 2.8.10.
pub const VIRTIO_F_EVENT_IDX: u32 = 29;

/// This indicates compliance with this specification, giving a simple way
/// to detect legacy devices or drivers.
pub const VIRTIO_F_VERSION_1: u32 = 32;

/// This feature indicates that the device can be used on a platform where device
/// access to data in memory is limited and/or translated. E.g. this is the case
/// if the device can be located behind an IOMMU that translates bus addresses
/// from the device into physical addresses in memory, if the device can be limited
/// to only access certain memory addresses or if special commands such as a cache
/// flush can be needed to synchronise data in memory with the device. Whether
/// accesses are actually limited or translated is described by platform-specific
/// means. If this feature bit is set to 0, then the device has same access to
/// memory addresses supplied to it as the driver has. In particular, the device
/// will always use physical addresses matching addresses used by the driver
/// (typically meaning physical addresses used by the CPU) and not translated
/// further, and can access any address supplied to it by the driver. When clear,
/// this overrides any platform-specific description of whether device access is
/// limited or translated in any way, e.g. whether an IOMMU may be present.
pub const VIRTIO_F_ACCESS_PLATFORM: u32 = 33;

/// This feature indicates support for the packed virtqueue layout as described
/// in 2.8 Packed Virtqueues.
pub const VIRTIO_F_RING_PACKED: u32 = 34;

/// This feature indicates that all buffers are used by the device in the same order
/// in which they have been made available.
pub const VIRTIO_F_IN_ORDER: u32 = 35;

/// This feature indicates that memory accesses by the driver and the device are
/// ordered in a way described by the platform.
/// If this feature bit is negotiated, the ordering in effect for any memory
/// accesses by the driver that need to be ordered in a specific way with respect
/// to accesses by the device is the one suitable for devices described by the
/// platform. This implies that the driver needs to use memory barriers suitable
/// for devices described by the platform; e.g. for the PCI transport in the case
/// of hardware PCI devices.
///
/// If this feature bit is not negotiated, then the device and driver are assumed
/// to be implemented in software, that is they can be assumed to run on identical
/// CPUs in an SMP configuration. Thus a weaker form of memory barriers is sufficient
/// to yield better performance.
pub const VIRTIO_F_ORDER_PLATFORM: u32 = 36;

/// This feature indicates that the device supports Single Root I/O Virtualization.
/// Currently only PCI devices support this feature.
pub const VIRTIO_F_SR_IOV: u32 = 37;

/// This feature indicates that the driver passes extra data (besides identifying
/// the virtqueue) in its device notifications. See 2.9 Driver Notifications.
pub const VIRTIO_F_NOTIFICATION_DATA: u32 = 38;

/// This feature indicates that the driver uses the data provided by the device as
/// a virtqueue identifier in available buffer notifications. As mentioned in section
/// 2.9, when the driver is required to send an available buffer notification to the
/// device, it sends the virtqueue number to be notified. The method of delivering
/// notifications is transport specific. With the PCI transport, the device can
/// optionally provide a per-virtqueue value for the driver to use in driver
/// notifications, instead of the virtqueue number. Some devices may benefit from this
/// flexibility by providing, for example, an internal virtqueue identifier, or an
/// internal offset related to the virtqueue number.
///
/// This feature indicates the availability of such value. The definition of the data
/// to be provided in driver notification and the delivery method is transport
/// specific. For more details about driver notifications over PCI see 4.1.5.2.
pub const VIRTIO_F_NOTIF_CONFIG_DATA: u32 = 39;

/// This feature indicates that the driver can reset a queue individually. See 2.6.1.
pub const VIRTIO_F_RING_RESET: u32 = 40;
@@ -0,0 +1,205 @@
//! [2.7 Split Virtqueues](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-350007)
//!
//! This file contains comments copied from the VirtIO specification which are
//! licensed under the following conditions:
//!
//! Copyright © OASIS Open 2022. All Rights Reserved.
//!
//! All capitalized terms in the following text have the meanings assigned to them
//! in the OASIS Intellectual Property Rights Policy (the "OASIS IPR Policy"). The
//! full Policy may be found at the OASIS website.
//!
//! This document and translations of it may be copied and furnished to others,
//! and derivative works that comment on or otherwise explain it or assist in its
//! implementation may be prepared, copied, published, and distributed, in whole
//! or in part, without restriction of any kind, provided that the above copyright
//! notice and this section are included on all such copies and derivative works.
//! However, this document itself may not be modified in any way, including by
//! removing the copyright notice or references to OASIS, except as needed for the
//! purpose of developing any document or deliverable produced by an OASIS Technical
//! Committee (in which case the rules applicable to copyrights, as set forth in the
//! OASIS IPR Policy, must be followed) or as required to translate it into languages
//! other than English.
use std::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, Ordering};
use crate::utils::{IncompleteArrayField, VolatileCell};
use static_assertions::const_assert_eq;
/// [2.7.5 The Virtqueue Descriptor table](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-430005)
//
// Fields are atomics because the descriptor table lives in memory shared
// with the device (mapped via DMA) — TODO(review): confirm required ordering.
#[repr(C, align(16))]
pub struct Descriptor {
    /// Address (guest-physical).
    address: AtomicU64,
    /// Length of the buffer this descriptor refers to, in bytes.
    size: AtomicU32,
    /// Raw [`DescriptorFlags`] bits.
    flags: AtomicU16,
    /// Next field if flags & NEXT
    next: AtomicU16,
}

// Spec-mandated layout: 16 bytes per descriptor.
const_assert_eq!(core::mem::size_of::<Descriptor>(), 16);
bitflags::bitflags! {
    /// Flags stored in [`Descriptor::flags`] (spec section 2.7.5).
    #[derive(Debug, Copy, Clone)]
    #[repr(transparent)]
    pub struct DescriptorFlags: u16 {
        /// This marks a buffer as continuing via the next field.
        const NEXT = 1 << 0;
        /// This marks a buffer as device write-only (otherwise device read-only).
        const WRITE_ONLY = 1 << 1;
        /// This means the buffer contains a list of buffer descriptors.
        const INDIRECT = 1 << 2;
    }
}
impl Descriptor {
    /// Stores the guest-physical buffer address.
    pub fn set_addr(&self, addr: u64) {
        self.address.store(addr, Ordering::SeqCst);
    }

    /// Stores the buffer length in bytes.
    pub fn set_size(&self, size: u32) {
        self.size.store(size, Ordering::SeqCst);
    }

    /// Stores the next-descriptor index; `None` is recorded as 0.
    pub fn set_next(&self, next: Option<u16>) {
        let index = match next {
            Some(value) => value,
            None => 0,
        };
        self.next.store(index, Ordering::SeqCst);
    }

    /// Stores the descriptor flags.
    pub fn set_flags(&self, flags: DescriptorFlags) {
        self.flags.store(flags.bits(), Ordering::SeqCst);
    }

    /// Returns the next-descriptor index.
    pub fn next(&self) -> u16 {
        self.next.load(Ordering::SeqCst)
    }

    /// Returns the descriptor flags, dropping any unknown bits.
    pub fn flags(&self) -> DescriptorFlags {
        let raw = self.flags.load(Ordering::SeqCst);
        DescriptorFlags::from_bits_truncate(raw)
    }
}
// ======== Available Ring ========
//
// XXX: The driver uses the available ring to offer buffers to the
// device. Each ring entry refers to the head of a descriptor
// chain.

/// [2.7.6 The Virtqueue Available Ring](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-490006)
#[repr(C, align(2))]
pub struct AvailableRing {
    /// Ring flags (spec 2.7.6).
    pub flags: VolatileCell<u16>,
    /// Index where the driver will place the next entry (`idx` in the spec).
    pub head_index: AtomicU16,
    /// Flexible array of ring entries; actual length is the queue size.
    pub elements: IncompleteArrayField<AvailableRingElement>,
}

// Header is flags + head_index = 4 bytes (the array field is zero-sized).
const_assert_eq!(core::mem::size_of::<AvailableRing>(), 4);
/// One available-ring entry: the index of the head of a descriptor chain.
#[repr(C)]
pub struct AvailableRingElement {
    pub table_index: AtomicU16,
}

impl AvailableRingElement {
    /// Stores the descriptor-table index of the offered chain head.
    pub fn set_table_index(&self, index: u16) {
        self.table_index.store(index, Ordering::SeqCst)
    }
}

const_assert_eq!(core::mem::size_of::<AvailableRingElement>(), 2);

/// Trailer placed after the available-ring elements.
#[repr(C)]
pub struct AvailableRingExtra {
    pub avail_event: VolatileCell<u16>, // Only if `VIRTIO_F_EVENT_IDX`
}

const_assert_eq!(core::mem::size_of::<AvailableRingExtra>(), 2);
// ======== Used Ring ========

/// [2.7.8 The Virtqueue Used Ring](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-540008)
#[repr(C, align(4))]
pub struct UsedRing {
    /// Ring flags (spec 2.7.8).
    pub flags: VolatileCell<u16>,
    /// Index where the device will place the next used entry (`idx` in the spec).
    pub head_index: VolatileCell<u16>,
    /// Flexible array of used entries; actual length is the queue size.
    pub elements: IncompleteArrayField<UsedRingElement>,
}

// Header is flags + head_index = 4 bytes (the array field is zero-sized).
const_assert_eq!(core::mem::size_of::<UsedRing>(), 4);

/// One used-ring entry: which chain completed and how many bytes were written.
#[repr(C)]
pub struct UsedRingElement {
    /// Descriptor-table index of the head of the completed chain.
    pub table_index: VolatileCell<u32>,
    /// Number of bytes the device wrote into the chain's buffers.
    pub written: VolatileCell<u32>,
}

const_assert_eq!(core::mem::size_of::<UsedRingElement>(), 8);

/// Trailer placed after the used-ring elements.
#[repr(C)]
pub struct UsedRingExtra {
    pub event_index: VolatileCell<u16>,
}
// ======== Utils ========

/// A single DMA buffer to be placed in a descriptor: physical address,
/// byte length and the descriptor flags to set.
pub struct Buffer {
    pub(crate) buffer: usize,
    pub(crate) size: usize,
    pub(crate) flags: DescriptorFlags,
}
impl Buffer {
pub fn new<T>(val: &common::dma::Dma<T>) -> Self {
Self {
buffer: val.physical(),
size: core::mem::size_of::<T>(),
flags: DescriptorFlags::empty(),
}
}
pub fn new_unsized<T>(val: &common::dma::Dma<[T]>) -> Self {
Self {
buffer: val.physical(),
size: core::mem::size_of::<T>() * val.len(),
flags: DescriptorFlags::empty(),
}
}
pub fn new_sized<T>(val: &common::dma::Dma<[T]>, size: usize) -> Self {
Self {
buffer: val.physical(),
size,
flags: DescriptorFlags::empty(),
}
}
pub fn flags(mut self, flags: DescriptorFlags) -> Self {
self.flags = flags;
self
}
}
/// Builder assembling an ordered descriptor chain out of [`Buffer`]s.
///
/// XXX: The [`DescriptorFlags::NEXT`] flag is set automatically.
pub struct ChainBuilder {
    buffers: Vec<Buffer>,
}
impl ChainBuilder {
pub fn new() -> Self {
Self {
buffers: Vec::new(),
}
}
pub fn chain(mut self, mut buffer: Buffer) -> Self {
buffer.flags |= DescriptorFlags::NEXT;
self.buffers.push(buffer);
self
}
pub fn build(mut self) -> Vec<Buffer> {
let last_buffer = self.buffers.last_mut().expect("virtio-core: empty chain");
last_buffer.flags.remove(DescriptorFlags::NEXT);
self.buffers
}
}
@@ -0,0 +1,176 @@
//! [4.1 Virtio Over PCI Bus](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-1150001)
//!
//! This file contains comments copied from the VirtIO specification which are
//! licensed under the following conditions:
//!
//! Copyright © OASIS Open 2022. All Rights Reserved.
//!
//! All capitalized terms in the following text have the meanings assigned to them
//! in the OASIS Intellectual Property Rights Policy (the "OASIS IPR Policy"). The
//! full Policy may be found at the OASIS website.
//!
//! This document and translations of it may be copied and furnished to others,
//! and derivative works that comment on or otherwise explain it or assist in its
//! implementation may be prepared, copied, published, and distributed, in whole
//! or in part, without restriction of any kind, provided that the above copyright
//! notice and this section are included on all such copies and derivative works.
//! However, this document itself may not be modified in any way, including by
//! removing the copyright notice or references to OASIS, except as needed for the
//! purpose of developing any document or deliverable produced by an OASIS Technical
//! Committee (in which case the rules applicable to copyrights, as set forth in the
//! OASIS IPR Policy, must be followed) or as required to translate it into languages
//! other than English.
use super::DeviceStatusFlags;
use crate::utils::VolatileCell;
use static_assertions::const_assert_eq;
/// [4.1.4 Virtio Structure PCI Capabilities](https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x1-1240004)
//
// `packed` because this mirrors the on-device capability layout byte for byte.
#[derive(Debug, Copy, Clone)]
#[repr(C, packed)]
pub struct PciCapability {
    /// Identifies the structure.
    pub cfg_type: CfgType,
    /// Where to find it.
    pub bar: u8,
    /// Multiple capabilities of the same type.
    pub id: u8,
    /// Pad to a full dword.
    pub padding: [u8; 2],
    /// Offset within the bar.
    pub offset: u32,
    /// Length of the structure, in bytes.
    pub length: u32,
}

// The size of `PciCapability` is 13 bytes since the generic
// PCI fields are *not* included.
const_assert_eq!(core::mem::size_of::<PciCapability>(), 13);
/// Discriminates the virtio configuration structure a capability describes
/// (spec 4.1.4). Values 6 and 7 are intentionally absent here.
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
pub enum CfgType {
    /// Common Configuration.
    Common = 1,
    /// Notifications.
    Notify = 2,
    /// ISR Status.
    Isr = 3,
    /// Device specific configuration.
    Device = 4,
    /// PCI configuration access.
    PciConfig = 5,
    /// Shared memory region.
    SharedMemory = 8,
    /// Vendor-specific data.
    Vendor = 9,
}

const_assert_eq!(core::mem::size_of::<CfgType>(), 1);
/// The virtio common configuration structure, memory-mapped through the
/// `Common` capability (spec 4.1.4.3). All fields are accessed volatilely
/// because the device reads/writes them behind our back.
#[derive(Debug)]
#[repr(C)]
pub struct CommonCfg {
    // About the whole device.
    /// The driver uses this to select which feature bits device_feature shows.
    /// Value 0x0 selects Feature Bits 0 to 31, 0x1 selects Feature Bits 32 to 63, etc.
    /// read-write
    pub device_feature_select: VolatileCell<u32>,
    /// The device uses this to report which feature bits it is offering to the driver:
    /// the driver writes to device_feature_select to select which feature bits are presented.
    /// read-only for driver
    pub device_feature: VolatileCell<u32>,
    /// The driver uses this to select which feature bits driver_feature shows.
    /// Value 0x0 selects Feature Bits 0 to 31, 0x1 selects Feature Bits 32 to 63, etc.
    /// read-write
    pub driver_feature_select: VolatileCell<u32>,
    /// The driver writes this to accept feature bits offered by the device.
    /// Driver Feature Bits selected by driver_feature_select.
    /// read-write
    pub driver_feature: VolatileCell<u32>,
    /// The driver sets the Configuration Vector for MSI-X.
    /// read-write
    pub config_msix_vector: VolatileCell<u16>,
    /// The device specifies the maximum number of virtqueues supported here.
    /// read-only for driver
    pub num_queues: VolatileCell<u16>,
    /// The driver writes the device status here (see 2.1).
    /// Writing 0 into this field resets the device.
    /// read-write
    pub device_status: VolatileCell<DeviceStatusFlags>,
    /// Configuration atomicity value. The device changes this every time the
    /// configuration noticeably changes.
    /// read-only for driver
    pub config_generation: VolatileCell<u8>,

    // About a specific virtqueue.
    /// Queue Select. The driver selects which virtqueue the following fields refer to.
    /// read-write
    pub queue_select: VolatileCell<u16>,
    /// Queue Size. On reset, specifies the maximum queue size supported by the device.
    /// This can be modified by the driver to reduce memory requirements.
    /// A 0 means the queue is unavailable.
    /// read-write
    pub queue_size: VolatileCell<u16>,
    /// The driver uses this to specify the queue vector for MSI-X.
    /// read-write
    pub queue_msix_vector: VolatileCell<u16>,
    /// The driver uses this to selectively prevent the device from executing
    /// requests from this virtqueue. 1 - enabled; 0 - disabled.
    /// read-write
    pub queue_enable: VolatileCell<u16>,
    /// The driver reads this to calculate the offset from start of Notification
    /// structure at which this virtqueue is located. Note: this is not an offset
    /// in bytes. See 4.1.4.4 below.
    /// read-only for driver
    pub queue_notify_off: VolatileCell<u16>,
    /// The driver writes the physical address of Descriptor Area here.
    /// See section 2.6.
    /// read-write
    pub queue_desc: VolatileCell<u64>,
    /// The driver writes the physical address of Driver Area here.
    /// See section 2.6.
    /// read-write
    pub queue_driver: VolatileCell<u64>,
    /// The driver writes the physical address of Device Area here.
    /// See section 2.6.
    /// read-write
    pub queue_device: VolatileCell<u64>,
    /// This field exists only if VIRTIO_F_NOTIF_CONFIG_DATA has been negotiated.
    /// The driver will use this value to put it in the virtqueue number field
    /// in the available buffer notification structure. See section 4.1.5.2. Note:
    /// This field provides the device with flexibility to determine how virtqueues
    /// will be referred to in available buffer notifications. In a trivial case the
    /// device can set queue_notify_data=vqn. Some devices may benefit from providing
    /// another value, for example an internal virtqueue identifier, or an internal
    /// offset related to the virtqueue number.
    /// read-only for driver
    pub queue_notify_data: VolatileCell<u16>,
    /// The driver uses this to selectively reset the queue. This field exists
    /// only if VIRTIO_F_RING_RESET has been negotiated. (see 2.6.1).
    /// read-write
    pub queue_reset: VolatileCell<u16>,
}

//TODO: why does this fail on x86?
#[cfg(not(target_arch = "x86"))]
const_assert_eq!(core::mem::size_of::<CommonCfg>(), 64);
/// The `Notify` capability: the generic virtio capability followed by the
/// queue-notify offset multiplier (spec 4.1.4.4).
#[derive(Debug, Copy, Clone)]
#[repr(C, packed)]
pub struct PciCapabilityNotify {
    pub cap: PciCapability,
    /// Multiplier for queue_notify_off.
    notify_off_multiplier: u32,
}

impl PciCapabilityNotify {
    /// Returns the queue-notify offset multiplier by value
    /// (the struct is packed, so the field must not be borrowed).
    pub fn notify_off_multiplier(&self) -> u32 {
        self.notify_off_multiplier
    }
}

// 13-byte generic capability + 4-byte multiplier.
const_assert_eq!(core::mem::size_of::<PciCapabilityNotify>(), 17);

/// Vector value used to disable MSI for queue
pub const VIRTIO_MSI_NO_VECTOR: u16 = 0xffff;
@@ -0,0 +1,696 @@
use crate::spec::*;
use crate::utils::align;
use common::dma::Dma;
use event::RawEventQueue;
use core::mem::size_of;
use core::sync::atomic::{AtomicU16, Ordering};
use std::fs::File;
use std::future::Future;
use std::os::fd::AsRawFd;
use std::sync::{Arc, Mutex, Weak};
use std::task::{Poll, Waker};
/// Errors surfaced while probing or driving a VirtIO transport.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// A Redox system call failed; the source error is preserved via `#[from]`.
    #[error("syscall failed")]
    SyscallError(#[from] libredox::error::Error),
    /// The device lacks a required capability of the given configuration type.
    #[error("the device is incapable of {0:?}")]
    InCapable(CfgType),
}
/// Returns the queue part sizes in bytes, as
/// `(descriptor_area, available_ring, used_ring)`, each rounded up to a
/// whole number of pages.
///
/// ## Reference
/// Section 2.7 Split Virtqueues of the specification v1.2 describes the alignment
/// and size of the queue parts.
///
/// ## Panics
/// If `queue_size` is not a power of two or is zero.
pub const fn queue_part_sizes(queue_size: usize) -> (usize, usize, usize) {
    // `is_power_of_two()` is false for 0, so this also rejects empty queues;
    // the previous explicit `queue_size != 0` check was redundant.
    assert!(queue_size.is_power_of_two());

    // Alignment requirements from spec v1.2, section 2.7.
    const DESCRIPTOR_ALIGN: usize = 16;
    const AVAILABLE_ALIGN: usize = 2;
    const USED_ALIGN: usize = 4;

    // One 16-byte descriptor per queue entry.
    let desc = size_of::<Descriptor>() * queue_size;

    // `avail_header`: Size of the available ring header and the footer.
    let avail_header = size_of::<AvailableRing>() + size_of::<AvailableRingExtra>();
    let avail = avail_header + size_of::<AvailableRingElement>() * queue_size;

    // `used_header`: Size of the used ring header and the footer.
    let used_header = size_of::<UsedRing>() + size_of::<UsedRingExtra>();
    let used = used_header + size_of::<UsedRingElement>() * queue_size;

    (
        align(desc, DESCRIPTOR_ALIGN).next_multiple_of(syscall::PAGE_SIZE),
        align(avail, AVAILABLE_ALIGN).next_multiple_of(syscall::PAGE_SIZE),
        align(used, USED_ALIGN).next_multiple_of(syscall::PAGE_SIZE),
    )
}
/// Spawns a background thread that, whenever the IRQ file handle signals an
/// event, wakes every task currently waiting on `queue`.
pub fn spawn_irq_thread(irq_handle: &File, queue: &Arc<Queue<'static>>) {
    let irq_fd = irq_handle.as_raw_fd();
    let queue_copy = queue.clone();

    std::thread::spawn(move || {
        let event_queue = RawEventQueue::new().unwrap();
        event_queue
            .subscribe(irq_fd as usize, 0, event::EventFlags::READ)
            .unwrap();

        // Blocks on the event queue; each delivered event corresponds to
        // readiness of the IRQ handle.
        for _ in event_queue.map(Result::unwrap) {
            // Wake up the tasks waiting on the queue.
            for (_, task) in queue_copy.waker.lock().unwrap().iter() {
                task.wake_by_ref();
            }
        }
    });
}
/// Abstraction over the transport-specific "notification bell" used to tell
/// the device that a queue has new available buffers.
pub trait NotifyBell {
    /// Notifies the device about new buffers in queue `queue_index`.
    fn ring(&self, queue_index: u16);
}
/// Future resolving when the descriptor chain headed by `first_descriptor`
/// appears in the queue's used ring; yields the number of bytes written.
pub struct PendingRequest<'a> {
    queue: Arc<Queue<'a>>,
    /// Descriptor-table index of the head of the submitted chain.
    first_descriptor: u32,
}
impl<'a> Future for PendingRequest<'a> {
    /// Number of bytes the device wrote for this request.
    type Output = u32;

    fn poll(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
        // XXX: Register the waker before checking the queue to avoid the race condition
        // where you lose a notification.
        self.queue
            .waker
            .lock()
            .unwrap()
            .insert(self.first_descriptor, cx.waker().clone());

        let used_head = self.queue.used.head_index();
        if used_head == self.queue.used_head.load(Ordering::SeqCst) {
            // No new requests have been completed.
            return Poll::Pending;
        }

        // Inspect the most recently used element only.
        // NOTE(review): `used_head - 1` underflows when the device index wraps
        // past 0 (u16 wraparound after 65536 completions) — confirm intended.
        let used_element = self.queue.used.get_element_at((used_head - 1) as usize);
        let written = used_element.written.get();
        let mut table_index = used_element.table_index.get();

        if table_index == self.first_descriptor {
            // The request has been completed; recycle the descriptors used.
            // Walk the chain via the NEXT links, returning each descriptor
            // index to the free stack.
            while self.queue.descriptor[table_index as usize]
                .flags()
                .contains(DescriptorFlags::NEXT)
            {
                let next_index = self.queue.descriptor[table_index as usize].next();
                self.queue.descriptor_stack.push(table_index as u16);
                table_index = next_index.into();
            }

            // Push the last descriptor.
            self.queue.descriptor_stack.push(table_index as u16);

            // Our request completed — no further wakeups needed.
            self.queue
                .waker
                .lock()
                .unwrap()
                .remove(&self.first_descriptor);
            // Record the consumed used-ring position.
            self.queue.used_head.store(used_head, Ordering::SeqCst);
            return Poll::Ready(written);
        } else {
            // A different chain completed; stay registered and wait.
            return Poll::Pending;
        }
    }
}
/// Driver-side handle to a single virtqueue.
pub struct Queue<'a> {
    /// Index of this queue within the device.
    pub queue_index: u16,
    /// Wakers of in-flight requests, keyed by the chain's first descriptor.
    pub waker: Mutex<std::collections::HashMap<u32, Waker>>,
    /// Device-written "used" ring.
    pub used: Used<'a>,
    /// Descriptor table shared with the device.
    pub descriptor: Dma<[Descriptor]>,
    /// Driver-written "available" ring.
    pub available: Available<'a>,
    /// Last used-ring head index consumed by the driver.
    pub used_head: AtomicU16,

    // MSI-X vector assigned to this queue.
    vector: u16,
    // Mechanism used to notify the device of newly available buffers.
    notification_bell: Box<dyn NotifyBell>,
    // Stack of free descriptor indices.
    descriptor_stack: crossbeam_queue::SegQueue<u16>,
    // Self-reference used to hand out `Arc<Queue>` from `send`.
    sref: Weak<Self>,
}
impl<'a> Queue<'a> {
    /// Creates a new queue from its descriptor table and two rings.
    ///
    /// All descriptor indices (`0..descriptor.len()`) start out free on the
    /// internal descriptor stack.
    pub fn new<N>(
        descriptor: Dma<[Descriptor]>,
        available: Available<'a>,
        used: Used<'a>,
        notification_bell: N,
        queue_index: u16,
        vector: u16,
    ) -> Arc<Self>
    where
        N: NotifyBell + 'static,
    {
        let descriptor_stack = crossbeam_queue::SegQueue::new();
        (0..descriptor.len() as u16).for_each(|i| descriptor_stack.push(i));

        Arc::new_cyclic(|sref| Self {
            notification_bell: Box::new(notification_bell),
            available,
            descriptor,
            used,
            waker: Mutex::new(std::collections::HashMap::new()),
            queue_index,
            descriptor_stack,
            used_head: AtomicU16::new(0),
            sref: sref.clone(),
            vector,
        })
    }

    /// Resets the queue's driver-side state (head indices and free
    /// descriptors); used after a device reset via `Transport::reinit_queue`.
    fn reinit(&self) {
        self.used_head.store(0, Ordering::SeqCst);
        self.available.set_head_idx(0);

        // Drain all of the available descriptors.
        while let Some(_) = self.descriptor_stack.pop() {}
        // Refill the descriptor stack.
        (0..self.descriptor.len() as u16).for_each(|i| self.descriptor_stack.push(i));
    }

    /// Submits a descriptor chain to the device, publishes it on the
    /// available ring and rings the notification bell.
    ///
    /// ## Panics
    /// Panics if `chain` is empty or if there are fewer free descriptors
    /// than buffers in the chain.
    #[must_use = "The function returns a future that must be awaited to ensure the sent request is completed."]
    pub fn send(&self, chain: Vec<Buffer>) -> PendingRequest<'a> {
        let mut first_descriptor: Option<usize> = None;
        let mut last_descriptor: Option<usize> = None;

        for buffer in chain.iter() {
            let descriptor = self.descriptor_stack.pop().unwrap() as usize;

            if first_descriptor.is_none() {
                first_descriptor = Some(descriptor);
            }

            self.descriptor[descriptor].set_addr(buffer.buffer as u64);
            self.descriptor[descriptor].set_flags(buffer.flags);
            self.descriptor[descriptor].set_size(buffer.size as u32);

            // Link the previous descriptor in the chain to this one.
            if let Some(index) = last_descriptor {
                self.descriptor[index].set_next(Some(descriptor as u16));
            }

            last_descriptor = Some(descriptor);
        }

        let last_descriptor = last_descriptor.unwrap();
        let first_descriptor = first_descriptor.unwrap();
        self.descriptor[last_descriptor].set_next(None);

        let index = self.available.head_index() as usize;

        self.available
            .get_element_at(index)
            .set_table_index(first_descriptor as u16);

        // The available index is a free-running counter that wraps at 2^16.
        // BUG FIX: `index as u16 + 1` panicked (overflow) in debug builds
        // once the counter reached `u16::MAX`; use a wrapping increment.
        self.available.set_head_idx((index as u16).wrapping_add(1));
        self.notification_bell.ring(self.queue_index);

        PendingRequest {
            queue: self.sref.upgrade().unwrap(),
            first_descriptor: first_descriptor as u32,
        }
    }

    /// Returns the number of descriptors in the descriptor table of this queue.
    pub fn descriptor_len(&self) -> usize {
        self.descriptor.len()
    }
}
// SAFETY: NOTE(review): `Queue` contains raw-pointer-backed rings (`Used`,
// `Available`) and a boxed `NotifyBell`; cross-thread mutation is mediated by
// the `waker` mutex, atomics and the lock-free descriptor stack, but the
// soundness of sharing the underlying DMA/MMIO pointers must be upheld by the
// transport that created them — confirm.
unsafe impl Sync for Queue<'_> {}
unsafe impl Send for Queue<'_> {}
/// Driver-owned "available" (driver) ring of a virtqueue.
pub struct Available<'a> {
    // Backing memory for the ring: owned DMA allocation or borrowed mapping.
    mem: Mem<'a>,
    // Number of elements in the ring (the queue size).
    queue_size: usize,
}
/// A non-owned memory region described by its physical and virtual addresses.
pub struct Borrowed<'a> {
    // Physical base address of the region.
    phys: usize,
    // Virtual address at which the region is mapped.
    virt: usize,
    // Size of the region in bytes.
    size: usize,
    // Ties the region to the lifetime `'a` without storing a real reference.
    _unused: &'a (),
}
/// Backing memory for a ring: either an owned DMA allocation or a region
/// borrowed from a pre-existing mapping.
pub enum Mem<'a> {
    /// Owned, driver-allocated DMA memory.
    Owned(Dma<[u8]>),
    /// Borrowed, externally managed memory region.
    Borrowed(Borrowed<'a>),
}
impl Borrowed<'_> {
    /// Creates a borrowed memory region from raw addresses.
    ///
    /// # Safety
    /// `virt` must be a valid mapping of `size` bytes whose physical base is
    /// `phys`, and the mapping must remain valid for the region's lifetime.
    pub unsafe fn new(phys: usize, virt: usize, size: usize) -> Self {
        Self {
            phys,
            virt,
            size,
            _unused: &(),
        }
    }
}
impl<'a> Mem<'a> {
pub fn as_ptr<T>(&self) -> *const T {
match *self {
Self::Owned(ref dma) => dma.as_ptr().cast(),
Self::Borrowed(Borrowed {
phys: _,
virt,
size: _,
_unused,
}) => virt as *const T,
}
}
pub fn as_mut_ptr<T>(&mut self) -> *mut T {
match *self {
Self::Owned(ref mut dma) => dma.as_mut_ptr().cast(),
Self::Borrowed(Borrowed {
phys: _,
virt,
size: _,
_unused,
}) => virt as *mut T,
}
}
pub fn physical(&self) -> usize {
match self {
Self::Owned(dma) => dma.physical(),
Self::Borrowed(borrowed) => borrowed.phys,
}
}
}
impl<'a> Available<'a> {
    /// Returns a shared reference to the raw ring structure.
    pub fn ring(&self) -> &AvailableRing {
        // SAFETY: `mem` is a valid mapping at least as large as the ring.
        unsafe { &*self.mem.as_ptr() }
    }

    /// Returns a mutable reference to the raw ring structure.
    pub fn ring_mut(&mut self) -> &mut AvailableRing {
        // SAFETY: as above; `&mut self` guarantees exclusive access.
        unsafe { &mut *self.mem.as_mut_ptr() }
    }

    /// Allocates a zeroed, DMA-capable available ring for a queue of
    /// `queue_size` elements.
    pub fn new(queue_size: usize) -> Result<Self, Error> {
        // BUG FIX: this previously destructured `(_, _, size)`, i.e. the
        // *used*-ring size (third element), over-allocating the available
        // ring. The available-ring size is the second tuple element.
        let (_, size, _) = queue_part_sizes(queue_size);

        let mem = unsafe {
            Dma::zeroed_slice(size)
                .map_err(Error::SyscallError)?
                .assume_init()
        };

        unsafe { Self::from_raw(Mem::Owned(mem), queue_size) }
    }

    /// Constructs an available ring on top of pre-existing memory.
    ///
    /// # Safety
    /// `mem` must be a valid mapping large enough to hold an available ring
    /// of `queue_size` elements.
    pub unsafe fn from_raw(mem: Mem<'a>, queue_size: usize) -> Result<Self, Error> {
        let ring = Self { mem, queue_size };

        for i in 0..queue_size {
            // Setting them to `u16::MAX` helps with debugging since qemu reports them
            // as illegal values.
            ring.get_element_at(i)
                .table_index
                .store(u16::MAX, Ordering::SeqCst);
        }

        Ok(ring)
    }

    /// Returns the ring element at `index` (wrapped by the queue size).
    ///
    /// ## Panics
    /// This function panics if the index is out of bounds.
    pub fn get_element_at(&self, index: usize) -> &AvailableRingElement {
        // SAFETY: the element array has exactly `queue_size` entries and the
        // index is reduced modulo the queue size.
        unsafe {
            self.ring()
                .elements
                .as_slice(self.queue_size)
                .get(index % self.queue_size)
                .expect("virtio-core::available: index out of bounds")
        }
    }

    /// Returns the driver's free-running available head index.
    pub fn head_index(&self) -> u16 {
        self.ring().head_index.load(Ordering::SeqCst)
    }

    /// Publishes a new head index, exposing preceding elements to the device.
    pub fn set_head_idx(&self, index: u16) {
        self.ring().head_index.store(index, Ordering::SeqCst);
    }

    /// Physical address of the ring (programmed into the device's
    /// `queue_driver` register).
    pub fn phys_addr(&self) -> usize {
        self.mem.physical()
    }
}
impl<'a> Drop for Available<'a> {
    /// Dropping a ring the device may still reference is suspicious, so log
    /// it loudly to aid debugging.
    fn drop(&mut self) {
        let addr = self.phys_addr();
        log::warn!("virtio-core: dropping 'available' ring at {:#x}", addr);
    }
}
/// Device-written "used" ring of a virtqueue.
pub struct Used<'a> {
    // Backing memory for the ring: owned DMA allocation or borrowed mapping.
    mem: Mem<'a>,
    // Number of elements in the ring (the queue size).
    queue_size: usize,
    // Ties the ring to the lifetime of borrowed backing memory.
    _unused: &'a (),
}
impl<'a> Used<'a> {
    /// Returns a shared reference to the raw ring structure.
    fn ring(&self) -> &UsedRing {
        // SAFETY: `mem` is a valid mapping at least as large as the ring.
        unsafe { &*self.mem.as_ptr() }
    }

    /// Returns a mutable reference to the raw ring structure.
    fn ring_mut(&mut self) -> &mut UsedRing {
        // SAFETY: as above; `&mut self` guarantees exclusive access.
        unsafe { &mut *self.mem.as_mut_ptr() }
    }

    /// Allocates a zeroed, DMA-capable used ring for a queue of `queue_size`
    /// elements.
    pub fn new(queue_size: usize) -> Result<Self, Error> {
        let (_, _, size) = queue_part_sizes(queue_size);

        let mem = unsafe {
            Dma::zeroed_slice(size)
                .map_err(Error::SyscallError)?
                .assume_init()
        };

        unsafe { Self::from_raw(Mem::Owned(mem), queue_size) }
    }

    /// Constructs a used ring on top of pre-existing memory.
    ///
    /// # Safety
    /// `mem` must be a valid mapping large enough to hold a used ring of
    /// `queue_size` elements.
    pub unsafe fn from_raw(mem: Mem<'a>, queue_size: usize) -> Result<Self, Error> {
        let mut ring = Self {
            mem,
            queue_size,
            _unused: &(),
        };

        for i in 0..queue_size {
            // Setting them to `u32::MAX` helps with debugging since qemu reports them
            // as illegal values.
            ring.get_mut_element_at(i).table_index.set(u32::MAX);
        }

        Ok(ring)
    }

    /// Returns the ring element at `index` (wrapped by the queue size).
    ///
    /// ## Panics
    /// This function panics if the index is out of bounds.
    pub fn get_element_at(&self, index: usize) -> &UsedRingElement {
        // SAFETY: the element array has exactly `queue_size` entries and the
        // index is reduced modulo the queue size.
        unsafe {
            self.ring()
                .elements
                .as_slice(self.queue_size)
                .get(index % self.queue_size)
                .expect("virtio-core::used: index out of bounds")
        }
    }

    /// Returns the ring element at `index` (wrapped by the queue size).
    ///
    /// ## Panics
    /// This function panics if the index is out of bounds.
    pub fn get_mut_element_at(&mut self, index: usize) -> &mut UsedRingElement {
        let queue_size = self.queue_size;
        // SAFETY: the element array has exactly `queue_size` entries and the
        // index is reduced modulo the queue size.
        unsafe {
            self.ring_mut()
                .elements
                .as_mut_slice(queue_size)
                // BUG FIX: this previously wrapped with `index % 256`, which
                // panicked (via the `expect`) for queues smaller than 256 and
                // addressed the wrong element for larger queues; wrap by the
                // actual queue size, matching `get_element_at`.
                .get_mut(index % queue_size)
                .expect("virtio-core::used: index out of bounds")
        }
    }

    /// Returns the used ring's flags field.
    pub fn flags(&self) -> u16 {
        self.ring().flags.get()
    }

    /// Returns the device's free-running used head index.
    pub fn head_index(&self) -> u16 {
        self.ring().head_index.get()
    }

    /// Physical address of the ring (programmed into the device's
    /// `queue_device` register).
    pub fn phys_addr(&self) -> usize {
        self.mem.physical()
    }
}
impl Drop for Used<'_> {
    /// Dropping a ring the device may still reference is suspicious, so log
    /// it loudly to aid debugging.
    fn drop(&mut self) {
        let addr = self.phys_addr();
        log::warn!("virtio-core: dropping 'used' ring at {:#x}", addr);
    }
}
/// Abstraction over a virtio transport (e.g. the standard PCI transport),
/// covering device configuration access, feature negotiation and queue setup.
pub trait Transport: Sync + Send {
    /// Reads `size` bytes of device-specific configuration at `offset`,
    /// zero-extended to a `u64`.
    ///
    /// `size` specifies the size of the read in bytes.
    ///
    /// ## Panics
    /// This function panics if the provided `size` is more than `size_of::<u64>()`.
    fn load_config(&self, offset: u8, size: u8) -> u64;
    /// Resets the device.
    fn reset(&self);
    /// Returns whether the device supports the specified feature.
    fn check_device_feature(&self, feature: u32) -> bool;
    /// Acknowledges the specified feature.
    ///
    /// **Note**: [`Transport::check_device_feature`] must be used to check whether
    /// the device supports the feature before acknowledging it.
    fn ack_driver_feature(&self, feature: u32);
    /// Finalizes the acknowledged features by setting the `FEATURES_OK` bit in the
    /// device status flags.
    fn finalize_features(&self);
    /// Runs the device.
    ///
    /// At this point, all of the queues must be created and the features must be
    /// finalized.
    ///
    /// ## Panics
    /// This function panics if the device is already running.
    fn run_device(&self) {
        // Default implementation: flip the DRIVER_OK status bit.
        self.insert_status(DeviceStatusFlags::DRIVER_OK);
    }
    /// Request to be notified on configuration changes on the given MSI-X vector.
    fn setup_config_notify(&self, vector: u16);
    /// Each time the device configuration changes this number will be updated.
    fn config_generation(&self) -> u32;
    /// Creates a new queue.
    ///
    /// ## Panics
    /// This function panics if the device is running.
    fn setup_queue(&self, vector: u16, irq_handle: &File) -> Result<Arc<Queue<'_>>, Error>;
    // TODO(andypython): Should this function be unsafe?
    /// Re-initializes an existing queue, e.g. after a device reset.
    fn reinit_queue(&self, queue: Arc<Queue>);
    /// ORs `status` into the device status register.
    fn insert_status(&self, status: DeviceStatusFlags);
}
// Standard (PCI transport) notification bell: a reference to this queue's
// notify register inside the device's notification MMIO region.
struct StandardBell<'a>(&'a mut AtomicU16);

impl NotifyBell for StandardBell<'_> {
    #[inline]
    fn ring(&self, queue_index: u16) {
        // Writing the queue index to the notify register signals the device.
        self.0.store(queue_index, Ordering::SeqCst);
    }
}
/// The standard (PCI) virtio transport, built on memory-mapped configuration
/// structures.
pub struct StandardTransport<'a> {
    // Memory-mapped common configuration structure.
    pub(crate) common: Mutex<&'a mut CommonCfg>,
    // Base of the notification region.
    notify: *const u8,
    // Multiplier applied to a queue's `queue_notify_off` to locate its
    // notify register within the notification region.
    notify_mul: u32,
    // Device-specific configuration space.
    device_space: *const u8,
    // Next queue index to hand out in `setup_queue`.
    queue_index: AtomicU16,
}
impl<'a> StandardTransport<'a> {
    /// Wraps the memory-mapped transport structures into a shared transport
    /// handle; queue indices are handed out sequentially starting at 0.
    pub fn new(
        common: &'a mut CommonCfg,
        notify: *const u8,
        notify_mul: u32,
        device_space: *const u8,
    ) -> Arc<Self> {
        let transport = Self {
            common: Mutex::new(common),
            notify,
            notify_mul,
            device_space,
            queue_index: AtomicU16::new(0),
        };

        Arc::new(transport)
    }
}
impl Transport for StandardTransport<'_> {
    fn load_config(&self, offset: u8, size: u8) -> u64 {
        // NOTE(review): these are plain (non-volatile) reads of device
        // configuration space — confirm whether volatile access is required
        // for this mapping.
        unsafe {
            let ptr = self.device_space.add(offset as usize);
            let size = size as usize;

            if size == size_of::<u8>() {
                ptr.cast::<u8>().read() as u64
            } else if size == size_of::<u16>() {
                ptr.cast::<u16>().read() as u64
            } else if size == size_of::<u32>() {
                ptr.cast::<u32>().read() as u64
            } else if size == size_of::<u64>() {
                ptr.cast::<u64>().read() as u64
            } else {
                // Only 1, 2, 4 and 8 byte accesses are supported.
                unreachable!()
            }
        }
    }

    fn reset(&self) {
        let mut common = self.common.lock().unwrap();
        common.device_status.set(DeviceStatusFlags::empty());
        // Upon reset, the device must initialize device status to 0.
        assert_eq!(common.device_status.get(), DeviceStatusFlags::empty());
    }

    fn check_device_feature(&self, feature: u32) -> bool {
        let mut common = self.common.lock().unwrap();

        // Select the 32-bit feature word (feature / 32), then test the bit
        // within it (feature % 32).
        common.device_feature_select.set(feature >> 5);
        (common.device_feature.get() & (1 << (feature & 31))) != 0
    }

    fn ack_driver_feature(&self, feature: u32) {
        let mut common = self.common.lock().unwrap();

        // Select the 32-bit feature word, then OR in the feature bit.
        common.driver_feature_select.set(feature >> 5);

        let current = common.driver_feature.get();
        common.driver_feature.set(current | (1 << (feature & 31)));
    }

    fn finalize_features(&self) {
        // Check VirtIO version 1 compliance.
        assert!(self.check_device_feature(VIRTIO_F_VERSION_1));
        self.ack_driver_feature(VIRTIO_F_VERSION_1);

        let mut common = self.common.lock().unwrap();

        let status = common.device_status.get();
        common
            .device_status
            .set(status | DeviceStatusFlags::FEATURES_OK);

        // Re-read device status to ensure the `FEATURES_OK` bit is still set: otherwise,
        // the device does not support our subset of features and the device is unusable.
        let confirm = common.device_status.get();
        assert!((confirm & DeviceStatusFlags::FEATURES_OK) == DeviceStatusFlags::FEATURES_OK);
    }

    fn setup_config_notify(&self, vector: u16) {
        self.common.lock().unwrap().config_msix_vector.set(vector);
    }

    fn config_generation(&self) -> u32 {
        u32::from(self.common.lock().unwrap().config_generation.get())
    }

    fn setup_queue(&self, vector: u16, irq_handle: &File) -> Result<Arc<Queue<'_>>, Error> {
        let mut common = self.common.lock().unwrap();

        // Queues are assigned sequentially, one per call.
        let queue_index = self.queue_index.fetch_add(1, Ordering::SeqCst);
        common.queue_select.set(queue_index);

        let queue_size = common.queue_size.get() as usize;
        let queue_notify_idx = common.queue_notify_off.get();

        // Allocate memory for the queue structures.
        let descriptor = unsafe {
            Dma::<[Descriptor]>::zeroed_slice(queue_size)
                .map_err(Error::SyscallError)?
                .assume_init()
        };

        let avail = Available::new(queue_size)?;
        let used = Used::new(queue_size)?;

        // Program the physical addresses of the three rings.
        common.queue_desc.set(descriptor.physical() as u64);
        common.queue_driver.set(avail.phys_addr() as u64);
        common.queue_device.set(used.phys_addr() as u64);

        // Set the MSI-X vector.
        common.queue_msix_vector.set(vector);
        assert!(common.queue_msix_vector.get() == vector);

        // Enable the queue.
        common.queue_enable.set(1);

        // Locate this queue's notify ("doorbell") register within the
        // notification region.
        let notification_bell = unsafe {
            let offset = self.notify_mul * queue_notify_idx as u32;
            &mut *(self.notify.add(offset as usize) as *mut AtomicU16)
        };

        log::debug!("virtio-core: enabled queue #{queue_index} (size={queue_size})");

        let queue = Queue::new(
            descriptor,
            avail,
            used,
            StandardBell(notification_bell),
            queue_index,
            vector,
        );

        // Start the IRQ thread that wakes tasks waiting on this queue.
        spawn_irq_thread(irq_handle, &queue);
        Ok(queue)
    }

    fn insert_status(&self, status: DeviceStatusFlags) {
        let mut common = self.common.lock().unwrap();

        let old = common.device_status.get();
        common.device_status.set(old | status);
    }

    /// Re-initializes a queue; usually done after a device reset.
    fn reinit_queue(&self, queue: Arc<Queue>) {
        let mut common = self.common.lock().unwrap();
        queue.reinit();

        common.queue_select.set(queue.queue_index);

        // Re-program the ring addresses for the selected queue.
        common.queue_desc.set(queue.descriptor.physical() as u64);
        common.queue_driver.set(queue.available.phys_addr() as u64);
        common.queue_device.set(queue.used.phys_addr() as u64);

        // Set the MSI-X vector.
        common.queue_msix_vector.set(queue.vector);
        assert!(common.queue_msix_vector.get() == queue.vector);

        // Enable the queue.
        common.queue_enable.set(1);
    }
}
// SAFETY: NOTE(review): `StandardTransport` holds raw pointers into MMIO
// regions; all common-config mutation goes through the `common` mutex, but
// the soundness of sharing `notify`/`device_space` across threads must be
// guaranteed by the owner of the mapping — confirm.
unsafe impl Send for StandardTransport<'_> {}
unsafe impl Sync for StandardTransport<'_> {}
// ---- concatenation marker: the items below belong to a separate utility module ----
use core::cell::UnsafeCell;
use core::fmt::Debug;
use core::marker::PhantomData;
/// A memory cell whose contents are always accessed with volatile loads and
/// stores, preventing the compiler from caching or eliding the accesses.
#[repr(C)]
pub struct VolatileCell<T> {
    value: UnsafeCell<T>,
}

impl<T: Copy> VolatileCell<T> {
    /// Wraps `value` in a new volatile cell.
    #[inline]
    pub const fn new(value: T) -> Self {
        Self {
            value: UnsafeCell::new(value),
        }
    }

    /// Performs a volatile read and returns a copy of the contained value.
    #[inline]
    pub fn get(&self) -> T {
        // SAFETY: `UnsafeCell::get` yields a valid, properly aligned pointer
        // to the contained value.
        unsafe { self.value.get().read_volatile() }
    }

    /// Performs a volatile write, replacing the contained value.
    #[inline]
    pub fn set(&mut self, value: T) {
        // SAFETY: as above; `&mut self` guarantees exclusive access.
        unsafe { self.value.get().write_volatile(value) }
    }
}

impl<T> Debug for VolatileCell<T>
where
    T: Debug + Copy,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut repr = f.debug_struct("VolatileCell");
        repr.field("value", &self.get());
        repr.finish()
    }
}

// SAFETY: NOTE(review): this unconditionally marks the cell `Sync` even for
// non-`Sync` `T`, and `get(&self)` performs unsynchronized reads — confirm
// that concurrent access is externally synchronized (e.g. MMIO registers).
unsafe impl<T> Sync for VolatileCell<T> {}
/// Zero-sized placeholder for a trailing flexible array member of a
/// `#[repr(C)]` struct (bindgen-style).
#[repr(C)]
pub struct IncompleteArrayField<T>(PhantomData<T>, [T; 0]);

impl<T> IncompleteArrayField<T> {
    /// Creates the zero-sized field marker.
    #[inline]
    pub const fn new() -> Self {
        Self(PhantomData, [])
    }

    /// Returns a pointer to where the trailing elements begin.
    ///
    /// # Safety
    /// The pointer is only meaningful if elements are actually allocated
    /// directly after this field.
    #[inline]
    pub unsafe fn as_ptr(&self) -> *const T {
        (self as *const Self).cast()
    }

    /// Mutable counterpart of [`Self::as_ptr`].
    ///
    /// # Safety
    /// Same requirements as [`Self::as_ptr`].
    #[inline]
    pub unsafe fn as_mut_ptr(&mut self) -> *mut T {
        (self as *mut Self).cast()
    }

    /// Views the trailing elements as a slice of length `len`.
    ///
    /// # Safety
    /// `len` initialized elements of `T` must be allocated contiguously
    /// starting at this field.
    #[inline]
    pub unsafe fn as_slice(&self, len: usize) -> &[T] {
        core::slice::from_raw_parts(self.as_ptr(), len)
    }

    /// Mutable counterpart of [`Self::as_slice`].
    ///
    /// # Safety
    /// Same requirements as [`Self::as_slice`], plus exclusive access.
    #[inline]
    pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
        core::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
    }
}
/// Aligns `val` upwards to the next multiple of `align`.
///
/// `align` must be a nonzero power of two; the result is the smallest
/// multiple of `align` that is greater than or equal to `val`.
pub const fn align(val: usize, align: usize) -> usize {
    // BUG FIX: the previous expression `(val + align) & !align` cleared only
    // the single bit with value `align`, which neither rounds up correctly
    // nor preserves existing multiples (e.g. it mapped 20 with align 16 to
    // 36). The canonical power-of-two round-up adds `align - 1` and masks
    // off the low bits.
    (val + align - 1) & !(align - 1)
}
/// Aligns `addr` downwards to the start of its page.
pub const fn align_down(addr: usize) -> usize {
    // PAGE_SIZE is a power of two, so masking off the low bits rounds down.
    let page_mask = syscall::PAGE_SIZE - 1;
    addr & !page_mask
}