Red Bear OS — microkernel OS in Rust, based on Redox

Derivative of Redox OS (https://www.redox-os.org) adding:
- AMD GPU driver (amdgpu) via LinuxKPI compat layer
- ext4 filesystem support (ext4d scheme daemon)
- ACPI fixes for AMD bare metal (x2APIC, DMAR, IVRS, MCFG)
- Custom branding (hostname, os-release, boot identity)

Build system is full upstream Redox with RBOS overlay in local/.
Patches for kernel, base, and relibc are symlinked from local/patches/
and protected from make clean/distclean. Custom recipes live in
local/recipes/ with symlinks into the recipes/ search path.

Build:  make all CONFIG_NAME=redbear-full
Sync:   ./local/scripts/sync-upstream.sh
This commit is contained in:
2026-04-12 19:05:00 +01:00
commit 50b731f1b7
3392 changed files with 98327 additions and 0 deletions
@@ -0,0 +1,103 @@
use std::alloc::Layout;
use std::collections::HashMap;
use std::sync::Mutex;
/// GFP flag bit for "physical address below 4 GiB" allocations; the value
/// matches `super::memory::GFP_DMA32`.
const GFP_DMA32: u32 = 2;
/// Wrapper to make raw pointers `Send`, required because `DEVRES_MAP` is a
/// global `Mutex` (which needs `T: Send`). Raw pointers are not `Send` by
/// default since the compiler can't prove thread-safety. Here each `(ptr,
/// Layout)` pair is exclusively owned by the device that allocated it — only
/// freed via `devm_kfree` or `devres_free_all` — so sending across threads is
/// safe.
struct TrackedAlloc(*mut u8, Layout);
unsafe impl Send for TrackedAlloc {}
lazy_static::lazy_static! {
    /// Device pointer -> allocations made through `devm_kzalloc`; entries are
    /// removed one at a time by `devm_kfree` or in bulk by `devres_free_all`.
    static ref DEVRES_MAP: Mutex<HashMap<usize, Vec<TrackedAlloc>>> =
        Mutex::new(HashMap::new());
}
/// Round `size` up to the next multiple of `align`.
/// NOTE: mask arithmetic — correct only for power-of-two `align`.
fn align_up(size: usize, align: usize) -> usize {
    let mask = align - 1;
    (size + mask) & !mask
}
/// Reconstruct the layout that `super::memory::kmalloc` uses for `size` and
/// `flags`, so the tracking table records what was actually allocated.
/// Returns `None` for zero-size requests.
fn tracked_layout(size: usize, flags: u32) -> Option<Layout> {
    if size == 0 {
        return None;
    }
    let (len, align) = if flags & GFP_DMA32 != 0 {
        // DMA32 allocations are page-aligned and not padded.
        (size, 4096)
    } else {
        // Regular allocations are padded to a 16-byte granule.
        (align_up(size, 16), 16)
    };
    Layout::from_size_align(len, align).ok()
}
#[no_mangle]
/// `devm_kzalloc` shim: zeroed allocation whose lifetime is tied to `dev`.
/// Delegates to `super::memory::kzalloc`; when both the allocation and `dev`
/// are non-null the pointer is recorded in `DEVRES_MAP` so
/// `devres_free_all(dev)` can release it later.
pub extern "C" fn devm_kzalloc(dev: *mut u8, size: usize, flags: u32) -> *mut u8 {
    let ptr = super::memory::kzalloc(size, flags);
    // Null dev: caller gets an untracked allocation (must kfree it manually).
    if ptr.is_null() || dev.is_null() {
        return ptr;
    }
    // tracked_layout is None only for size == 0, and kzalloc already
    // returned null for that case above.
    let layout = match tracked_layout(size, flags) {
        Some(layout) => layout,
        None => return ptr,
    };
    // A poisoned lock silently skips tracking; the allocation still works,
    // it just won't be released by devres_free_all.
    if let Ok(mut devres_map) = DEVRES_MAP.lock() {
        devres_map
            .entry(dev as usize)
            .or_default()
            .push(TrackedAlloc(ptr, layout));
    }
    ptr
}
#[no_mangle]
/// Free a devm allocation early: drop its tracking entry (when `dev` is
/// known) and release the memory through the shared kfree path.
pub extern "C" fn devm_kfree(dev: *mut u8, ptr: *mut u8) {
    if ptr.is_null() {
        return;
    }
    if !dev.is_null() {
        if let Ok(mut devres_map) = DEVRES_MAP.lock() {
            let dev_key = dev as usize;
            let mut entry_now_empty = false;
            if let Some(entries) = devres_map.get_mut(&dev_key) {
                if let Some(index) = entries.iter().position(|alloc| alloc.0 == ptr) {
                    entries.swap_remove(index);
                }
                entry_now_empty = entries.is_empty();
            }
            // Drop the per-device vector once its last allocation is gone.
            if entry_now_empty {
                devres_map.remove(&dev_key);
            }
        }
    }
    super::memory::kfree(ptr);
}
#[no_mangle]
/// Free every allocation still tracked for `dev` (managed-resource teardown).
pub extern "C" fn devres_free_all(dev: *mut u8) {
    if dev.is_null() {
        return;
    }
    // Detach the device's allocation list while holding the lock, then free
    // outside it so kfree's own locking never overlaps ours.
    let allocations = DEVRES_MAP
        .lock()
        .ok()
        .and_then(|mut devres_map| devres_map.remove(&(dev as usize)));
    for alloc in allocations.into_iter().flatten() {
        super::memory::kfree(alloc.0);
    }
}
@@ -0,0 +1,93 @@
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::ptr;
use syscall::CallFlags;
lazy_static::lazy_static! {
    /// File descriptor for the kernel's virtual→physical translation scheme,
    /// opened once on first use and cached for the process lifetime (never
    /// closed). `None` means the scheme is unavailable.
    static ref TRANSLATION_FD: Option<usize> = {
        // The old `.map(|fd| fd)` was an identity map (clippy: map_identity);
        // `.ok()` already yields the Option<usize> we want.
        libredox::call::open("/scheme/memory/translation",
            syscall::flag::O_CLOEXEC as i32, 0)
        .ok()
    };
}
/// Translate a virtual address to a physical address via the cached
/// translation-scheme fd. Returns 0 on any failure.
fn virt_to_phys(virt: usize) -> usize {
    let raw = match *TRANSLATION_FD {
        Some(fd) => fd,
        None => return 0,
    };
    let mut buf = virt.to_ne_bytes();
    // The original ignored the call result: on failure `buf` still held the
    // virtual address, which was then returned as a bogus "physical" address
    // indistinguishable from success. Report 0 instead so callers (e.g.
    // dma_alloc_coherent) can detect the failure.
    if libredox::call::call_ro(raw, &mut buf, CallFlags::empty(), &[]).is_err() {
        return 0;
    }
    usize::from_ne_bytes(buf)
}
#[no_mangle]
/// Allocate a zeroed, page-aligned DMA buffer; returns the virtual address
/// and writes the physical address to `*dma_handle`. Null on failure.
///
/// NOTE(review): assumes a page-aligned heap allocation is physically
/// contiguous on this target — confirm for multi-page sizes.
pub extern "C" fn dma_alloc_coherent(
    _dev: *mut u8,
    size: usize,
    dma_handle: *mut u64,
    _flags: u32,
) -> *mut u8 {
    if size == 0 || dma_handle.is_null() {
        return ptr::null_mut();
    }
    // Page alignment is required for device DMA.
    let layout = match Layout::from_size_align(size, 4096) {
        Ok(l) => l,
        Err(_) => return ptr::null_mut(),
    };
    let vaddr = unsafe { alloc_zeroed(layout) };
    if vaddr.is_null() {
        return ptr::null_mut();
    }
    // 0 signals translation failure; undo the allocation rather than hand
    // the device an unusable handle.
    let phys = virt_to_phys(vaddr as usize);
    if phys == 0 {
        unsafe { dealloc(vaddr, layout) };
        return ptr::null_mut();
    }
    unsafe { *dma_handle = phys as u64 };
    log::debug!(
        "dma_alloc_coherent: {} bytes at virt={:#x} phys={:#x}",
        size,
        vaddr as usize,
        phys
    );
    vaddr
}
#[no_mangle]
/// Release a buffer from `dma_alloc_coherent`. `size` must match the
/// original allocation so the reconstructed layout is identical.
pub extern "C" fn dma_free_coherent(_dev: *mut u8, size: usize, vaddr: *mut u8, _dma_handle: u64) {
    if size == 0 || vaddr.is_null() {
        return;
    }
    if let Ok(layout) = Layout::from_size_align(size, 4096) {
        // SAFETY: `vaddr` was produced by `alloc_zeroed` with exactly this
        // (size, 4096) layout in dma_alloc_coherent.
        unsafe { dealloc(vaddr, layout) };
    }
}
#[no_mangle]
/// Return the physical ("bus") address for `ptr` via the translation scheme;
/// 0 for null input or translation failure. No IOMMU programming happens.
pub extern "C" fn dma_map_single(_dev: *mut u8, ptr: *mut u8, _size: usize, _dir: u32) -> u64 {
    if ptr.is_null() {
        return 0;
    }
    virt_to_phys(ptr as usize) as u64
}
#[no_mangle]
/// No-op: dma_map_single performs no bookkeeping that would need undoing.
pub extern "C" fn dma_unmap_single(_dev: *mut u8, _addr: u64, _size: usize, _dir: u32) {}
#[no_mangle]
/// Accept any DMA mask; always reports success (0).
pub extern "C" fn dma_set_mask(_dev: *mut u8, _mask: u64) -> i32 {
    0
}
#[no_mangle]
/// Accept any coherent DMA mask; always reports success (0).
pub extern "C" fn dma_set_coherent_mask(_dev: *mut u8, _mask: u64) -> i32 {
    0
}
@@ -0,0 +1,265 @@
use std::collections::{BTreeMap, HashMap};
use std::ptr;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Mutex;
// Source of fresh GEM handle values; starts at 1 so handle 0 is never issued.
static NEXT_GEM_HANDLE: AtomicU32 = AtomicU32::new(1);
/// Layout-compatible view of the *leading fields* of the caller's GEM object
/// struct, used to write bookkeeping values back into caller memory.
/// NOTE(review): assumes the C side declares dev / handle_count / _pad /
/// size / driver_private in exactly this order — confirm against the header.
#[repr(C)]
struct CallerGemObject {
    dev: *mut u8,
    handle_count: u32,
    _pad: u32,
    size: usize,
    driver_private: *mut u8,
}
/// # Safety
/// `obj` must point to a live object whose leading fields match
/// `CallerGemObject`.
unsafe fn write_handle_count(obj: *mut u8, count: u32) {
    let cobj = obj as *mut CallerGemObject;
    unsafe {
        (*cobj).handle_count = count;
    }
}
/// # Safety
/// Same contract as `write_handle_count`.
unsafe fn write_size(obj: *mut u8, size: usize) {
    let cobj = obj as *mut CallerGemObject;
    unsafe {
        (*cobj).size = size;
    }
}
/// Shim-side bookkeeping for one registered GEM object.
struct ObjectState {
    size: usize,
    handle_count: u32,
    handles: Vec<u32>, // handles currently referencing this object
}
// Lazily-initialized global tables (const-init Mutex<Option<..>> rather than
// the lazy_static used elsewhere in this crate).
static OBJECTS: Mutex<Option<HashMap<usize, ObjectState>>> = Mutex::new(None);
static HANDLES: Mutex<Option<BTreeMap<u32, usize>>> = Mutex::new(None);
/// Run `f` with the (lazily created) object table, recovering from a
/// poisoned lock since the table holds only recoverable bookkeeping.
fn with_objects<F, R>(f: F) -> R
where
    F: FnOnce(&mut HashMap<usize, ObjectState>) -> R,
{
    let mut guard = OBJECTS.lock().unwrap_or_else(|e| e.into_inner());
    f(guard.get_or_insert_with(HashMap::new))
}
/// Run `f` with the (lazily created) handle table; same poisoning policy as
/// `with_objects`.
fn with_handles<F, R>(f: F) -> R
where
    F: FnOnce(&mut BTreeMap<u32, usize>) -> R,
{
    let mut guard = HANDLES.lock().unwrap_or_else(|e| e.into_inner());
    f(guard.get_or_insert_with(BTreeMap::new))
}
/// Hand out the next GEM handle value (starts at 1, increments per call).
fn next_gem_handle() -> u32 {
    NEXT_GEM_HANDLE.fetch_add(1, Ordering::Relaxed)
}
#[no_mangle]
/// Device registration is a no-op here; always reports success.
pub extern "C" fn drm_dev_register(_dev: *mut u8, _flags: u64) -> i32 {
    0
}
#[no_mangle]
/// No-op counterpart to drm_dev_register.
pub extern "C" fn drm_dev_unregister(_dev: *mut u8) {}
#[no_mangle]
/// Register a caller-allocated GEM object: zero its handle count, record its
/// size, and create the shim-side state entry. Always returns 0.
pub extern "C" fn drm_gem_object_init(_dev: *mut u8, obj: *mut u8, size: usize) -> i32 {
    let key = obj as usize;
    // SAFETY: the FFI contract requires `obj` to point at a live GEM object
    // with the CallerGemObject field prefix.
    unsafe {
        write_size(obj, size);
        write_handle_count(obj, 0);
    }
    with_objects(|objects| {
        // Re-initializing an existing object silently replaces its state.
        objects.insert(
            key,
            ObjectState {
                size,
                handle_count: 0,
                handles: Vec::new(),
            },
        );
    });
    log::debug!("drm_gem_object_init: obj={:#x} size={}", key, size);
    0
}
#[no_mangle]
/// Tear down a GEM object: forget its shim-side state and invalidate every
/// handle that still points at it.
pub extern "C" fn drm_gem_object_release(obj: *mut u8) {
    let key = obj as usize;
    // Take the state out while holding only the OBJECTS lock. The previous
    // version called with_handles() from inside the with_objects() closure,
    // holding both global locks at once in OBJECTS→HANDLES order — a latent
    // deadlock if any other path ever nests them the opposite way.
    let removed = with_objects(|objects| objects.remove(&key));
    if let Some(state) = removed {
        with_handles(|handles| {
            for h in &state.handles {
                handles.remove(h);
            }
        });
        log::debug!(
            "drm_gem_object_release: obj={:#x} handles_dropped={}",
            key,
            state.handles.len()
        );
    }
}
#[no_mangle]
/// Allocate a new handle for an initialized GEM object, bump its reference
/// count, and write the handle to `*handlep`. Returns 0 or -22 (EINVAL).
pub extern "C" fn drm_gem_handle_create(_file: *mut u8, obj: *mut u8, handlep: *mut u32) -> i32 {
    if handlep.is_null() {
        return -22;
    }
    let key = obj as usize;
    let handle = with_objects(|objects| match objects.get_mut(&key) {
        Some(state) => {
            let handle = next_gem_handle();
            state.handle_count += 1;
            // SAFETY: the object was registered via drm_gem_object_init and
            // therefore carries the CallerGemObject prefix.
            unsafe {
                write_handle_count(obj, state.handle_count);
            }
            state.handles.push(handle);
            Some(handle)
        }
        None => {
            log::error!(
                "drm_gem_handle_create: obj={:#x} not initialized (drm_gem_object_init not called)",
                key
            );
            None
        }
    });
    let handle = match handle {
        Some(h) => h,
        None => return -22,
    };
    // NOTE(review): the object could be released between the two lock scopes
    // here, leaving a dangling HANDLES entry; lookups tolerate this by
    // re-checking OBJECTS before dereferencing.
    with_handles(|handles| {
        handles.insert(handle, key);
    });
    unsafe { *handlep = handle };
    log::debug!("drm_gem_handle_create: handle={} obj={:#x}", handle, key);
    0
}
#[no_mangle]
/// Drop one handle: remove the handle mapping and, when the owning object is
/// still alive, decrement its reference count.
pub extern "C" fn drm_gem_handle_delete(_file: *mut u8, handle: u32) {
    let obj_key = with_handles(|handles| handles.remove(&handle));
    if let Some(key) = obj_key {
        with_objects(|objects| {
            if let Some(state) = objects.get_mut(&key) {
                state.handles.retain(|h| *h != handle);
                state.handle_count = state.handle_count.saturating_sub(1);
                // SAFETY: `key` is the address of a registered GEM object.
                unsafe {
                    write_handle_count(key as *mut u8, state.handle_count);
                }
            }
        });
    }
    log::debug!("drm_gem_handle_delete: handle={}", handle);
}
#[no_mangle]
/// Resolve `handle` to its object pointer WITHOUT taking a reference
/// (contrast with drm_gem_object_lookup). Returns null when the handle is
/// unknown or its object has been released.
pub extern "C" fn drm_gem_handle_lookup(_file: *mut u8, handle: u32) -> *mut u8 {
    let Some(key) = with_handles(|handles| handles.get(&handle).copied()) else {
        log::warn!("drm_gem_handle_lookup: handle={} not found", handle);
        return ptr::null_mut();
    };
    if with_objects(|objects| objects.contains_key(&key)) {
        key as *mut u8
    } else {
        log::warn!(
            "drm_gem_handle_lookup: handle={} maps to obj={:#x} but object released",
            handle,
            key
        );
        ptr::null_mut()
    }
}
#[no_mangle]
/// Resolve `handle` to its object pointer and take a reference (increments
/// handle_count); pair with drm_gem_object_put. Returns null for unknown
/// handles or released objects.
pub extern "C" fn drm_gem_object_lookup(_file: *mut u8, handle: u32) -> *mut u8 {
    let obj_key = with_handles(|handles| handles.get(&handle).copied());
    match obj_key {
        Some(key) => {
            let found = with_objects(|objects| {
                if let Some(state) = objects.get_mut(&key) {
                    state.handle_count += 1;
                    // SAFETY: `key` is the address of a registered GEM object.
                    unsafe {
                        write_handle_count(key as *mut u8, state.handle_count);
                    }
                    true
                } else {
                    false
                }
            });
            if found {
                key as *mut u8
            } else {
                log::warn!(
                    "drm_gem_object_lookup: handle={} maps to obj={:#x} but object released",
                    handle,
                    key
                );
                ptr::null_mut()
            }
        }
        None => {
            log::warn!("drm_gem_object_lookup: handle={} not found", handle);
            ptr::null_mut()
        }
    }
}
#[no_mangle]
/// Release one reference previously taken by `drm_gem_object_lookup`,
/// mirroring the new count into the caller's struct. No-op for null or
/// unknown objects.
pub extern "C" fn drm_gem_object_put(obj: *mut u8) {
    if obj.is_null() {
        return;
    }
    let key = obj as usize;
    with_objects(|objects| {
        let Some(state) = objects.get_mut(&key) else {
            return;
        };
        state.handle_count = state.handle_count.saturating_sub(1);
        // SAFETY: an object tracked in OBJECTS was registered through
        // drm_gem_object_init and carries the CallerGemObject prefix.
        unsafe { write_handle_count(obj, state.handle_count) };
    });
}
#[no_mangle]
/// Accept every ioctl and report success; only traces the command number.
pub extern "C" fn drm_ioctl(_dev: *mut u8, cmd: u32, _data: *mut u8, _file: *mut u8) -> i32 {
    log::trace!("drm_ioctl: cmd={:#x}", cmd);
    0
}
#[no_mangle]
/// No-op stub.
pub extern "C" fn drm_mode_config_reset(_dev: *mut u8) {}
#[no_mangle]
/// No-op stub; always reports success.
pub extern "C" fn drm_connector_register(_connector: *mut u8) -> i32 {
    0
}
#[no_mangle]
/// No-op stub; always returns 0.
pub extern "C" fn drm_crtc_handle_vblank(_crtc: *mut u8) -> u32 {
    0
}
@@ -0,0 +1,95 @@
use std::ptr;
/// C-visible firmware blob descriptor: byte count plus data pointer.
/// Ownership of `data` is handled by the `Drop` impl.
#[repr(C)]
pub struct Firmware {
    pub size: usize,
    pub data: *const u8,
}
impl Default for Firmware {
    // Raw pointers have no Default impl, so this is written by hand: an
    // empty blob with a null data pointer.
    fn default() -> Self {
        Firmware {
            size: 0,
            data: ptr::null(),
        }
    }
}
impl Drop for Firmware {
    /// Free the data buffer. The (size, align 1) layout reconstructed here
    /// must mirror the `alloc` call in `request_firmware`; empty or null
    /// blobs are left untouched.
    fn drop(&mut self) {
        if !self.data.is_null() && self.size > 0 {
            let layout = match std::alloc::Layout::from_size_align(self.size, 1) {
                Ok(l) => l,
                Err(_) => return,
            };
            // SAFETY: `data` was allocated in request_firmware with exactly
            // this layout and is exclusively owned by this Firmware.
            unsafe { std::alloc::dealloc(self.data as *mut u8, layout) };
            self.data = ptr::null();
            self.size = 0;
        }
    }
}
#[no_mangle]
pub extern "C" fn request_firmware(fw: *mut *mut Firmware, name: *const u8, _dev: *mut u8) -> i32 {
if fw.is_null() || name.is_null() {
return -22;
}
let name_str = unsafe {
let len = {
let mut l = 0;
while *name.add(l) != 0 {
l += 1;
}
l
};
let slice = std::slice::from_raw_parts(name, len);
match std::str::from_utf8(slice) {
Ok(s) => s,
Err(_) => return -22,
}
};
let firmware_path = format!("/scheme/firmware/{}", name_str);
log::info!(
"request_firmware: loading '{}' via {}",
name_str,
firmware_path
);
let data = match std::fs::read(&firmware_path) {
Ok(d) => d,
Err(e) => {
log::error!("request_firmware: failed to load '{}': {}", name_str, e);
return -2;
}
};
let size = data.len();
let layout = match std::alloc::Layout::from_size_align(size, 1) {
Ok(l) => l,
Err(_) => return -12,
};
let ptr = unsafe { std::alloc::alloc(layout) };
if ptr.is_null() {
return -12;
}
unsafe { ptr::copy_nonoverlapping(data.as_ptr(), ptr, size) };
let firmware = Box::new(Firmware {
size,
data: ptr as *const u8,
});
unsafe { *fw = Box::into_raw(firmware) };
log::info!("request_firmware: loaded {} bytes for '{}'", size, name_str);
0
}
#[no_mangle]
/// Return a `Firmware` obtained from `request_firmware`; dropping the Box
/// also frees the data buffer via `Drop for Firmware`.
pub extern "C" fn release_firmware(fw: *mut Firmware) {
    if !fw.is_null() {
        // SAFETY: `fw` was produced by Box::into_raw in request_firmware and
        // ownership transfers back here exactly once.
        unsafe { drop(Box::from_raw(fw)) };
    }
}
@@ -0,0 +1,151 @@
use std::collections::HashMap;
use std::ptr;
const EINVAL: i32 = 22;
const ENOSPC: i32 = 28;

/// Minimal stand-in for Linux's `struct idr`: a map from u32 ids to opaque
/// pointer values, plus a cursor (`next_id`) approximating the kernel's
/// cyclic allocation order.
#[repr(C)]
pub struct Idr {
    map: HashMap<u32, usize>,
    next_id: u32,
}

#[no_mangle]
/// Initialize the `Idr` in place. `idr` must point to writable storage; any
/// previous contents are overwritten without being dropped.
pub extern "C" fn idr_init(idr: *mut Idr) {
    if idr.is_null() {
        return;
    }
    // SAFETY: caller guarantees `idr` points to valid, writable storage.
    unsafe {
        ptr::write(
            idr,
            Idr {
                map: HashMap::new(),
                next_id: 0,
            },
        );
    }
}

/// Convert a C-style signed id to u32, rejecting negatives.
fn normalize_id(value: i32) -> Option<u32> {
    if value < 0 {
        None
    } else {
        Some(value as u32)
    }
}

#[no_mangle]
/// Allocate an unused id in `[start, end)` (`end == 0` means "no upper
/// bound") and associate it with `ptr`. Returns the id on success, -EINVAL
/// for bad arguments, -ENOSPC when the range is exhausted.
pub extern "C" fn idr_alloc(idr: *mut Idr, ptr: *mut u8, start: i32, end: i32, _gfp: u32) -> i32 {
    if idr.is_null() {
        return -EINVAL;
    }
    let start = match normalize_id(start) {
        Some(start) => start,
        None => return -EINVAL,
    };
    let end = match end {
        0 => None,
        value if value > 0 => Some(value as u32),
        _ => return -EINVAL,
    };
    if let Some(end) = end {
        if start >= end {
            return -EINVAL;
        }
    }
    // SAFETY: non-null `idr` is required by the FFI contract to point at an
    // Idr initialized via idr_init.
    let idr_ref = unsafe { &mut *idr };
    let value = ptr as usize;
    if let Some(end) = end {
        // Clamp the cursor into [start, end). The previous version used the
        // raw cursor here: once unbounded allocations had pushed next_id
        // past `end`, the wrap-around scan `start..cursor` could hand out
        // ids >= `end`, violating the requested range.
        let cursor = {
            let c = idr_ref.next_id.max(start);
            if c >= end {
                start
            } else {
                c
            }
        };
        for candidate in (cursor..end).chain(start..cursor) {
            if let std::collections::hash_map::Entry::Vacant(entry) = idr_ref.map.entry(candidate)
            {
                entry.insert(value);
                // candidate < end <= u32::MAX, so candidate + 1 cannot wrap.
                idr_ref.next_id = if candidate + 1 >= end {
                    start
                } else {
                    candidate + 1
                };
                return candidate as i32;
            }
        }
        return -ENOSPC;
    }
    // Unbounded: scan from the cursor up to u32::MAX, then wrap to the
    // region below the cursor.
    let cursor = idr_ref.next_id.max(start);
    for candidate in (cursor..=u32::MAX).chain(start..cursor) {
        if let std::collections::hash_map::Entry::Vacant(entry) = idr_ref.map.entry(candidate) {
            entry.insert(value);
            idr_ref.next_id = if candidate == u32::MAX {
                start
            } else {
                (candidate + 1).max(start)
            };
            return candidate as i32;
        }
    }
    -ENOSPC
}

#[no_mangle]
/// Look up the pointer registered under `id`; null when absent.
pub extern "C" fn idr_find(idr: *mut Idr, id: u32) -> *mut u8 {
    if idr.is_null() {
        return ptr::null_mut();
    }
    // SAFETY: see idr_alloc.
    let idr_ref = unsafe { &*idr };
    match idr_ref.map.get(&id) {
        Some(value) => *value as *mut u8,
        None => ptr::null_mut(),
    }
}

#[no_mangle]
/// Release `id` (no-op when absent) and pull the allocation cursor back so
/// the freed slot is reused promptly.
pub extern "C" fn idr_remove(idr: *mut Idr, id: u32) {
    if idr.is_null() {
        return;
    }
    // SAFETY: see idr_alloc.
    let idr_ref = unsafe { &mut *idr };
    idr_ref.map.remove(&id);
    if id < idr_ref.next_id {
        idr_ref.next_id = id;
    }
}

#[no_mangle]
/// Drop every registration and reset the cursor. The Idr storage itself is
/// owned by the caller and is not freed here.
pub extern "C" fn idr_destroy(idr: *mut Idr) {
    if idr.is_null() {
        return;
    }
    // SAFETY: see idr_alloc.
    let idr_ref = unsafe { &mut *idr };
    idr_ref.map.clear();
    idr_ref.next_id = 0;
}
@@ -0,0 +1,126 @@
use std::collections::HashMap;
use std::ptr;
use std::sync::Mutex;
/// Alias for 64-bit physical addresses used by the MMIO mapping API.
type PhysAddr = u64;
/// Bookkeeping for one live MMIO mapping, keyed by its virtual base address.
struct MappedRegion {
    size: usize,
}
lazy_static::lazy_static! {
    /// virt base -> region, so `iounmap` can recover the true mapped length.
    static ref MMIO_MAP_TRACKER: Mutex<HashMap<usize, MappedRegion>> = Mutex::new(HashMap::new());
}
#[no_mangle]
/// Map `size` bytes of physical MMIO at `phys` and return the virtual base,
/// or null on failure. The mapping is recorded in MMIO_MAP_TRACKER so
/// `iounmap` can find its length later.
pub extern "C" fn ioremap(phys: PhysAddr, size: usize) -> *mut u8 {
    if size == 0 || phys == 0 {
        return ptr::null_mut();
    }
    log::info!(
        "ioremap(phys={:#x}, size={}) — mapping via scheme:memory",
        phys,
        size
    );
    let ptr = match redox_driver_sys::memory::MmioRegion::map(
        phys,
        size,
        redox_driver_sys::memory::CacheType::DeviceMemory,
        redox_driver_sys::memory::MmioProt::READ_WRITE,
    ) {
        Ok(region) => {
            let p = region.as_ptr() as *mut u8;
            // Track the region's actual size (may differ from `size`).
            let s = region.size();
            if let Ok(mut tracker) = MMIO_MAP_TRACKER.lock() {
                tracker.insert(p as usize, MappedRegion { size: s });
            }
            // Deliberately leak the region handle so the mapping outlives
            // this call; iounmap undoes it with an explicit munmap.
            // NOTE(review): presumably MmioRegion's Drop would unmap —
            // confirm no other resources are leaked by the forget.
            std::mem::forget(region);
            p
        }
        Err(e) => {
            log::error!("ioremap: failed to map {:#x}+{:#x}: {:?}", phys, size, e);
            ptr::null_mut()
        }
    };
    ptr
}
#[no_mangle]
/// Undo an `ioremap`: only addresses present in the tracker are unmapped, so
/// a stray pointer is ignored rather than handed to munmap.
pub extern "C" fn iounmap(addr: *mut u8, size: usize) {
    if addr.is_null() || size == 0 {
        return;
    }
    if let Ok(mut tracker) = MMIO_MAP_TRACKER.lock() {
        if let Some(region) = tracker.remove(&(addr as usize)) {
            // Uses the tracked (full) mapping size, not the caller's `size`.
            // SAFETY: `addr`/`region.size` describe a live mapping created by
            // ioremap, and the tracker entry is removed exactly once.
            let _ = unsafe { libredox::call::munmap(addr as *mut (), region.size) };
        }
    }
}
#[no_mangle]
/// Volatile 32-bit MMIO read; returns 0 for a null pointer.
/// NOTE(review): addresses are cast to wider types — callers must pass
/// suitably aligned MMIO addresses, since unaligned volatile access is UB.
pub extern "C" fn readl(addr: *const u8) -> u32 {
    if addr.is_null() {
        return 0;
    }
    unsafe { ptr::read_volatile(addr as *const u32) }
}
#[no_mangle]
/// Volatile 32-bit MMIO write; no-op for a null pointer.
pub extern "C" fn writel(val: u32, addr: *mut u8) {
    if addr.is_null() {
        return;
    }
    unsafe { ptr::write_volatile(addr as *mut u32, val) };
}
#[no_mangle]
/// Volatile 64-bit MMIO read; returns 0 for a null pointer.
pub extern "C" fn readq(addr: *const u8) -> u64 {
    if addr.is_null() {
        return 0;
    }
    unsafe { ptr::read_volatile(addr as *const u64) }
}
#[no_mangle]
/// Volatile 64-bit MMIO write; no-op for a null pointer.
pub extern "C" fn writeq(val: u64, addr: *mut u8) {
    if addr.is_null() {
        return;
    }
    unsafe { ptr::write_volatile(addr as *mut u64, val) };
}
#[no_mangle]
/// Volatile 8-bit MMIO read; returns 0 for a null pointer.
pub extern "C" fn readb(addr: *const u8) -> u8 {
    if addr.is_null() {
        return 0;
    }
    unsafe { ptr::read_volatile(addr) }
}
#[no_mangle]
/// Volatile 8-bit MMIO write; no-op for a null pointer.
pub extern "C" fn writeb(val: u8, addr: *mut u8) {
    if addr.is_null() {
        return;
    }
    unsafe { ptr::write_volatile(addr, val) };
}
#[no_mangle]
/// Volatile 16-bit MMIO read; returns 0 for a null pointer.
pub extern "C" fn readw(addr: *const u8) -> u16 {
    if addr.is_null() {
        return 0;
    }
    unsafe { ptr::read_volatile(addr as *const u16) }
}
#[no_mangle]
/// Volatile 16-bit MMIO write; no-op for a null pointer.
pub extern "C" fn writew(val: u16, addr: *mut u8) {
    if addr.is_null() {
        return;
    }
    unsafe { ptr::write_volatile(addr as *mut u16, val) };
}
@@ -0,0 +1,126 @@
use std::collections::HashMap;
use std::fs::File;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
/// Wrapper making the raw `dev_id` pointer `Send` so it can move into the
/// IRQ reader thread; the pointee is owned by the registering driver for the
/// lifetime of the registration.
struct SendU8Ptr(*mut u8);
impl SendU8Ptr {
    fn as_ptr(&self) -> *mut u8 {
        self.0
    }
}
unsafe impl Send for SendU8Ptr {}
/// C handler signature: (irq, dev_id) -> status. The return value is ignored
/// by this shim.
pub type IrqHandler = extern "C" fn(i32, *mut u8) -> u32;
/// State for one registered IRQ: the stop flag, the scheme fd kept in the
/// table, and the reader thread's join handle.
struct IrqEntry {
    cancel: Arc<AtomicBool>,
    fd: Option<File>,
    handle: Option<std::thread::JoinHandle<()>>,
}
lazy_static::lazy_static! {
    /// irq number -> registration; consumed by free_irq.
    static ref IRQ_TABLE: Mutex<HashMap<u32, IrqEntry>> = Mutex::new(HashMap::new());
}
#[no_mangle]
/// Register `handler` for `irq`: opens /scheme/irq/<irq> and spawns a reader
/// thread that invokes the handler once per event read. Returns 0 on
/// success, -22 on any failure.
///
/// NOTE(review): the reader blocks in `read`; `free_irq` drops only its own
/// clone of the fd, which may not wake this clone's blocked read — confirm
/// the irq scheme unblocks readers on close, otherwise free_irq's join can
/// stall until the next interrupt.
pub extern "C" fn request_irq(
    irq: u32,
    handler: IrqHandler,
    _flags: u32,
    _name: *const u8,
    dev_id: *mut u8,
) -> i32 {
    let path = format!("/scheme/irq/{}", irq);
    let fd = match std::fs::File::open(&path) {
        Ok(f) => f,
        Err(e) => {
            log::error!("request_irq: failed to open {} : {}", path, e);
            return -22;
        }
    };
    // Separate clone for the thread; the original fd stays in IRQ_TABLE so
    // free_irq can drop it.
    let thread_fd = match fd.try_clone() {
        Ok(f) => f,
        Err(e) => {
            log::error!("request_irq: failed to clone {} : {}", path, e);
            return -22;
        }
    };
    let cancel = Arc::new(AtomicBool::new(false));
    let cancel_clone = Arc::clone(&cancel);
    let send_dev_id = SendU8Ptr(dev_id);
    let handle = std::thread::spawn(move || {
        use std::io::Read;
        let mut fd = thread_fd;
        let mut buf = [0u8; 8];
        loop {
            // Checked both before and after the blocking read so a cancel
            // set mid-read is honored without another handler invocation.
            if cancel_clone.load(Ordering::Acquire) {
                break;
            }
            match fd.read(&mut buf) {
                Ok(0) | Err(_) => break,
                Ok(_) => {
                    if cancel_clone.load(Ordering::Acquire) {
                        break;
                    }
                    handler(irq as i32, send_dev_id.as_ptr());
                }
            }
        }
    });
    let entry = IrqEntry {
        cancel: Arc::clone(&cancel),
        fd: Some(fd),
        handle: Some(handle),
    };
    if let Ok(mut table) = IRQ_TABLE.lock() {
        table.insert(irq, entry);
    } else {
        // Poisoned table: tear the reader thread back down before failing.
        cancel.store(true, Ordering::Release);
        let mut entry = entry;
        let _ = entry.fd.take();
        if let Some(handle) = entry.handle.take() {
            let _ = handle.join();
        }
        log::error!("request_irq: failed to record handler for IRQ {}", irq);
        return -22;
    }
    log::info!("request_irq: registered handler for IRQ {}", irq);
    0
}
#[no_mangle]
/// Unregister `irq`: signal the reader thread to stop, drop the table's fd,
/// then join the thread.
/// NOTE(review): if the reader is blocked in `read` on its own fd clone,
/// this join may stall until the next interrupt — confirm the irq scheme
/// wakes readers when the sibling fd closes.
pub extern "C" fn free_irq(irq: u32, _dev_id: *mut u8) {
    let removed = IRQ_TABLE
        .lock()
        .ok()
        .and_then(|mut table| table.remove(&irq));
    if let Some(mut entry) = removed {
        entry.cancel.store(true, Ordering::Release);
        drop(entry.fd.take());
        if let Some(handle) = entry.handle.take() {
            let _ = handle.join();
        }
    }
    log::info!("free_irq: released IRQ {}", irq);
}
#[no_mangle]
/// No-op stub: per-IRQ masking is not implemented.
pub extern "C" fn enable_irq(_irq: u32) {}
#[no_mangle]
/// No-op counterpart to enable_irq.
pub extern "C" fn disable_irq(_irq: u32) {}
@@ -0,0 +1,253 @@
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::collections::HashMap;
use std::ptr;
use std::sync::Mutex;
use syscall::{flag, CallFlags};
/// Raw pointer wrapper usable as a `Send` hash-map key for the trackers
/// below; equality and hashing are by address.
struct SendU8Ptr(*mut u8);
impl SendU8Ptr {
    #[allow(dead_code)]
    fn as_ptr(&self) -> *mut u8 {
        self.0
    }
}
unsafe impl Send for SendU8Ptr {}
impl PartialEq for SendU8Ptr {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
impl Eq for SendU8Ptr {}
impl std::hash::Hash for SendU8Ptr {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        (self.0 as usize).hash(state);
    }
}
lazy_static::lazy_static! {
    /// ptr -> layout for ordinary kmalloc allocations.
    static ref ALLOC_TRACKER: Mutex<HashMap<SendU8Ptr, Layout>> = Mutex::new(HashMap::new());
    /// ptr -> layout for GFP_DMA32 allocations (page-aligned, phys < 4 GiB).
    static ref DMA32_TRACKER: Mutex<HashMap<SendU8Ptr, Layout>> = Mutex::new(HashMap::new());
}
/// Round `size` up to the next multiple of `align` (power-of-two only).
fn align_up(size: usize, align: usize) -> usize {
    (size + align - 1) & !(align - 1)
}
/// Translate virtual address to physical address via scheme:memory/translation.
/// Returns 0 on failure.
fn virt_to_phys(virt: usize) -> usize {
    let fd = match libredox::Fd::open("/scheme/memory/translation", flag::O_CLOEXEC as i32, 0) {
        Ok(f) => f,
        Err(_) => return 0,
    };
    let mut buf = virt.to_ne_bytes();
    // The original discarded the call result: on failure `buf` still held
    // the virtual address, which was then returned as a fake "physical"
    // address — violating the documented "0 on failure" contract and
    // defeating dma32_alloc's < 4 GiB verification.
    if libredox::call::call_ro(fd.raw(), &mut buf, CallFlags::empty(), &[]).is_err() {
        return 0;
    }
    usize::from_ne_bytes(buf)
}
// How many fresh allocations to try before giving up on a < 4 GiB address.
const GFP_DMA32_RETRIES: usize = 8;
// First physical address NOT reachable by a 32-bit DMA engine (4 GiB).
const DMA32_LIMIT: u64 = 0x1_0000_0000;
/// Allocate memory with physical address below 4GB (GFP_DMA32).
/// Tries up to GFP_DMA32_RETRIES allocations; if none land below 4GB,
/// returns null rather than giving a buffer the device can't DMA to.
/// `size > 0` is guaranteed by the kmalloc caller.
fn dma32_alloc(size: usize) -> *mut u8 {
    let layout = match Layout::from_size_align(size, 4096) {
        Ok(l) => l,
        Err(_) => return ptr::null_mut(),
    };
    // NOTE(review): retries re-allocate from the same heap, so the same
    // placement may recur; confirm the allocator varies placement enough
    // for this loop to make progress.
    for attempt in 0..GFP_DMA32_RETRIES {
        let candidate = unsafe { alloc_zeroed(layout) };
        if candidate.is_null() {
            return ptr::null_mut();
        }
        let phys = virt_to_phys(candidate as usize);
        if phys == 0 {
            log::warn!(
                "dma32_alloc: virt_to_phys failed for {:#x}",
                candidate as usize
            );
            unsafe { dealloc(candidate, layout) };
            continue;
        }
        if phys as u64 >= DMA32_LIMIT {
            log::debug!(
                "dma32_alloc: attempt {} phys={:#x} >= 4GB, retrying",
                attempt,
                phys
            );
            unsafe { dealloc(candidate, layout) };
            continue;
        }
        log::debug!(
            "dma32_alloc: {} bytes at virt={:#x} phys={:#x} (< 4GB)",
            size,
            candidate as usize,
            phys
        );
        if let Ok(mut tracker) = DMA32_TRACKER.lock() {
            tracker.insert(SendU8Ptr(candidate), layout);
        } else {
            // Untracked memory could never be freed; fail instead of leaking.
            unsafe { dealloc(candidate, layout) };
            return ptr::null_mut();
        }
        return candidate;
    }
    log::warn!(
        "dma32_alloc: failed to get <4GB physical address after {} retries for {} bytes",
        GFP_DMA32_RETRIES,
        size
    );
    ptr::null_mut()
}
// GFP flag values understood by this shim.
const GFP_KERNEL: u32 = 0;
const GFP_ATOMIC: u32 = 1;
const GFP_DMA32: u32 = 2;
#[no_mangle]
/// Allocate kernel memory.
/// GFP_DMA32 flag routes through a dedicated path with physical address verification
/// to ensure allocations are suitable for devices with 32-bit DMA limitations.
/// Note: unlike Linux kmalloc, memory is always zeroed here (alloc_zeroed).
pub extern "C" fn kmalloc(size: usize, flags: u32) -> *mut u8 {
    if size == 0 {
        return ptr::null_mut();
    }
    // Handle GFP_DMA32 allocations via dedicated path
    if flags & GFP_DMA32 != 0 {
        return dma32_alloc(size);
    }
    // Pad to 16 bytes so every allocation has a uniform minimum alignment.
    let aligned_size = align_up(size, 16);
    let layout = match Layout::from_size_align(aligned_size, 16) {
        Ok(l) => l,
        Err(_) => return ptr::null_mut(),
    };
    let ptr = unsafe { alloc_zeroed(layout) };
    if ptr.is_null() {
        return ptr::null_mut();
    }
    // Record the layout so kfree can reconstruct it; a poisoned lock means
    // the pointer is returned untracked (kfree then silently ignores it).
    if let Ok(mut tracker) = ALLOC_TRACKER.lock() {
        tracker.insert(SendU8Ptr(ptr), layout);
    }
    ptr
}
#[no_mangle]
/// Zeroed allocation. kmalloc currently zeroes already (alloc_zeroed), but
/// the explicit re-zero keeps kzalloc's contract independent of kmalloc's
/// implementation.
pub extern "C" fn kzalloc(size: usize, flags: u32) -> *mut u8 {
    let allocation = kmalloc(size, flags);
    if allocation.is_null() {
        return allocation;
    }
    // SAFETY: `allocation` is a live buffer of at least `size` bytes.
    unsafe { ptr::write_bytes(allocation, 0, size) };
    allocation
}
#[no_mangle]
/// Free a pointer from kmalloc/kzalloc. Unknown pointers (never tracked, or
/// whose tracking failed at allocation time) are silently ignored —
/// deliberately leaky rather than risking a dealloc with a wrong layout.
pub extern "C" fn kfree(ptr: *const u8) {
    if ptr.is_null() {
        return;
    }
    // Check DMA32 tracker first
    {
        let mut dma32_tracker = match DMA32_TRACKER.lock() {
            Ok(t) => t,
            Err(_) => return,
        };
        if let Some(layout) = dma32_tracker.remove(&SendU8Ptr(ptr as *mut u8)) {
            // SAFETY: tracker entries pair each pointer with the exact
            // layout it was allocated with.
            unsafe { dealloc(ptr as *mut u8, layout) };
            return;
        }
    }
    // Check regular allocator tracker
    let layout = {
        let mut tracker = match ALLOC_TRACKER.lock() {
            Ok(t) => t,
            Err(_) => return,
        };
        match tracker.remove(&SendU8Ptr(ptr as *mut u8)) {
            Some(l) => l,
            None => return,
        }
    };
    // SAFETY: as above — the layout was recorded at allocation time.
    unsafe { dealloc(ptr as *mut u8, layout) };
}
#[cfg(test)]
mod tests {
    //! Unit tests for the allocation shims.
    //! NOTE(review): the GFP_DMA32 tests depend on /scheme/memory/translation
    //! answering; off a Redox target virt_to_phys returns 0 and dma32_alloc
    //! yields null, so these presumably run on-target only — confirm CI setup.
    use super::*;
    #[test]
    fn test_kmalloc_basic() {
        let p = kmalloc(64, GFP_KERNEL);
        assert!(!p.is_null());
        kfree(p);
    }
    #[test]
    fn test_kzalloc_zeroed() {
        let p = kzalloc(64, GFP_KERNEL);
        assert!(!p.is_null());
        for i in 0..64 {
            assert_eq!(unsafe { *p.add(i) }, 0);
        }
        kfree(p);
    }
    #[test]
    fn test_kfree_null() {
        kfree(ptr::null());
    }
    #[test]
    fn test_kmalloc_zero_size() {
        assert!(kmalloc(0, GFP_KERNEL).is_null());
    }
    #[test]
    fn test_kmalloc_dma32_basic() {
        let p = kmalloc(64, GFP_DMA32);
        assert!(!p.is_null(), "GFP_DMA32 allocation should succeed");
        kfree(p);
    }
    #[test]
    fn test_kmalloc_dma32_zero_size() {
        assert!(
            kmalloc(0, GFP_DMA32).is_null(),
            "GFP_DMA32 with size 0 should return null"
        );
    }
    #[test]
    fn test_kfree_dma32_null() {
        // kfree(null) should not crash
        kfree(ptr::null());
    }
    #[test]
    fn test_kmalloc_dma32_multiple() {
        // Allocate and free multiple DMA32 buffers
        let p1 = kmalloc(128, GFP_DMA32);
        let p2 = kmalloc(256, GFP_DMA32);
        assert!(!p1.is_null());
        assert!(!p2.is_null());
        kfree(p1);
        kfree(p2);
    }
}
@@ -0,0 +1,13 @@
pub mod device;
pub mod dma;
pub mod drm_shim;
pub mod firmware;
pub mod idr;
pub mod io;
pub mod irq;
pub mod memory;
pub mod pci;
pub mod sync;
pub mod timer;
pub mod wait;
pub mod workqueue;
@@ -0,0 +1,443 @@
use std::os::raw::c_ulong;
use std::ptr;
use std::sync::Mutex;
use redox_driver_sys::pci::{
enumerate_pci_class, PciDevice, PciDeviceInfo, PciLocation, PCI_CLASS_DISPLAY,
};
const EINVAL: i32 = 22;
const ENODEV: i32 = 19;
const EIO: i32 = 5;
const PCI_ANY_ID: u32 = !0;
/// Embedded `struct device` equivalent carried inside `PciDev`.
#[repr(C)]
pub struct Device {
    driver: *mut u8,
    driver_data: *mut u8,
    platform_data: *mut u8,
    of_node: *mut u8,
    dma_mask: u64,
}

impl Default for Device {
    /// Hand-written because `#[derive(Default)]` does not compile here: raw
    /// pointers (`*mut u8`) have no `Default` implementation in std. All
    /// pointer fields start null, the DMA mask zero.
    fn default() -> Self {
        Device {
            driver: ptr::null_mut(),
            driver_data: ptr::null_mut(),
            platform_data: ptr::null_mut(),
            of_node: ptr::null_mut(),
            dma_mask: 0,
        }
    }
}
/// C-visible PCI device record (subset of Linux's `struct pci_dev`).
/// NOTE(review): field order is part of the FFI contract — confirm it
/// matches the C header.
#[repr(C)]
pub struct PciDev {
    pub vendor: u16,
    pub device: u16,
    bus: u8,
    dev: u8,
    func: u8,
    revision: u8,
    irq: u32,
    // BAR base addresses and sizes, indexed 0..6.
    bars: [u64; 6],
    bar_sizes: [u64; 6],
    driver_data: *mut u8,
    device_obj: Device,
    pub enabled: bool,
}
/// One entry of a Linux-style PCI id match table; tables are terminated by
/// an all-zero entry (see matching_id_entry).
#[repr(C)]
pub struct PciDeviceId {
    vendor: u32,
    device: u32,
    subvendor: u32,
    subdevice: u32,
    class: u32,
    class_mask: u32,
    driver_data: c_ulong, // opaque per-entry value forwarded to the driver
}
impl Default for PciDev {
    /// All-zero/null device record; populated for real by build_pci_dev.
    fn default() -> Self {
        PciDev {
            vendor: 0,
            device: 0,
            bus: 0,
            dev: 0,
            func: 0,
            revision: 0,
            irq: 0,
            bars: [0; 6],
            bar_sizes: [0; 6],
            driver_data: ptr::null_mut(),
            device_obj: Device::default(),
            enabled: false,
        }
    }
}
/// Snapshot of the single device this shim currently drives.
#[derive(Clone, Copy, Debug)]
struct CurrentDevice {
    location: PciLocation,
    ptr: usize, // raw Box<PciDev> pointer, owned by CURRENT_DEVICE
}
lazy_static::lazy_static! {
    /// The one active device — this shim drives a single device at a time.
    static ref CURRENT_DEVICE: Mutex<Option<CurrentDevice>> = Mutex::new(None);
    /// Probe callback saved by pci_register_driver.
    static ref REGISTERED_PROBE: Mutex<Option<PciDriverProbe>> = Mutex::new(None);
}
/// NOTE(review): 0x1002 is the ATI/AMD *GPU* vendor id; Linux's
/// PCI_VENDOR_ID_AMD is 0x1022 — confirm the naming here is intentional.
pub const PCI_VENDOR_ID_AMD: u16 = 0x1002;
pub const PCI_VENDOR_ID_INTEL: u16 = 0x8086;
/// Resolve which PCI location to operate on: prefer the globally recorded
/// current device (set by pci_register_driver), falling back to the BDF
/// stored in `dev`. Fails with -EINVAL only when both are unavailable.
fn current_location_from_state(dev: *mut PciDev) -> Result<PciLocation, i32> {
    if let Ok(state) = CURRENT_DEVICE.lock() {
        if let Some(current) = *state {
            return Ok(current.location);
        }
    }
    if dev.is_null() {
        return Err(-EINVAL);
    }
    // NOTE(review): segment is hard-coded to 0 — confirm multi-segment
    // systems are out of scope.
    Ok(PciLocation {
        segment: 0,
        bus: unsafe { (*dev).bus },
        device: unsafe { (*dev).dev },
        function: unsafe { (*dev).func },
    })
}
/// Open the device selected by `current_location_from_state`, mapping any
/// open error to -ENODEV (with a warning).
fn open_current_device(dev: *mut PciDev) -> Result<PciDevice, i32> {
    let location = current_location_from_state(dev)?;
    match PciDevice::open_location(&location) {
        Ok(device) => Ok(device),
        Err(error) => {
            log::warn!("pci: failed to open PCI device {}: {}", location, error);
            Err(-ENODEV)
        }
    }
}
/// Check one id-table entry against an enumerated device.
fn matches_id(info: &PciDeviceInfo, id: &PciDeviceId) -> bool {
    // Rebuild the 24-bit class code: base class | subclass | prog-if.
    let class =
        ((info.class_code as u32) << 16) | ((info.subclass as u32) << 8) | info.prog_if as u32;
    let vendor_matches = id.vendor == PCI_ANY_ID || id.vendor == info.vendor_id as u32;
    let device_matches = id.device == PCI_ANY_ID || id.device == info.device_id as u32;
    // Subsystem ids are never read from the device, so an entry only matches
    // when it wildcards them. NOTE(review): entries with specific
    // subvendor/subdevice values can never match — confirm this is intended.
    let subvendor_matches = id.subvendor == PCI_ANY_ID;
    let subdevice_matches = id.subdevice == PCI_ANY_ID;
    let class_matches = id.class_mask == 0 || (class & id.class_mask) == (id.class & id.class_mask);
    vendor_matches && device_matches && subvendor_matches && subdevice_matches && class_matches
}
/// Walk a Linux-style id table (terminated by an all-zero entry) and return
/// a pointer to the first entry matching `info`, or None.
fn matching_id_entry(
    info: &PciDeviceInfo,
    mut id: *const PciDeviceId,
) -> Option<*const PciDeviceId> {
    if id.is_null() {
        return None;
    }
    loop {
        // SAFETY: the FFI contract requires `id` to point into a table that
        // ends with an all-zero sentinel; iteration stops there.
        let current = unsafe { &*id };
        if current.vendor == 0
            && current.device == 0
            && current.subvendor == 0
            && current.subdevice == 0
            && current.class == 0
            && current.class_mask == 0
            && current.driver_data == 0
        {
            return None;
        }
        if matches_id(info, current) {
            return Some(id);
        }
        id = unsafe { id.add(1) };
    }
}
/// Assemble a `PciDev` from enumerated device info plus the matched id-table
/// entry (whose driver_data is forwarded as an opaque pointer).
fn build_pci_dev(info: &PciDeviceInfo, id: &PciDeviceId) -> PciDev {
    let mut dev = PciDev {
        vendor: info.vendor_id,
        device: info.device_id,
        bus: info.location.bus,
        dev: info.location.device,
        func: info.location.function,
        revision: info.revision,
        irq: info.irq.unwrap_or(0), // 0 = no IRQ reported
        bars: [0; 6],
        bar_sizes: [0; 6],
        driver_data: id.driver_data as usize as *mut u8,
        device_obj: Device::default(),
        enabled: false,
    };
    // Copy every reported BAR; out-of-range indices are silently skipped.
    for bar in &info.bars {
        if bar.index < dev.bars.len() {
            dev.bars[bar.index] = bar.addr;
            dev.bar_sizes[bar.index] = bar.size;
        }
    }
    dev
}
/// Install `dev_ptr` as the current device, freeing any previous one. Takes
/// ownership of the Box behind `dev_ptr`.
fn replace_current_device(location: PciLocation, dev_ptr: *mut PciDev) {
    if let Ok(mut state) = CURRENT_DEVICE.lock() {
        if let Some(previous) = state.replace(CurrentDevice {
            location,
            ptr: dev_ptr as usize,
        }) {
            // SAFETY: `previous.ptr` came from Box::into_raw in
            // pci_register_driver and is owned exclusively by this global.
            unsafe { drop(Box::from_raw(previous.ptr as *mut PciDev)) };
        }
    }
}
/// Drop the current device (if any), freeing its PciDev allocation.
fn clear_current_device() {
    if let Ok(mut state) = CURRENT_DEVICE.lock() {
        if let Some(previous) = state.take() {
            // SAFETY: see replace_current_device.
            unsafe { drop(Box::from_raw(previous.ptr as *mut PciDev)) };
        }
    }
}
#[no_mangle]
/// Mark the device enabled. Only the shim's `enabled` flag is flipped — no
/// config-space command-register bits are touched here.
pub extern "C" fn pci_enable_device(dev: *mut PciDev) -> i32 {
    if dev.is_null() {
        return -EINVAL;
    }
    // SAFETY: non-null `dev` must point at the live PciDev created by
    // pci_register_driver, per the FFI contract.
    unsafe {
        log::info!(
            "pci_enable_device: vendor=0x{:04x} device=0x{:04x}",
            (*dev).vendor,
            (*dev).device
        );
        (*dev).enabled = true;
    }
    0
}
#[no_mangle]
/// Clear the shim's `enabled` flag; no config-space access happens.
pub extern "C" fn pci_disable_device(dev: *mut PciDev) {
    // SAFETY: same contract as pci_enable_device; null is tolerated.
    let Some(device) = (unsafe { dev.as_mut() }) else {
        return;
    };
    log::info!("pci_disable_device");
    device.enabled = false;
}
#[no_mangle]
/// Map BAR `bar` of `dev` into this address space via `super::io::ioremap`.
/// `max_len == 0` means "the whole BAR". Returns null for a bad BAR index,
/// a zero-length request, or a mapping failure.
pub extern "C" fn pci_iomap(dev: *mut PciDev, bar: u32, max_len: usize) -> *mut u8 {
    if dev.is_null() || bar >= 6 {
        return ptr::null_mut();
    }
    let len = if max_len > 0 {
        max_len
    } else {
        // SAFETY: non-null `dev` points at a live PciDev per the FFI contract.
        unsafe { (*dev).bar_sizes[bar as usize] as usize }
    };
    if len == 0 {
        return ptr::null_mut();
    }
    // The old message warned about a "heap fallback", but ioremap performs a
    // real MMIO mapping through scheme:memory — log it accurately instead.
    log::info!("pci_iomap: bar={} len={} — mapping via ioremap", bar, len);
    super::io::ioremap(unsafe { (*dev).bars[bar as usize] }, len)
}
#[no_mangle]
/// Unmap a BAR mapping created by pci_iomap (delegates to io::iounmap).
pub extern "C" fn pci_iounmap(_dev: *mut PciDev, addr: *mut u8, size: usize) {
    super::io::iounmap(addr, size);
}
#[no_mangle]
/// Read a 32-bit value from config space at `offset` into `*val`.
/// Returns 0 on success, -EINVAL for null arguments, -ENODEV when the
/// device can't be opened, -EIO on a failed read.
pub extern "C" fn pci_read_config_dword(dev: *mut PciDev, offset: u32, val: *mut u32) -> i32 {
    if dev.is_null() || val.is_null() {
        return -EINVAL;
    }
    // Re-opens the device each call; see open_current_device.
    let mut pci = match open_current_device(dev) {
        Ok(pci) => pci,
        Err(error) => return error,
    };
    match pci.read_config_dword(offset as u64) {
        Ok(read) => {
            unsafe { *val = read };
            log::info!(
                "pci_read_config_dword: offset=0x{:x} -> 0x{:08x}",
                offset,
                read
            );
            0
        }
        Err(error) => {
            log::warn!(
                "pci_read_config_dword: failed at offset=0x{:x}: {}",
                offset,
                error
            );
            -EIO
        }
    }
}
#[no_mangle]
/// Write a 32-bit value to config space at `offset`.
/// Returns 0 on success, -EINVAL for a null dev, -ENODEV when the device
/// can't be opened, -EIO on a failed write.
pub extern "C" fn pci_write_config_dword(dev: *mut PciDev, offset: u32, val: u32) -> i32 {
    if dev.is_null() {
        return -EINVAL;
    }
    let mut pci = match open_current_device(dev) {
        Ok(pci) => pci,
        Err(error) => return error,
    };
    match pci.write_config_dword(offset as u64, val) {
        Ok(()) => {
            log::info!(
                "pci_write_config_dword: offset=0x{:x} val=0x{:08x}",
                offset,
                val
            );
            0
        }
        Err(error) => {
            log::warn!(
                "pci_write_config_dword: failed at offset=0x{:x} val=0x{:08x}: {}",
                offset,
                val,
                error
            );
            -EIO
        }
    }
}
#[no_mangle]
/// Logs only; bus-mastering is not programmed from here.
/// NOTE(review): confirm the platform enables bus mastering elsewhere.
pub extern "C" fn pci_set_master(dev: *mut PciDev) {
    if dev.is_null() {
        return;
    }
    log::info!("pci_set_master");
}
#[no_mangle]
/// Base address of BAR `bar`; 0 for null dev or bad index.
pub extern "C" fn pci_resource_start(dev: *const PciDev, bar: u32) -> u64 {
    if dev.is_null() || bar >= 6 {
        return 0;
    }
    unsafe { (*dev).bars[bar as usize] }
}
#[no_mangle]
/// Size of BAR `bar`; 0 for null dev or bad index.
pub extern "C" fn pci_resource_len(dev: *const PciDev, bar: u32) -> u64 {
    if dev.is_null() || bar >= 6 {
        return 0;
    }
    unsafe { (*dev).bar_sizes[bar as usize] }
}
/// Probe callback: (dev, matched id-table entry) -> 0 or negative errno.
pub type PciDriverProbe = extern "C" fn(*mut PciDev, *const PciDeviceId) -> i32;
/// Remove callback invoked from pci_unregister_driver.
pub type PciDriverRemove = extern "C" fn(*mut PciDev);
/// C-visible driver descriptor (subset of Linux's `struct pci_driver`).
#[repr(C)]
pub struct PciDriver {
    name: *const u8,
    id_table: *const PciDeviceId, // terminated by an all-zero entry
    probe: Option<PciDriverProbe>,
    remove: Option<PciDriverRemove>,
}
#[no_mangle]
/// Register a PCI driver: enumerate display-class devices, find the first
/// one matching the driver's id table, build a `PciDev` for it, record it as
/// the current device, and invoke the driver's probe callback. Returns the
/// probe status, or a negative errno when setup fails earlier.
pub extern "C" fn pci_register_driver(drv: *mut PciDriver) -> i32 {
    if drv.is_null() {
        return -EINVAL;
    }
    // SAFETY: non-null `drv` must point at a valid PciDriver (FFI contract).
    let driver = unsafe { &*drv };
    let probe = match driver.probe {
        Some(probe) => probe,
        None => {
            log::warn!("pci_register_driver: missing probe callback");
            return -EINVAL;
        }
    };
    // Only display-class devices are enumerated (GPU-driver shim).
    let devices = match enumerate_pci_class(PCI_CLASS_DISPLAY) {
        Ok(devices) => devices,
        Err(error) => {
            log::warn!("pci_register_driver: PCI enumeration failed: {}", error);
            return -ENODEV;
        }
    };
    // First candidate with a matching id-table entry wins.
    let Some((info, id_ptr)) = devices.into_iter().find_map(|candidate| {
        matching_id_entry(&candidate, driver.id_table).map(|id_ptr| (candidate, id_ptr))
    }) else {
        log::info!("pci_register_driver: no matching PCI display device found");
        return -ENODEV;
    };
    let mut pci = match PciDevice::from_info(&info) {
        Ok(pci) => pci,
        Err(error) => {
            log::warn!(
                "pci_register_driver: failed to open {}: {}",
                info.location,
                error
            );
            return -ENODEV;
        }
    };
    let full_info = match pci.full_info() {
        Ok(full_info) => full_info,
        Err(error) => {
            log::warn!(
                "pci_register_driver: failed to read PCI info for {}: {}",
                info.location,
                error
            );
            return -EIO;
        }
    };
    // SAFETY: `id_ptr` was returned by matching_id_entry, which only yields
    // pointers into the caller-provided id table.
    let id = unsafe { &*id_ptr };
    let dev_ptr = Box::into_raw(Box::new(build_pci_dev(&full_info, id)));
    replace_current_device(full_info.location, dev_ptr);
    if let Ok(mut registered_probe) = REGISTERED_PROBE.lock() {
        *registered_probe = Some(probe);
    }
    log::info!(
        "pci_register_driver: probing {:04x}:{:04x} at {}",
        full_info.vendor_id,
        full_info.device_id,
        full_info.location
    );
    let status = probe(dev_ptr, id_ptr);
    if status != 0 {
        // Probe rejected the device: roll the global state back so a later
        // registration starts clean.
        log::warn!("pci_register_driver: probe failed with status {}", status);
        clear_current_device();
        if let Ok(mut registered_probe) = REGISTERED_PROBE.lock() {
            *registered_probe = None;
        }
    }
    status
}
/// FFI shim for Linux `pci_unregister_driver`: invokes the driver's
/// `remove` callback on the currently bound device (if any), then clears
/// the current-device and registered-probe state. The state is cleared
/// even when `drv` is null.
#[no_mangle]
pub extern "C" fn pci_unregister_driver(drv: *mut PciDriver) {
    if !drv.is_null() {
        let driver = unsafe { &*drv };
        if let Some(remove) = driver.remove {
            // Look up the device pointer recorded by pci_register_driver.
            let current_ptr = CURRENT_DEVICE
                .lock()
                .ok()
                .and_then(|state| state.as_ref().map(|current| current.ptr as *mut PciDev));
            if let Some(dev_ptr) = current_ptr {
                remove(dev_ptr);
            }
        }
    }
    clear_current_device();
    if let Ok(mut registered_probe) = REGISTERED_PROBE.lock() {
        *registered_probe = None;
    }
    log::info!("pci_unregister_driver: cleared registered PCI driver state");
}
@@ -0,0 +1,177 @@
use std::sync::atomic::{AtomicU8, Ordering};
// Lock state values shared by `LinuxMutex`.
const UNLOCKED: u8 = 0;
const LOCKED: u8 = 1;
/// Spin-based stand-in for Linux `struct mutex`. The C side allocates
/// the storage; only the single atomic state byte is used here —
/// assumes the C struct reserves at least one byte at offset 0 for it
/// (TODO confirm against the C definition).
#[repr(C)]
pub struct LinuxMutex {
    state: AtomicU8, // UNLOCKED or LOCKED
}
#[no_mangle]
pub extern "C" fn mutex_init(m: *mut LinuxMutex) {
if m.is_null() {
return;
}
unsafe {
(*m).state = AtomicU8::new(UNLOCKED);
}
}
/// Acquires the mutex, spinning until the state byte can be flipped
/// from UNLOCKED to LOCKED. A null pointer is ignored.
#[no_mangle]
pub extern "C" fn mutex_lock(m: *mut LinuxMutex) {
    let Some(mutex) = (unsafe { m.as_ref() }) else {
        return;
    };
    loop {
        let acquired = mutex
            .state
            .compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
            .is_ok();
        if acquired {
            return;
        }
        std::hint::spin_loop();
    }
}
/// Releases the mutex by publishing UNLOCKED with release ordering.
/// A null pointer is ignored.
#[no_mangle]
pub extern "C" fn mutex_unlock(m: *mut LinuxMutex) {
    if let Some(mutex) = unsafe { m.as_ref() } {
        mutex.state.store(UNLOCKED, Ordering::Release);
    }
}
/// Reports whether the mutex is currently held; false for null.
#[no_mangle]
pub extern "C" fn mutex_is_locked(m: *mut LinuxMutex) -> bool {
    match unsafe { m.as_ref() } {
        Some(mutex) => mutex.state.load(Ordering::Acquire) == LOCKED,
        None => false,
    }
}
/// Spin lock stand-in for Linux `spinlock_t`: a single atomic byte,
/// 0 = unlocked, 1 = held. The C side allocates the storage.
#[repr(C)]
#[derive(Default)]
pub struct Spinlock {
    locked: AtomicU8,
}
/// Resets a spinlock to the unlocked state. Null is ignored.
#[no_mangle]
pub extern "C" fn spin_lock_init(lock: *mut Spinlock) {
    if let Some(lock) = unsafe { lock.as_ref() } {
        lock.locked.store(0, Ordering::SeqCst);
    }
}
/// Acquires the spinlock via a 0 -> 1 compare-exchange, spinning on
/// contention. Null is ignored.
#[no_mangle]
pub extern "C" fn spin_lock(lock: *mut Spinlock) {
    let Some(lock) = (unsafe { lock.as_ref() }) else {
        return;
    };
    loop {
        match lock
            .locked
            .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
        {
            Ok(_) => return,
            Err(_) => std::hint::spin_loop(),
        }
    }
}
/// Releases the spinlock with release ordering. Null is ignored.
#[no_mangle]
pub extern "C" fn spin_unlock(lock: *mut Spinlock) {
    if let Some(lock) = unsafe { lock.as_ref() } {
        lock.locked.store(0, Ordering::Release);
    }
}
static IRQ_DEPTH: std::sync::atomic::AtomicU32 = std::sync::atomic::AtomicU32::new(0);
/// Emulated `spin_lock_irqsave`: bumps the process-wide IRQ-disable
/// depth, takes the spinlock, and reports the previous depth both via
/// `*flags` (when non-null) and the return value so either C-side
/// convention works. No real interrupts are masked.
#[no_mangle]
pub extern "C" fn spin_lock_irqsave(lock: *mut Spinlock, flags: *mut u64) -> u64 {
    let prev_depth = IRQ_DEPTH.fetch_add(1, Ordering::Acquire);
    spin_lock(lock);
    if !flags.is_null() {
        unsafe { *flags = prev_depth as u64 };
    }
    prev_depth as u64
}
/// Emulated `spin_unlock_irqrestore`: releases the lock, then rewinds
/// the IRQ-disable depth to the value saved by the matching irqsave.
/// NOTE(review): the blind store can clobber increments made by other
/// threads between save and restore; presumably acceptable because the
/// depth feeds nothing but `irqs_disabled()` — confirm.
#[no_mangle]
pub extern "C" fn spin_unlock_irqrestore(lock: *mut Spinlock, flags: u64) {
    spin_unlock(lock);
    IRQ_DEPTH.store(flags as u32, Ordering::Release);
}
/// Emulated `local_irq_save`: bumps the IRQ-disable depth and stores the
/// previous depth into `*flags` (ignored when null). Masks nothing.
#[no_mangle]
pub extern "C" fn local_irq_save(flags: *mut u64) {
    let prev_depth = IRQ_DEPTH.fetch_add(1, Ordering::Acquire);
    if !flags.is_null() {
        unsafe { *flags = prev_depth as u64 };
    }
}
/// Emulated `local_irq_restore`: rewinds the IRQ-disable depth to the
/// value captured by the matching `local_irq_save`.
#[no_mangle]
pub extern "C" fn local_irq_restore(flags: u64) {
    IRQ_DEPTH.store(flags as u32, Ordering::Release);
}
/// True while at least one emulated irq-save section is active.
#[no_mangle]
pub extern "C" fn irqs_disabled() -> bool {
    IRQ_DEPTH.load(Ordering::Acquire) != 0
}
use std::ptr;
/// Stand-in for Linux `struct completion`: one done flag plus padding so
/// the struct occupies 64 bytes — presumably to cover the C side's
/// `sizeof(struct completion)`; TODO confirm that size.
#[repr(C)]
pub struct Completion {
    done: AtomicU8, // 0 = pending, 1 = completed
    _padding: [u8; 63],
}
/// Initializes completion storage owned by the C caller, writing a
/// fresh pending state without reading the previous contents. Null is
/// ignored.
#[no_mangle]
pub extern "C" fn init_completion(c: *mut Completion) {
    if c.is_null() {
        return;
    }
    let fresh = Completion {
        done: AtomicU8::new(0),
        _padding: [0; 63],
    };
    unsafe { ptr::write(c, fresh) };
}
/// Marks the completion as done, releasing any spinning waiters. Null
/// is ignored.
#[no_mangle]
pub extern "C" fn complete(c: *mut Completion) {
    if let Some(completion) = unsafe { c.as_ref() } {
        completion.done.store(1, Ordering::Release);
    }
}
/// Blocks until `complete()` is called on `c`. The previous pure
/// spin-wait pinned a full CPU core for the entire wait; yielding to the
/// scheduler on each failed poll keeps long waits from monopolizing a
/// core while preserving the busy-poll semantics. Null returns at once.
#[no_mangle]
pub extern "C" fn wait_for_completion(c: *mut Completion) {
    if c.is_null() {
        return;
    }
    while unsafe { &*c }.done.load(Ordering::Acquire) == 0 {
        std::hint::spin_loop();
        std::thread::yield_now();
    }
}
/// Resets the completion to the pending state for reuse. Null is
/// ignored.
#[no_mangle]
pub extern "C" fn reinit_completion(c: *mut Completion) {
    if let Some(completion) = unsafe { c.as_ref() } {
        completion.done.store(0, Ordering::Release);
    }
}
@@ -0,0 +1,256 @@
use std::collections::HashMap;
use std::mem;
use std::os::raw::c_int;
use std::ptr;
use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering};
use std::sync::{Arc, Mutex, OnceLock};
use std::thread::JoinHandle;
use std::time::Duration;
/// C-layout `struct timespec` for the raw `clock_gettime` call below.
#[repr(C)]
struct Timespec {
    tv_sec: i64,
    tv_nsec: i64,
}
unsafe extern "C" {
    // libc monotonic clock, declared directly to avoid a libc crate dep.
    fn clock_gettime(clock_id: c_int, tp: *mut Timespec) -> c_int;
}
// Clock id for CLOCK_MONOTONIC — matches Linux; TODO confirm for relibc.
const CLOCK_MONOTONIC: c_int = 1;
/// Host-side bookkeeping for one `TimerList`, keyed by the timer's
/// address in `timer_entries()`. `generation` invalidates in-flight
/// sleeper threads whenever the timer is re-armed or deleted.
struct TimerEntry {
    generation: AtomicU64,
    active: AtomicBool,
    function: AtomicPtr<()>, // callback, stored type-erased
    data: AtomicPtr<u8>,     // callback argument
    handles: Mutex<Vec<JoinHandle<()>>>, // sleeper threads; joined by del_timer_sync
}
/// C-layout mirror of the `struct timer_list` fields this shim uses.
/// The C side owns the storage; `setup_timer` overwrites it in place.
#[repr(C)]
pub struct TimerList {
    expires: AtomicU64,
    function: AtomicPtr<()>,
    data: AtomicPtr<u8>,
    active: AtomicBool,
}
/// Lazily-initialized global registry mapping timer address -> entry.
fn timer_entries() -> &'static Mutex<HashMap<usize, Arc<TimerEntry>>> {
    static TIMER_ENTRIES: OnceLock<Mutex<HashMap<usize, Arc<TimerEntry>>>> = OnceLock::new();
    TIMER_ENTRIES.get_or_init(Default::default)
}
/// Milliseconds of CLOCK_MONOTONIC time — this shim treats one jiffy as
/// one millisecond. Returns 0 if the clock read fails or reports
/// negative components.
fn current_jiffies() -> u64 {
    let mut ts = Timespec {
        tv_sec: 0,
        tv_nsec: 0,
    };
    if unsafe { clock_gettime(CLOCK_MONOTONIC, &mut ts) } != 0 {
        return 0;
    }
    if ts.tv_sec < 0 || ts.tv_nsec < 0 {
        return 0;
    }
    let millis_from_secs = (ts.tv_sec as u64).saturating_mul(1_000);
    millis_from_secs.saturating_add(ts.tv_nsec as u64 / 1_000_000)
}
/// Locks the global timer registry, recovering from lock poisoning.
fn lock_timer_entries() -> std::sync::MutexGuard<'static, HashMap<usize, Arc<TimerEntry>>> {
    timer_entries()
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
}
/// Locks an entry's sleeper-thread handle list, recovering from
/// poisoning.
fn lock_timer_handles(entry: &TimerEntry) -> std::sync::MutexGuard<'_, Vec<JoinHandle<()>>> {
    entry
        .handles
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
}
/// Returns (creating on first use) the bookkeeping entry for `timer`.
fn timer_entry(timer: *mut TimerList) -> Arc<TimerEntry> {
    let key = timer as usize;
    let mut entries = lock_timer_entries();
    let entry = entries.entry(key).or_insert_with(|| {
        Arc::new(TimerEntry {
            generation: AtomicU64::new(0),
            active: AtomicBool::new(false),
            function: AtomicPtr::new(ptr::null_mut()),
            data: AtomicPtr::new(ptr::null_mut()),
            handles: Mutex::new(Vec::new()),
        })
    });
    Arc::clone(entry)
}
/// Installs a fresh bookkeeping entry for `timer` holding
/// `function`/`data`, first deactivating any previous entry so its
/// in-flight sleeper threads bail out without firing.
fn reset_timer_entry(timer: *mut TimerList, function: *mut (), data: *mut u8) {
    let key = timer as usize;
    let mut entries = lock_timer_entries();
    if let Some(old) = entries.get(&key) {
        old.active.store(false, Ordering::Release);
        old.generation.fetch_add(1, Ordering::AcqRel);
    }
    let fresh = Arc::new(TimerEntry {
        generation: AtomicU64::new(0),
        active: AtomicBool::new(false),
        function: AtomicPtr::new(function),
        data: AtomicPtr::new(data),
        handles: Mutex::new(Vec::new()),
    });
    entries.insert(key, fresh);
}
/// Detaches the entry's recorded sleeper threads (leaving the list
/// empty) and joins each one, ignoring panicked threads.
fn join_all_handles(entry: &TimerEntry) {
    let pending = mem::take(&mut *lock_timer_handles(entry));
    for handle in pending {
        let _ = handle.join();
    }
}
/// Initializes `timer` with a callback and argument, mirroring the
/// pre-4.15 Linux `setup_timer` API: overwrites the C-owned storage in
/// place and resets the host-side bookkeeping entry. Null is ignored.
#[no_mangle]
pub extern "C" fn setup_timer(
    timer: *mut TimerList,
    function: extern "C" fn(*mut u8),
    data: *mut u8,
) {
    if timer.is_null() {
        return;
    }
    let callback = function as usize as *mut ();
    let fresh = TimerList {
        expires: AtomicU64::new(0),
        function: AtomicPtr::new(callback),
        data: AtomicPtr::new(data),
        active: AtomicBool::new(false),
    };
    unsafe { ptr::write(timer, fresh) };
    reset_timer_entry(timer, callback, data);
}
/// (Re-)arms `timer` to fire at `expires` (milliseconds on the
/// `current_jiffies` clock). Each call spawns a dedicated sleeper
/// thread; the entry's generation counter ensures only the most recent
/// arm actually runs the callback. Returns 1 if the timer was already
/// pending, 0 otherwise (Linux `mod_timer` semantics).
#[no_mangle]
pub extern "C" fn mod_timer(timer: *mut TimerList, expires: u64) -> i32 {
    if timer.is_null() {
        return 0;
    }
    let timer_ref = unsafe { &*timer };
    let entry = timer_entry(timer);
    // Mirror the callback/argument from the C-visible struct into the
    // host-side entry so the sleeper thread reads a consistent pair.
    entry.function.store(
        timer_ref.function.load(Ordering::Acquire),
        Ordering::Release,
    );
    entry
        .data
        .store(timer_ref.data.load(Ordering::Acquire), Ordering::Release);
    let was_active = entry.active.swap(true, Ordering::AcqRel);
    timer_ref.active.store(true, Ordering::Release);
    timer_ref.expires.store(expires, Ordering::Release);
    // Claim a fresh generation: any previously spawned sleeper observes
    // a mismatch below and bails out without firing.
    let generation = entry
        .generation
        .fetch_add(1, Ordering::AcqRel)
        .wrapping_add(1);
    // An expiry in the past yields a zero delay (fires immediately).
    let delay = expires.saturating_sub(current_jiffies());
    let function_addr = entry.function.load(Ordering::Acquire) as usize;
    let data_addr = entry.data.load(Ordering::Acquire) as usize;
    let entry_for_thread = entry.clone();
    let handle = std::thread::spawn(move || {
        std::thread::sleep(Duration::from_millis(delay));
        // Deleted while sleeping: do not fire.
        if !entry_for_thread.active.load(Ordering::Acquire) {
            return;
        }
        // Re-armed while sleeping: a newer sleeper owns the firing.
        if entry_for_thread.generation.load(Ordering::Acquire) != generation {
            return;
        }
        if function_addr == 0 {
            entry_for_thread.active.store(false, Ordering::Release);
            return;
        }
        // SAFETY: function_addr was produced from an
        // `extern "C" fn(*mut u8)` stored by setup_timer and is non-zero.
        let function =
            unsafe { std::mem::transmute::<usize, extern "C" fn(*mut u8)>(function_addr) };
        function(data_addr as *mut u8);
        // Only clear `active` if no newer arm superseded this firing.
        if entry_for_thread.generation.load(Ordering::Acquire) == generation {
            entry_for_thread.active.store(false, Ordering::Release);
        }
    });
    // Record the sleeper so del_timer_sync can join it later.
    lock_timer_handles(&entry).push(handle);
    if was_active {
        1
    } else {
        0
    }
}
/// Deactivates `timer` without waiting for a running callback. Returns
/// 1 if the timer was pending, 0 otherwise (Linux `del_timer`).
#[no_mangle]
pub extern "C" fn del_timer(timer: *mut TimerList) -> i32 {
    if timer.is_null() {
        return 0;
    }
    let entry = timer_entry(timer);
    let was_active = entry.active.swap(false, Ordering::AcqRel);
    // Bump the generation so any sleeping firing thread aborts.
    entry.generation.fetch_add(1, Ordering::AcqRel);
    unsafe { &*timer }.active.store(false, Ordering::Release);
    i32::from(was_active)
}
/// Like `del_timer`, but additionally joins every sleeper thread spawned
/// for this timer so no callback can still be running on return (Linux
/// `del_timer_sync`). Must not be invoked from the timer callback
/// itself, which would self-join.
#[no_mangle]
pub extern "C" fn del_timer_sync(timer: *mut TimerList) -> i32 {
    if timer.is_null() {
        return 0;
    }
    let entry = timer_entry(timer);
    let was_active = entry.active.swap(false, Ordering::AcqRel);
    entry.generation.fetch_add(1, Ordering::AcqRel);
    unsafe { &*timer }.active.store(false, Ordering::Release);
    join_all_handles(&entry);
    i32::from(was_active)
}
/// Returns 1 if `timer` is armed and has neither fired nor been
/// deleted, 0 otherwise (including null or unknown timers).
#[no_mangle]
pub extern "C" fn timer_pending(timer: *const TimerList) -> i32 {
    if timer.is_null() {
        return 0;
    }
    let pending = lock_timer_entries()
        .get(&(timer as usize))
        .is_some_and(|entry| entry.active.load(Ordering::Acquire));
    i32::from(pending)
}
@@ -0,0 +1,186 @@
use std::ptr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Condvar, Mutex};
use std::time::{Duration, Instant};
use std::collections::HashMap;
use std::sync::{Arc, OnceLock};
/// Host-side wake bookkeeping for one wait queue, keyed by its address:
/// the generation counter increments on every `wake_up` so waiters can
/// tell a real wake from a spurious condvar return.
struct WaitState {
    generation: AtomicU64,
}
/// Stand-in for Linux `wait_queue_head_t`: a condvar plus a
/// mutex-protected "notified" flag. Initialized in place over C-owned
/// storage by `init_waitqueue_head`.
#[repr(C)]
pub struct WaitQueueHead {
    condvar: Condvar,
    mutex: Mutex<bool>,
}
/// Lazily-initialized global registry mapping queue address -> state.
fn wait_states() -> &'static Mutex<HashMap<usize, Arc<WaitState>>> {
    static WAIT_STATES: OnceLock<Mutex<HashMap<usize, Arc<WaitState>>>> = OnceLock::new();
    WAIT_STATES.get_or_init(Default::default)
}
/// Locks the global wait-state registry, recovering from poisoning.
fn lock_wait_states() -> std::sync::MutexGuard<'static, HashMap<usize, Arc<WaitState>>> {
    wait_states()
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
}
/// Installs a fresh generation counter for `wq`, discarding prior state.
fn reset_wait_state(wq: *mut WaitQueueHead) {
    let fresh = Arc::new(WaitState {
        generation: AtomicU64::new(0),
    });
    lock_wait_states().insert(wq as usize, fresh);
}
/// Returns (creating on demand) the generation state shared by waiters
/// and wakers of `wq`.
fn wait_state(wq: *mut WaitQueueHead) -> Arc<WaitState> {
    let key = wq as usize;
    let mut states = lock_wait_states();
    let state = states.entry(key).or_insert_with(|| {
        Arc::new(WaitState {
            generation: AtomicU64::new(0),
        })
    });
    Arc::clone(state)
}
/// Core of `wait_event`: blocks until `condition()` is true. Waiters
/// sleep on the queue's condvar; `wake_up` bumps the generation under
/// the same mutex and broadcasts, which breaks the inner wait so the
/// condition is re-evaluated. A null queue returns immediately.
fn wait_event_impl<F>(wq: *mut WaitQueueHead, condition: F)
where
    F: Fn() -> bool,
{
    if wq.is_null() {
        return;
    }
    let wq_ref = unsafe { &*wq };
    let state = wait_state(wq);
    loop {
        // Fast path: already satisfied, no locking needed.
        if condition() {
            return;
        }
        let mut notified = match wq_ref.mutex.lock() {
            Ok(guard) => guard,
            Err(e) => e.into_inner(),
        };
        // Snapshot the generation under the lock; wake_up increments it
        // under the same lock, so a wake between the snapshot and the
        // condvar wait cannot be lost.
        let generation = state.generation.load(Ordering::Acquire);
        while state.generation.load(Ordering::Acquire) == generation && !condition() {
            notified = match wq_ref.condvar.wait(notified) {
                Ok(guard) => guard,
                Err(e) => e.into_inner(),
            };
        }
        // Consume the notification flag before looping to re-check.
        *notified = false;
    }
}
/// Core of `wait_event_timeout`: waits until `condition()` is true
/// (returns 1) or `timeout_ms` elapses (returns 0). A null queue
/// returns 0 immediately. The deadline is absolute, so repeated wakes
/// do not extend the total wait.
fn wait_event_timeout_impl<F>(wq: *mut WaitQueueHead, condition: F, timeout_ms: u64) -> i32
where
    F: Fn() -> bool,
{
    if wq.is_null() {
        return 0;
    }
    let deadline = Instant::now() + Duration::from_millis(timeout_ms);
    let wq_ref = unsafe { &*wq };
    let state = wait_state(wq);
    loop {
        if condition() {
            return 1;
        }
        let now = Instant::now();
        if now >= deadline {
            return 0;
        }
        let remaining = deadline.saturating_duration_since(now);
        let notified = match wq_ref.mutex.lock() {
            Ok(guard) => guard,
            Err(e) => e.into_inner(),
        };
        // Snapshot under the lock so a wake_up between here and the
        // timed wait is detected via the generation check below.
        let generation = state.generation.load(Ordering::Acquire);
        let (mut notified, wait_result) = match wq_ref.condvar.wait_timeout(notified, remaining) {
            Ok(result) => result,
            Err(e) => e.into_inner(),
        };
        // Consume a pending notification flag.
        if *notified {
            *notified = false;
        }
        if condition() {
            return 1;
        }
        // A generation change means a wake occurred; loop and re-check
        // against the (unchanged) absolute deadline.
        if state.generation.load(Ordering::Acquire) != generation {
            continue;
        }
        if wait_result.timed_out() && !condition() {
            return 0;
        }
    }
}
/// Initializes C-owned wait-queue storage in place and resets its
/// wake-generation bookkeeping. Null is ignored.
#[no_mangle]
pub extern "C" fn init_waitqueue_head(wq: *mut WaitQueueHead) {
    if wq.is_null() {
        return;
    }
    let fresh = WaitQueueHead {
        condvar: Condvar::new(),
        mutex: Mutex::new(false),
    };
    unsafe { ptr::write(wq, fresh) };
    reset_wait_state(wq);
}
/// Blocks until `condition()` returns true, waking on `wake_up` calls.
#[no_mangle]
pub extern "C" fn wait_event(wq: *mut WaitQueueHead, condition: extern "C" fn() -> bool) {
    wait_event_impl(wq, move || condition());
}
/// Wakes every thread blocked on `wq`: sets the notified flag and bumps
/// the generation under the queue's mutex, then broadcasts the condvar.
/// Null is ignored.
#[no_mangle]
pub extern "C" fn wake_up(wq: *mut WaitQueueHead) {
    let Some(wq_ref) = (unsafe { wq.as_ref() }) else {
        return;
    };
    let state = wait_state(wq);
    {
        let mut notified = wq_ref
            .mutex
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        *notified = true;
        state.generation.fetch_add(1, Ordering::AcqRel);
    }
    wq_ref.condvar.notify_all();
}
/// Blocks until `condition()` is true or `timeout_ms` elapses; returns
/// 1 when the condition was met, 0 on timeout.
#[no_mangle]
pub extern "C" fn wait_event_timeout(
    wq: *mut WaitQueueHead,
    condition: extern "C" fn() -> bool,
    timeout_ms: u64,
) -> i32 {
    wait_event_timeout_impl(wq, move || condition(), timeout_ms)
}
@@ -0,0 +1,290 @@
use std::collections::VecDeque;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, Condvar, Mutex};
/// Queue-transportable wrapper for a raw work-item pointer.
struct SendWorkPtr(*mut WorkStruct);
impl SendWorkPtr {
    fn as_ptr(&self) -> *mut WorkStruct {
        self.0
    }
}
// SAFETY: the pointer is only dereferenced by the single worker thread
// that pops it; the C caller must keep the WorkStruct alive until the
// work has run (Linux workqueue contract) — TODO confirm callers uphold
// this.
unsafe impl Send for SendWorkPtr {}
/// C-layout mirror of Linux `struct work_struct`: the callback plus
/// opaque padding covering the remaining C fields this shim ignores.
#[repr(C)]
pub struct WorkStruct {
    pub func: Option<extern "C" fn(*mut WorkStruct)>,
    pub __opaque: [u8; 64],
}
/// C-layout mirror of Linux `struct delayed_work`. The embedded timer
/// storage is unused; delays are implemented with sleeper threads.
#[repr(C)]
pub struct DelayedWork {
    pub work: WorkStruct, // must stay first: DelayedWork* is cast to WorkStruct*
    pub __timer_opaque: [u8; 64],
}
/// Shared state of one workqueue: a FIFO of raw work pointers, the count
/// of submitted-but-unfinished items, a condvar signalled when that
/// count reaches zero, and a shutdown flag polled by workers.
struct WorkqueueInner {
    queue: Mutex<VecDeque<SendWorkPtr>>,
    pending_count: AtomicUsize,
    done_condvar: Condvar,
    shutdown: AtomicBool,
    thread_count: usize,
}
/// Heap-allocated handle returned to C by `alloc_workqueue`.
pub struct WorkqueueStruct {
    inner: Arc<WorkqueueInner>,
    _name: String, // retained for debugging; not otherwise read
    handles: Vec<std::thread::JoinHandle<()>>,
}
lazy_static::lazy_static! {
    /// Shared queue backing `schedule_work`/`schedule_delayed_work`,
    /// with four detached worker threads started on first use. It is
    /// never shut down for the life of the process.
    static ref DEFAULT_WQ: Arc<WorkqueueInner> = {
        let inner = Arc::new(WorkqueueInner {
            queue: Mutex::new(VecDeque::new()),
            pending_count: AtomicUsize::new(0),
            done_condvar: Condvar::new(),
            shutdown: AtomicBool::new(false),
            thread_count: 4,
        });
        let inner_clone = inner.clone();
        for _ in 0..inner.thread_count {
            let ic = inner_clone.clone();
            std::thread::spawn(move || worker_loop(ic));
        }
        inner
    };
}
/// Body of each workqueue worker thread: pops and runs work items until
/// `shutdown` is observed, polling with a 1 ms sleep when idle.
fn worker_loop(inner: Arc<WorkqueueInner>) {
    loop {
        if inner.shutdown.load(Ordering::Acquire) {
            break;
        }
        let work = {
            let mut queue = match inner.queue.lock() {
                Ok(q) => q,
                Err(e) => {
                    log::error!("workqueue: lock poisoned, recovering: {}", e);
                    e.into_inner()
                }
            };
            queue.pop_front()
        };
        if let Some(send_work_ptr) = work {
            let work_ptr = send_work_ptr.as_ptr();
            // A null func is treated as a no-op item.
            if let Some(func) = unsafe { (*work_ptr).func } {
                func(work_ptr);
            }
            let prev = inner.pending_count.fetch_sub(1, Ordering::Release);
            if prev == 1 {
                // Last outstanding item: acquire and release the queue
                // lock before notifying so a flusher blocked in wait()
                // cannot miss the wakeup (it re-checks under this lock).
                let queue = match inner.queue.lock() {
                    Ok(q) => q,
                    Err(e) => {
                        log::error!("workqueue: lock poisoned, recovering: {}", e);
                        e.into_inner()
                    }
                };
                drop(queue);
                inner.done_condvar.notify_all();
            }
        } else {
            // Idle: cheap poll keeps the loop simple at ~1 ms latency.
            std::thread::sleep(std::time::Duration::from_millis(1));
        }
    }
}
/// Enqueues `work` on `inner`'s queue; returns 1 if queued, 0 for null.
///
/// The pending counter is incremented BEFORE the item becomes visible to
/// workers. The previous version incremented after releasing the queue
/// lock, so a fast worker could pop and finish the item first and its
/// `fetch_sub` would wrap the usize counter from 0, wedging flush and
/// destroy waits.
fn dispatch_work(inner: &Arc<WorkqueueInner>, work: *mut WorkStruct) -> i32 {
    if work.is_null() {
        return 0;
    }
    inner.pending_count.fetch_add(1, Ordering::Release);
    {
        let mut queue = match inner.queue.lock() {
            Ok(q) => q,
            Err(e) => {
                log::error!("workqueue: lock poisoned, recovering: {}", e);
                e.into_inner()
            }
        };
        queue.push_back(SendWorkPtr(work));
    }
    1
}
/// Creates a workqueue with its own worker threads (Linux
/// `alloc_workqueue`). `name` may be null or must point at a
/// NUL-terminated C string; `max_active` sets the worker count
/// (non-positive falls back to 4). Returns a heap pointer that must be
/// released via `destroy_workqueue`.
#[no_mangle]
pub extern "C" fn alloc_workqueue(
    name: *const u8,
    _flags: u32,
    max_active: i32,
) -> *mut WorkqueueStruct {
    use std::ffi::CStr;
    let name_str = if name.is_null() {
        String::from("unknown")
    } else {
        // SAFETY: the Linux API contract requires a non-null `name` to
        // be NUL-terminated; CStr::from_ptr stops at the first NUL,
        // replacing the previous open-coded strlen loop. Invalid UTF-8
        // falls back to "unknown", matching the old behavior.
        unsafe { CStr::from_ptr(name.cast()) }
            .to_str()
            .unwrap_or("unknown")
            .to_string()
    };
    let thread_count = if max_active > 0 {
        max_active as usize
    } else {
        4 // same default worker count as DEFAULT_WQ
    };
    let inner = Arc::new(WorkqueueInner {
        queue: Mutex::new(VecDeque::new()),
        pending_count: AtomicUsize::new(0),
        done_condvar: Condvar::new(),
        shutdown: AtomicBool::new(false),
        thread_count,
    });
    let mut handles = Vec::with_capacity(inner.thread_count);
    for _ in 0..inner.thread_count {
        let ic = inner.clone();
        handles.push(std::thread::spawn(move || worker_loop(ic)));
    }
    Box::into_raw(Box::new(WorkqueueStruct {
        inner,
        _name: name_str,
        handles,
    }))
}
/// Drains and tears down a workqueue created by `alloc_workqueue`:
/// waits (on the done condvar) until every pending item has run, then
/// flags shutdown and joins all worker threads. The handle is freed;
/// using it afterwards is undefined behavior.
#[no_mangle]
pub extern "C" fn destroy_workqueue(wq: *mut WorkqueueStruct) {
    if wq.is_null() {
        return;
    }
    // Reclaim ownership of the allocation made by alloc_workqueue.
    let mut wq = unsafe { Box::from_raw(wq) };
    {
        let mut queue = match wq.inner.queue.lock() {
            Ok(q) => q,
            Err(e) => {
                log::error!("workqueue: lock poisoned, recovering: {}", e);
                e.into_inner()
            }
        };
        // The condvar wait releases the queue lock, so workers can keep
        // popping items while we block here.
        while wq.inner.pending_count.load(Ordering::Acquire) > 0 {
            queue = match wq.inner.done_condvar.wait(queue) {
                Ok(q) => q,
                Err(e) => {
                    log::error!("workqueue: condvar wait failed, recovering: {}", e);
                    e.into_inner()
                }
            };
        }
    }
    // Workers poll this flag (they do not sleep on the condvar), so the
    // notify below is for any remaining condvar waiters only.
    wq.inner.shutdown.store(true, Ordering::Release);
    wq.inner.done_condvar.notify_all();
    for handle in wq.handles.drain(..) {
        let _ = handle.join();
    }
}
/// Submits `work` to the given workqueue; returns 1 if queued, 0 for a
/// null queue or null work item.
#[no_mangle]
pub extern "C" fn queue_work(wq: *mut WorkqueueStruct, work: *mut WorkStruct) -> i32 {
    match unsafe { wq.as_ref() } {
        Some(queue) => dispatch_work(&queue.inner, work),
        None => 0,
    }
}
/// Blocks until every work item submitted to `wq` has finished (Linux
/// `flush_workqueue`). Sleeps on the done condvar; workers notify after
/// the pending count drops to zero. Null is ignored.
#[no_mangle]
pub extern "C" fn flush_workqueue(wq: *mut WorkqueueStruct) {
    if wq.is_null() {
        return;
    }
    let inner = unsafe { &(*wq).inner };
    let mut queue = match inner.queue.lock() {
        Ok(q) => q,
        Err(e) => {
            log::error!("workqueue: lock poisoned, recovering: {}", e);
            e.into_inner()
        }
    };
    // wait() releases the queue lock, so workers are not blocked while
    // this thread sleeps.
    while inner.pending_count.load(Ordering::Acquire) > 0 {
        queue = match inner.done_condvar.wait(queue) {
            Ok(q) => q,
            Err(e) => {
                log::error!("workqueue: condvar wait failed, recovering: {}", e);
                e.into_inner()
            }
        };
    }
}
/// Submits `work` to the process-wide default workqueue (Linux
/// `schedule_work`); returns 1 if queued, 0 for null work.
#[no_mangle]
pub extern "C" fn schedule_work(work: *mut WorkStruct) -> i32 {
    dispatch_work(&DEFAULT_WQ, work)
}
/// Runs `dwork`'s callback after `delay` milliseconds on a dedicated
/// sleeper thread; the embedded Linux timer is not used. The pending
/// count is bumped immediately, so flush waits also cover the delay
/// period. NOTE(review): there is no cancellation path in this shim.
#[no_mangle]
pub extern "C" fn schedule_delayed_work(dwork: *mut DelayedWork, delay: u64) -> i32 {
    if dwork.is_null() {
        return 0;
    }
    // DelayedWork begins with its WorkStruct (repr(C)), so this cast is sound.
    let work_ptr = SendWorkPtr(dwork as *mut WorkStruct);
    let inner = DEFAULT_WQ.clone();
    inner.pending_count.fetch_add(1, Ordering::Release);
    std::thread::spawn(move || {
        std::thread::sleep(std::time::Duration::from_millis(delay));
        let ptr = work_ptr.as_ptr();
        if let Some(func) = unsafe { (*ptr).func } {
            func(ptr);
        }
        let prev = inner.pending_count.fetch_sub(1, Ordering::Release);
        if prev == 1 {
            // Same lock-then-notify dance as worker_loop so a flusher
            // blocked in wait() cannot miss the final wakeup.
            let queue = match inner.queue.lock() {
                Ok(q) => q,
                Err(e) => {
                    log::error!("workqueue: lock poisoned, recovering: {}", e);
                    e.into_inner()
                }
            };
            drop(queue);
            inner.done_condvar.notify_all();
        }
    });
    1
}
/// Blocks until every item on the default workqueue — including
/// in-flight delayed work — has finished (Linux `flush_scheduled_work`).
#[no_mangle]
pub extern "C" fn flush_scheduled_work() {
    let mut queue = match DEFAULT_WQ.queue.lock() {
        Ok(q) => q,
        Err(e) => {
            log::error!("workqueue: lock poisoned, recovering: {}", e);
            e.into_inner()
        }
    };
    // wait() releases the queue lock while sleeping, so workers proceed.
    while DEFAULT_WQ.pending_count.load(Ordering::Acquire) > 0 {
        queue = match DEFAULT_WQ.done_condvar.wait(queue) {
            Ok(q) => q,
            Err(e) => {
                log::error!("workqueue: condvar wait failed, recovering: {}", e);
                e.into_inner()
            }
        };
    }
}