Red Bear OS — microkernel OS in Rust, based on Redox

Derivative of Redox OS (https://www.redox-os.org) adding:
- AMD GPU driver (amdgpu) via LinuxKPI compat layer
- ext4 filesystem support (ext4d scheme daemon)
- ACPI fixes for AMD bare metal (x2APIC, DMAR, IVRS, MCFG)
- Custom branding (hostname, os-release, boot identity)

Build system is full upstream Redox with RBOS overlay in local/.
Patches for kernel, base, and relibc are symlinked from local/patches/
and protected from make clean/distclean. Custom recipes live in
local/recipes/ with symlinks into the recipes/ search path.

Build:  make all CONFIG_NAME=redbear-full
Sync:   ./local/scripts/sync-upstream.sh
This commit is contained in:
2026-04-12 19:05:00 +01:00
commit 50b731f1b7
3392 changed files with 98327 additions and 0 deletions
@@ -0,0 +1,17 @@
# Crate manifest for the LinuxKPI-style Linux-API compatibility layer.
[package]
name = "linux-kpi"
version = "0.1.0"
edition = "2021"
description = "Linux Kernel API compatibility layer for Redox OS (LinuxKPI-style)"
license = "MIT"
# Runtime dependencies: Redox syscall/libredox bindings plus common utility
# crates (logging, error derive, lazy statics) and the local driver-sys crate.
[dependencies]
libredox = "0.1"
redox_syscall = { version = "0.7", features = ["std"] }
log = "0.4"
thiserror = "2"
lazy_static = "1.4"
redox-driver-sys = { path = "../../redox-driver-sys/source" }
# Built both as an rlib (for Rust consumers) and a staticlib (for linking
# against ported C driver code).
[lib]
crate-type = ["rlib", "staticlib"]
@@ -0,0 +1,53 @@
use std::env;
use std::fs;
use std::path::Path;
/// Recursively mirror the directory tree rooted at `src` into `dst`.
///
/// `dst` (and any missing parents) are created first; regular files are
/// copied with `fs::copy`, subdirectories are descended into. Errors from
/// any filesystem operation are propagated to the caller unchanged.
fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> {
    fs::create_dir_all(dst)?;
    for item in fs::read_dir(src)? {
        let item = item?;
        let from = item.path();
        let to = dst.join(item.file_name());
        if from.is_dir() {
            copy_dir_recursive(&from, &to)?;
        } else {
            // fs::copy returns the byte count, which we do not need.
            let _ = fs::copy(&from, &to)?;
        }
    }
    Ok(())
}
/// Build script: stages the bundled C headers into OUT_DIR (advertised to
/// dependent crates via `cargo:include=`) and, when the Redox cookbook build
/// system provides COOKBOOK_SYSROOT / COOKBOOK_STAGE, mirrors them there too.
fn main() {
    let out_dir = env::var("OUT_DIR").expect("OUT_DIR not set");
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set");
    let headers_src = Path::new(&manifest_dir).join("src/c_headers");

    // Copy headers into OUT_DIR/include and export the path for consumers.
    let headers_dst = Path::new(&out_dir).join("include");
    if headers_src.exists() {
        copy_dir_recursive(&headers_src, &headers_dst)
            .expect("failed to copy C headers to OUT_DIR");
        println!("cargo:include={}", headers_dst.display());
    }

    // Optional cookbook install locations; each is (env var, relative dest).
    let staged_dests = [
        ("COOKBOOK_SYSROOT", "include/linux-kpi"),
        ("COOKBOOK_STAGE", "usr/include/linux-kpi"),
    ];
    for (var, rel) in staged_dests {
        // Declare the env dependency so cargo re-runs this script when the
        // cookbook environment changes (previously it could go stale).
        println!("cargo:rerun-if-env-changed={}", var);
        if let Ok(root) = env::var(var) {
            if headers_src.exists() {
                let dst = Path::new(&root).join(rel);
                copy_dir_recursive(&headers_src, &dst)
                    .unwrap_or_else(|e| panic!("failed to copy C headers to {}: {}", var, e));
            }
        }
    }

    println!("cargo:rerun-if-changed=src/c_headers");
}
@@ -0,0 +1,77 @@
#ifndef _ASM_IO_H
#define _ASM_IO_H
#include <linux/types.h>
#include <linux/compiler.h>
/*
 * x86 port-mapped I/O primitives (AT&T syntax inline asm).
 * NOTE(review): these encodings are x86/x86_64-specific; there is no arch
 * guard here, so this header must only be reachable on x86 builds -- confirm.
 */
/* Read one byte from I/O port `port`. "Nd" allows an immediate or %dx port. */
static inline unsigned char inb(unsigned short port)
{
unsigned char val;
__asm__ __volatile__("inb %1, %0" : "=a"(val) : "Nd"(port));
return val;
}
/* Read a 16-bit word from `port`. */
static inline unsigned short inw(unsigned short port)
{
unsigned short val;
__asm__ __volatile__("inw %1, %0" : "=a"(val) : "Nd"(port));
return val;
}
/* Read a 32-bit longword from `port`. */
static inline unsigned int inl(unsigned short port)
{
unsigned int val;
__asm__ __volatile__("inl %1, %0" : "=a"(val) : "Nd"(port));
return val;
}
/* Write one byte to `port`. Note Linux argument order: value first. */
static inline void outb(unsigned char val, unsigned short port)
{
__asm__ __volatile__("outb %0, %1" : : "a"(val), "Nd"(port));
}
/* Write a 16-bit word to `port`. */
static inline void outw(unsigned short val, unsigned short port)
{
__asm__ __volatile__("outw %0, %1" : : "a"(val), "Nd"(port));
}
/* Write a 32-bit longword to `port`. */
static inline void outl(unsigned int val, unsigned short port)
{
__asm__ __volatile__("outl %0, %1" : : "a"(val), "Nd"(port));
}
/*
 * String I/O: repeat-prefixed block transfers. The "+D"/"+S" constraints put
 * the buffer pointer in RDI/RSI and "+c" the count in RCX, which the rep
 * prefix consumes; "memory" tells the compiler the buffer is read/written.
 */
static inline void insb(unsigned short port, void *buf, unsigned long count)
{
__asm__ __volatile__("rep insb" : "+D"(buf), "+c"(count) : "d"(port) : "memory");
}
static inline void insw(unsigned short port, void *buf, unsigned long count)
{
__asm__ __volatile__("rep insw" : "+D"(buf), "+c"(count) : "d"(port) : "memory");
}
static inline void insl(unsigned short port, void *buf, unsigned long count)
{
__asm__ __volatile__("rep insl" : "+D"(buf), "+c"(count) : "d"(port) : "memory");
}
static inline void outsb(unsigned short port, const void *buf, unsigned long count)
{
__asm__ __volatile__("rep outsb" : "+S"(buf), "+c"(count) : "d"(port) : "memory");
}
static inline void outsw(unsigned short port, const void *buf, unsigned long count)
{
__asm__ __volatile__("rep outsw" : "+S"(buf), "+c"(count) : "d"(port) : "memory");
}
static inline void outsl(unsigned short port, const void *buf, unsigned long count)
{
__asm__ __volatile__("rep outsl" : "+S"(buf), "+c"(count) : "d"(port) : "memory");
}
/* Full/load/store hardware memory barriers (mfence/lfence/sfence). */
#define mb() __asm__ __volatile__("mfence" : : : "memory")
#define rmb() __asm__ __volatile__("lfence" : : : "memory")
#define wmb() __asm__ __volatile__("sfence" : : : "memory")
#endif
@@ -0,0 +1,38 @@
#ifndef _DRM_DRM_H
#define _DRM_DRM_H
#include <linux/types.h>
#include <stddef.h>
/* Core DRM constants and user-facing structs (trimmed shim of drm/drm.h). */
#define DRM_NAME "drm"
#define DRM_MINORS 256
/*
 * Minimal ioctl-number helpers. Unlike the real Linux _IOC encoding, this
 * shim packs only (type, nr) and deliberately drops the direction and size
 * bits. Guarded with #ifndef so we do not collide with a libc
 * <sys/ioctl.h> that already provides _IO*, and defined BEFORE first use
 * (they previously appeared after the DRM_IO* wrappers that reference them).
 */
#ifndef _IO
#define _IO(type, nr) ((type) << 8 | (nr))
#endif
#ifndef _IOR
#define _IOR(type, nr, t) ((type) << 8 | (nr))
#endif
#ifndef _IOW
#define _IOW(type, nr, t) ((type) << 8 | (nr))
#endif
#ifndef _IOWR
#define _IOWR(type, nr, t) ((type) << 8 | (nr))
#endif
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE, nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE, nr, type)
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE, nr, type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE, nr, type)
/* Driver identity reported to userspace: version triple plus name/date/desc
 * strings (caller supplies the buffers; *_len gives each buffer length). */
struct drm_version {
    int version_major;
    int version_minor;
    int version_patchlevel;
    size_t name_len;
    char *name;
    size_t date_len;
    char *date;
    size_t desc_len;
    char *desc;
};
/* Unique device identifier string (bus id). */
struct drm_unique {
    size_t unique_len;
    char *unique;
};
#endif
@@ -0,0 +1,75 @@
#ifndef _DRM_DRM_CRTC_H
#define _DRM_DRM_CRTC_H
#include <linux/types.h>
#include <stddef.h>
/*
 * Simplified KMS (mode-setting) object structs. Pointers to related DRM
 * objects are kept as void* to avoid pulling in the full object hierarchy.
 */
/* A display controller pipe: position/size of the scanout plus state. */
struct drm_crtc {
void *dev;
void *primary;
void *cursor;
u32 index;
char name[32];
bool enabled;
int x;
int y;
u32 width;
u32 height;
};
/* A physical output connector (status uses DRM_CONNECTOR_STATUS_*). */
struct drm_connector {
void *dev;
u32 connector_type;
u32 connector_type_id;
int status;
char name[32];
};
/* An encoder routing a CRTC to connectors; masks are CRTC/clone bitmaps. */
struct drm_encoder {
void *dev;
u32 encoder_type;
u32 possible_crtcs;
u32 possible_clones;
};
/* A display timing: pixel clock plus horizontal/vertical sync geometry. */
struct drm_display_mode {
u32 clock;
u16 hdisplay;
u16 hsync_start;
u16 hsync_end;
u16 htotal;
u16 hskew;
u16 vdisplay;
u16 vsync_start;
u16 vsync_end;
u16 vtotal;
u16 vscan;
u32 flags;
u32 type;
char name[32];
};
/* Legacy (single-plane) framebuffer creation parameters. */
struct drm_mode_fb_cmd {
u32 fb_id;
u32 width;
u32 height;
u32 pitch;
u32 bpp;
u32 depth;
u32 handle;
};
/* Mode type and sync-polarity flag bits (subset of the Linux uapi values). */
#define DRM_MODE_TYPE_BUILTIN (1 << 0)
#define DRM_MODE_TYPE_CLOCK_C ((1 << 1) | (1 << 2))
#define DRM_MODE_TYPE_CRTC_C ((1 << 3) | (1 << 4))
#define DRM_MODE_FLAG_PHSYNC (1 << 0)
#define DRM_MODE_FLAG_NHSYNC (1 << 1)
#define DRM_MODE_FLAG_PVSYNC (1 << 2)
#define DRM_MODE_FLAG_NVSYNC (1 << 3)
/* Connector hotplug/detection states. */
#define DRM_CONNECTOR_STATUS_UNKNOWN 0
#define DRM_CONNECTOR_STATUS_CONNECTED 1
#define DRM_CONNECTOR_STATUS_DISCONNECTED 2
#endif
@@ -0,0 +1,39 @@
#ifndef _DRM_DRM_GEM_H
#define _DRM_DRM_GEM_H
#include <linux/types.h>
#include <stddef.h>
/* GEM (Graphics Execution Manager) buffer-object shim. */
struct drm_device;
struct drm_file;
/* A GEM buffer object: refcounted by userspace handles, `size` bytes. */
struct drm_gem_object {
void *dev;
u32 handle_count;
size_t size;
void *driver_private;
};
/* Driver callbacks for a GEM object's lifecycle and CPU mapping. */
struct drm_gem_object_ops {
void (*free)(struct drm_gem_object *obj);
int (*open)(struct drm_gem_object *obj, struct drm_file *file);
void (*close)(struct drm_gem_object *obj, struct drm_file *file);
int (*pin)(struct drm_gem_object *obj);
void (*unpin)(struct drm_gem_object *obj);
int (*get_sg_table)(struct drm_gem_object *obj);
void *(*vmap)(struct drm_gem_object *obj);
void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
};
/* Implemented by the Rust drm_shim; mirror the Linux GEM entry points. */
extern int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
extern void drm_gem_object_release(struct drm_gem_object *obj);
extern int drm_gem_handle_create(struct drm_file *file,
struct drm_gem_object *obj,
u32 *handlep);
extern void drm_gem_handle_delete(struct drm_file *file, u32 handle);
extern struct drm_gem_object *drm_gem_object_lookup(struct drm_file *file,
u32 handle);
extern void drm_gem_object_put(struct drm_gem_object *obj);
#endif
@@ -0,0 +1,55 @@
#ifndef _DRM_DRM_IOCTL_H
#define _DRM_DRM_IOCTL_H
#include <linux/types.h>
/* Per-open-file DRM state (one per client that opened the device). */
struct drm_file {
u32 pid;
u32 uid;
int authenticated;
int master;
void *driver_priv;
};
/* A registered DRM device instance; opaque subsystem pointers are void*. */
struct drm_device {
const char *name;
const char *desc;
u32 driver_features;
void *dev_private;
void *pdev;
u32 irq;
void *mode_config;
void *primary;
void *render;
int unplugged;
};
/* Driver feature bits for drm_driver.driver_features (Linux DRIVER_* subset). */
#define DRIVER_USE_AGP 0x1U
#define DRIVER_REQUIRE_AGP 0x2U
#define DRIVER_GEM 0x8U
#define DRIVER_MODESET 0x10U
#define DRIVER_PRIME 0x20U
#define DRIVER_RENDER 0x40U
#define DRIVER_ATOMIC 0x80U
#define DRIVER_SYNCOBJ 0x100U
/* Driver entry points invoked by the DRM core shim over a device's lifetime. */
struct drm_driver {
const char *name;
const char *desc;
u32 driver_features;
int (*load)(struct drm_device *dev, unsigned long flags);
void (*unload)(struct drm_device *dev);
int (*open)(struct drm_device *dev, struct drm_file *file);
void (*preclose)(struct drm_device *dev, struct drm_file *file);
void (*postclose)(struct drm_device *dev, struct drm_file *file);
void (*lastclose)(struct drm_device *dev);
int (*dma_ioctl)(struct drm_device *dev, void *data, struct drm_file *file);
void (*irq_handler)(int irq, void *arg);
};
/* Implemented by the Rust drm_shim: device registration and ioctl dispatch. */
extern int drm_dev_register(struct drm_device *dev, unsigned long flags);
extern void drm_dev_unregister(struct drm_device *dev);
extern int drm_ioctl(struct drm_device *dev, unsigned int cmd, void *data,
struct drm_file *file);
#endif
@@ -0,0 +1,84 @@
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <linux/types.h>
/*
 * Linux-style atomic counters implemented on the GCC __sync builtins
 * (full-barrier semantics on every operation).
 */
typedef struct {
    volatile int counter;
} atomic_t;
typedef struct {
    volatile long counter;
} atomic_long_t;
/*
 * Atomically read the counter. BUG FIX: the previous version returned
 * __sync_fetch_and_add(..., 0) + v->counter, i.e. roughly TWICE the stored
 * value (and a second, unsynchronized load). A fetch_add of 0 alone yields
 * the current value with a full barrier.
 */
static inline int atomic_read(const atomic_t *v)
{
    return __sync_fetch_and_add((volatile int *)&v->counter, 0);
}
/* Store `i` into the counter, then issue a full barrier. */
static inline void atomic_set(atomic_t *v, int i)
{
    v->counter = i;
    __sync_synchronize();
}
static inline void atomic_inc(atomic_t *v)
{
    __sync_fetch_and_add(&v->counter, 1);
}
static inline void atomic_dec(atomic_t *v)
{
    __sync_fetch_and_sub(&v->counter, 1);
}
static inline void atomic_add(int i, atomic_t *v)
{
    __sync_fetch_and_add(&v->counter, i);
}
static inline void atomic_sub(int i, atomic_t *v)
{
    __sync_fetch_and_sub(&v->counter, i);
}
/* Increment/decrement and return the NEW value. */
static inline int atomic_inc_return(atomic_t *v)
{
    return __sync_add_and_fetch(&v->counter, 1);
}
static inline int atomic_dec_return(atomic_t *v)
{
    return __sync_sub_and_fetch(&v->counter, 1);
}
/* Swap in `new_val`, returning the previous value (acquire barrier only). */
static inline int atomic_xchg(atomic_t *v, int new_val)
{
    return __sync_lock_test_and_set(&v->counter, new_val);
}
/* Compare-and-swap; returns the value observed before the attempt. */
static inline int atomic_cmpxchg(atomic_t *v, int old_val, int new_val)
{
    return __sync_val_compare_and_swap(&v->counter, old_val, new_val);
}
/* Add `a` unless the counter equals `u`; returns nonzero if the add happened. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
    int c = v->counter;
    while (c != u && !__sync_bool_compare_and_swap(&v->counter, c, c + a))
        c = v->counter;
    return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Decrement; returns nonzero iff the result is zero. */
static inline int atomic_dec_and_test(atomic_t *v)
{
    return __sync_sub_and_fetch(&v->counter, 1) == 0;
}
/* SMP barriers: all map to a full barrier under the __sync model. */
#define smp_mb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()
#define smp_wmb() __sync_synchronize()
#endif
@@ -0,0 +1,33 @@
#ifndef _LINUX_BUG_H
#define _LINUX_BUG_H
#include <stdio.h>
#include <stdlib.h>
#define BUG() \
do { fprintf(stderr, "BUG: %s:%d\n", __FILE__, __LINE__); } while(0)
#define BUG_ON(condition) \
do { if (unlikely(condition)) { BUG(); } } while(0)
#define WARN(condition, fmt, ...) \
({ \
int __ret = !!(condition); \
if (__ret) { fprintf(stderr, "WARN: %s:%d: " fmt "\n", \
__FILE__, __LINE__, ##__VA_ARGS__); } \
__ret; \
})
#define WARN_ON(condition) \
({ \
int __ret = !!(condition); \
if (__ret) { fprintf(stderr, "WARN: %s:%d\n", __FILE__, __LINE__); } \
__ret; \
})
#define WARN_ON_ONCE(condition) WARN_ON(condition)
#define BUILD_BUG_ON(condition) \
extern char __build_bug_on[(condition) ? -1 : 1] __attribute__((unused))
#endif
@@ -0,0 +1,35 @@
#ifndef _LINUX_COMPILER_H
#define _LINUX_COMPILER_H
/* Linux section/lifetime annotations: no-ops in this userspace shim. */
#define __init
#define __exit
#define __devinit
#define __devexit
/* Branch-prediction hints. */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __read_mostly
/* Common GCC attribute wrappers. */
#define __aligned(x) __attribute__((aligned(x)))
#define __packed __attribute__((packed))
#define __cold __attribute__((cold))
#define __hot __attribute__((hot))
/* Compiler-only barrier: forbids reordering of memory accesses across it. */
#define barrier() __asm__ __volatile__("" : : : "memory")
/* Single, untearable (volatile) store/load of a variable. */
#define WRITE_ONCE(var, val) \
(*((volatile typeof(var) *)&(var)) = (val))
#define READ_ONCE(var) \
(*((volatile typeof(var) *)&(var)))
/* BUG FIX: <stddef.h> also defines offsetof; defining it unconditionally here
 * caused a macro-redefinition warning (or error with -Werror) whenever both
 * headers were included. Only provide it if nobody else has. */
#ifndef offsetof
#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
#endif
/* Recover a containing struct pointer from a pointer to one of its members. */
#define container_of(ptr, type, member) \
((type *)((char *)(ptr) - offsetof(type, member)))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
@@ -0,0 +1,37 @@
#ifndef _LINUX_DEVICE_H
#define _LINUX_DEVICE_H
#include <linux/types.h>
#include <stddef.h>
/* Minimal driver-model shim: enough of struct device for ported drivers. */
struct device_driver {
const char *name;
void *owner;
};
struct device {
struct device_driver *driver;
void *driver_data;
void *platform_data;
void *of_node;
u64 dma_mask;
};
/* Per-device driver-private pointer accessors (mirror the Linux API). */
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
}
static inline void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}
struct class {
const char *name;
};
/*
 * Device-managed allocation, freed automatically when the device goes away.
 * BUG FIX: the Linux API returns void* (a pointer to the new allocation);
 * this was mis-declared as returning struct device*, which forced every
 * caller into a bogus cast.
 */
extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t flags);
extern void devm_kfree(struct device *dev, void *ptr);
#endif
@@ -0,0 +1,35 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
#include <linux/types.h>
/* DMA transfer direction, as seen from the device. */
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
/* Bitmask covering the low n address bits (n == 64 yields all-ones). */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
/* Implemented by the Rust dma module; `dev` is opaque here. */
extern void *dma_alloc_coherent(void *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags);
extern void dma_free_coherent(void *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(void *dev, void *ptr, size_t size,
enum dma_data_direction dir);
extern void dma_unmap_single(void *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
/* Stub: always reports success. NOTE(review): assumes the Rust mapping
 * implementation can never fail -- confirm against the dma module. */
static inline int dma_mapping_error(void *dev, dma_addr_t addr)
{
(void)dev;
(void)addr;
return 0;
}
extern int dma_set_mask(void *dev, u64 mask);
extern int dma_set_coherent_mask(void *dev, u64 mask);
#endif
@@ -0,0 +1,34 @@
#ifndef _LINUX_ERRNO_H
#define _LINUX_ERRNO_H
#define EPERM 1
#define ENOENT 2
#define ESRCH 3
#define EINTR 4
#define EIO 5
#define ENXIO 6
#define E2BIG 7
#define ENOEXEC 8
#define EBADF 9
#define ECHILD 10
#define EAGAIN 11
#define ENOMEM 12
#define EACCES 13
#define EFAULT 14
#define EBUSY 16
#define EEXIST 17
#define ENODEV 19
#define EINVAL 22
#define ENFILE 23
#define EMFILE 24
#define ENOTTY 25
#define EPIPE 32
#define ERANGE 34
#define ENOSYS 38
#define ENODATA 61
#define ENOTSUP 95
#define ETIMEDOUT 110
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-4096)
#endif
@@ -0,0 +1,26 @@
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H
#include <linux/types.h>
/* An in-memory firmware blob loaded by name (e.g. amdgpu microcode). */
struct firmware {
size_t size;
const u8 *data;
void *priv;
};
struct device;
/* Implemented by the Rust firmware module; mirrors the Linux request API.
 * request_firmware allocates *fw; release_firmware frees it. */
extern int request_firmware(const struct firmware **fw, const char *name,
struct device *dev);
extern void release_firmware(const struct firmware *fw);
/* Async variant: `cont` is invoked with the loaded blob (or NULL on failure
 * in Linux semantics -- NOTE(review): confirm the Rust impl matches). */
extern int request_firmware_nowait(
struct device *dev, int uevent,
const char *name, void *context,
void (*cont)(const struct firmware *fw, void *context));
extern int request_firmware_direct(const struct firmware **fw,
const char *name, struct device *dev);
#endif
@@ -0,0 +1,46 @@
#ifndef _LINUX_IDR_H
#define _LINUX_IDR_H
#include <linux/types.h>
/*
 * ID-allocator shim. Currently a STUB: storage is an opaque blob and all
 * operations are no-ops, so drivers relying on real id->pointer lookups will
 * not work yet -- see the individual notes below.
 */
struct idr {
unsigned char __opaque[256];
};
static inline void idr_init(struct idr *idr)
{
(void)idr;
}
/* Stub: always "allocates" id 0 and does not remember `ptr`. */
static inline int idr_alloc(struct idr *idr, void *ptr, int start, int end, u32 flags)
{
(void)idr;
(void)ptr;
(void)start;
(void)end;
(void)flags;
return 0;
}
static inline void idr_remove(struct idr *idr, int id)
{
(void)idr;
(void)id;
}
/* Stub: lookup always misses (returns NULL). */
static inline void *idr_find(struct idr *idr, int id)
{
(void)idr;
(void)id;
return (void *)0;
}
static inline void idr_destroy(struct idr *idr)
{
(void)idr;
}
/* Stub iteration: entry starts NULL, so the loop body never executes. */
#define idr_for_each_entry(idr, entry, id) \
for ((id) = 0, (entry) = (void *)0; (entry); (id)++)
#endif
@@ -0,0 +1,38 @@
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H
#include <linux/types.h>
#include <linux/irq.h>
/*
 * Interrupt-context helpers. In this userspace port, driver code never runs
 * in hard-IRQ context, so the context queries report "not in interrupt" and
 * the local-IRQ toggles are no-ops.
 */
static inline int in_interrupt(void)
{
return 0;
}
static inline int in_irq(void)
{
return 0;
}
/* NOTE(review): Linux's local_irq_save(flags) is a macro taking the flags
 * lvalue directly; this shim takes a POINTER, so ported Linux code needs a
 * call-site edit (&flags). Confirm this divergence is intentional. */
static inline void local_irq_save(unsigned long *flags)
{
(void)flags;
}
static inline void local_irq_restore(unsigned long flags)
{
(void)flags;
}
static inline void local_irq_disable(void) {}
static inline void local_irq_enable(void) {}
#define disable_irq_nosync(irq) ((void)(irq))
#define enable_irq(irq) ((void)(irq))
/* IRQ flag bits accepted but ignored by the userspace IRQ scheme. */
#define IRQF_NO_SUSPEND 0x0000U
#define IRQF_FORCE_RESUME 0x0000U
#define IRQF_NO_THREAD 0x0000U
#define IRQF_EARLY_RESUME 0x0000U
#endif
@@ -0,0 +1,41 @@
#ifndef _LINUX_IO_H
#define _LINUX_IO_H
#include <linux/types.h>
#include <stddef.h>
/* MMIO mapping and register access, implemented by the Rust io module.
 * NOTE(review): iounmap() takes a size argument here, unlike Linux -- the
 * Redox memory-unmap path needs the length; call sites must pass it. */
extern void *ioremap(phys_addr_t phys_addr, size_t size);
extern void iounmap(void *addr, size_t size);
extern u32 readl(const void *addr);
extern void writel(u32 val, void *addr);
extern u64 readq(const void *addr);
extern void writeq(u64 val, void *addr);
extern u8 readb(const void *addr);
extern void writeb(u8 val, void *addr);
extern u16 readw(const void *addr);
extern void writew(u16 val, void *addr);
/* Bulk MMIO copies: plain memcpy/memset here, since mapped MMIO is ordinary
 * addressable memory from userspace on this platform. */
static inline void memcpy_toio(void *dst, const void *src, size_t count)
{
__builtin_memcpy(dst, src, count);
}
static inline void memcpy_fromio(void *dst, const void *src, size_t count)
{
__builtin_memcpy(dst, src, count);
}
static inline void memset_io(void *dst, int c, size_t count)
{
__builtin_memset(dst, c, count);
}
/* ioreadN/iowriteN aliases onto the typed accessors above. */
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
#define ioread32(addr) readl(addr)
#define iowrite8(v, a) writeb(v, a)
#define iowrite16(v, a) writew(v, a)
#define iowrite32(v, a) writel(v, a)
#endif
@@ -0,0 +1,24 @@
#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H
#include <linux/types.h>
/* IRQ handler return codes (Linux irqreturn_t values). */
typedef unsigned int irqreturn_t;
#define IRQ_NONE 0
#define IRQ_HANDLED 1
#define IRQ_WAKE_THREAD 2
/* request_irq() flag bits (subset of the Linux IRQF_* set). */
#define IRQF_SHARED 0x0001U
#define IRQF_TRIGGER_RISING 0x0010U
#define IRQF_TRIGGER_FALLING 0x0020U
#define IRQF_TRIGGER_HIGH 0x0040U
#define IRQF_TRIGGER_LOW 0x0080U
typedef irqreturn_t (*irq_handler_t)(int irq, void *dev_id);
/* Implemented by the Rust irq module: register/unregister a handler for a
 * (shared) interrupt line; `dev_id` disambiguates shared handlers. */
extern int request_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id);
extern void free_irq(unsigned int irq, void *dev_id);
#endif
@@ -0,0 +1,24 @@
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
#include <linux/types.h>
#include <time.h>
static inline u64 redox_get_jiffies(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (u64)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
}
#define jiffies redox_get_jiffies()
#define msecs_to_jiffies(msec) ((unsigned long)(msec))
#define usecs_to_jiffies(usec) ((unsigned long)((usec) / 1000))
#define time_after(a, b) ((long)((b) - (a)) < 0)
#define time_before(a, b) time_after(b, a)
#define MAX_JIFFY_OFFSET ((unsigned long)(~0UL >> 1))
#endif
@@ -0,0 +1,62 @@
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
/* Type-safe min/max via statement expressions (single evaluation of args). */
#define min(a, b) \
({ typeof(a) _a = (a); typeof(b) _b = (b); _a < _b ? _a : _b; })
#define max(a, b) \
({ typeof(a) _a = (a); typeof(b) _b = (b); _a > _b ? _a : _b; })
#define clamp(val, lo, hi) min(max(val, lo), hi)
/* Casting variants: note these evaluate a/b twice, as in Linux. */
#define min_t(type, a, b) \
((type)(a) < (type)(b) ? (type)(a) : (type)(b))
#define max_t(type, a, b) \
((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define min3(a, b, c) min((a), min((b), (c)))
#define max3(a, b, c) max((a), max((b), (c)))
/* Integer division helpers (round up / down / to nearest). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_DOWN(n, d) ((n) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))
#define round_down(x, y) (((x) / (y)) * (y))
/* Power-of-two alignment helpers (`a` must be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while(0)
/*
 * Sleep helpers. BUG FIX: these previously forwarded to usleep(), but POSIX
 * permits usleep() to fail with EINVAL for arguments >= 1,000,000 us, so
 * msleep(1000) and up could silently not sleep. nanosleep() has no such cap.
 */
static inline void __redox_delay_ns(unsigned long long ns)
{
    struct timespec req;
    req.tv_sec = (time_t)(ns / 1000000000ull);
    req.tv_nsec = (long)(ns % 1000000000ull);
    nanosleep(&req, (struct timespec *)0);
}
static inline void msleep(unsigned int msecs)
{
    __redox_delay_ns((unsigned long long)msecs * 1000000ull);
}
static inline void udelay(unsigned long usecs)
{
    __redox_delay_ns((unsigned long long)usecs * 1000ull);
}
static inline void mdelay(unsigned long msecs)
{
    __redox_delay_ns((unsigned long long)msecs * 1000000ull);
}
#define lower_32_bits(n) ((u32)(n))
/* Two 16-bit shifts avoid undefined behavior when `n` is a 32-bit type. */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))
#endif
@@ -0,0 +1,90 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
#include <stddef.h>
/*
 * Circular doubly-linked intrusive list, API-compatible with Linux list.h.
 * A list_head is embedded in the containing struct; list_entry/container_of
 * recovers the container. An empty list is a head pointing at itself.
 * (`new_node` is used instead of Linux's `new` to stay C++-safe.)
 */
struct list_head {
struct list_head *prev;
struct list_head *next;
};
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
static inline void INIT_LIST_HEAD(struct list_head *list)
{
list->prev = list;
list->next = list;
}
/* Splice `new_node` between `prev` and `next` (internal helper). */
static inline void __list_add(struct list_head *new_node,
struct list_head *prev,
struct list_head *next)
{
next->prev = new_node;
new_node->next = next;
new_node->prev = prev;
prev->next = new_node;
}
/* Insert right after the head (stack/LIFO order). */
static inline void list_add(struct list_head *new_node, struct list_head *head)
{
__list_add(new_node, head, head->next);
}
/* Insert right before the head (queue/FIFO order). */
static inline void list_add_tail(struct list_head *new_node, struct list_head *head)
{
__list_add(new_node, head->prev, head);
}
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
next->prev = prev;
prev->next = next;
}
/* Unlink an entry; its pointers are NULLed so reuse without re-init faults
 * fast (Linux uses poison values for the same purpose). */
static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = (struct list_head *)0;
entry->next = (struct list_head *)0;
}
static inline int list_empty(const struct list_head *head)
{
return head->next == head;
}
static inline int list_is_last(const struct list_head *list,
const struct list_head *head)
{
return list->next == head;
}
/* Map a list_head pointer back to its containing struct. */
#define list_entry(ptr, type, member) \
((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
#define list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
/* _safe variants cache the next node so the current one may be deleted. */
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
#endif
@@ -0,0 +1,36 @@
#ifndef _LINUX_MM_H
#define _LINUX_MM_H
#include <linux/types.h>
#include <linux/slab.h>
#include <stddef.h>
/* Opaque stand-in for the kernel's struct page. */
struct page {
unsigned char __opaque[64];
};
/* Page allocation shims: backed by kmalloc/kfree rather than a page
 * allocator; `order` selects 4096 << order bytes. NOTE(review): the result
 * is only malloc-aligned, not necessarily page-aligned -- confirm callers
 * do not rely on page alignment. */
#define __get_free_pages(flags, order) \
((unsigned long)kmalloc(4096 << (order), (flags)))
#define free_pages(addr, order) \
kfree((const void *)(addr))
/* vmalloc/vfree shims: no separate vmap area, just heap allocations. */
static inline void *vmalloc(unsigned long size)
{
return kmalloc(size, 0);
}
static inline void vfree(const void *addr)
{
kfree(addr);
}
/* One zeroed page's worth of heap memory, returned as an address. */
static inline unsigned long get_zeroed_page(unsigned int flags)
{
void *p = kzalloc(4096, flags);
return (unsigned long)p;
}
#define PageReserved(page) (0)
#endif
@@ -0,0 +1,29 @@
#ifndef _LINUX_MODULE_H
#define _LINUX_MODULE_H
/*
 * Module metadata and lifecycle macros: all no-ops, since ported drivers
 * build as ordinary userspace daemons rather than loadable kernel modules.
 * module_init/module_exit entry points must be invoked explicitly by the
 * daemon's main().
 */
#define MODULE_LICENSE(x)
#define MODULE_AUTHOR(x)
#define MODULE_DESCRIPTION(x)
#define MODULE_VERSION(x)
#define MODULE_ALIAS(x)
#define MODULE_DEVICE_TABLE(type, name)
#define module_init(x)
#define module_exit(x)
#define THIS_MODULE ((void *)0)
#define EXPORT_SYMBOL(x)
#define EXPORT_SYMBOL_GPL(x)
#define EXPORT_SYMBOL_NS(x, ns)
#define MODULE_PARM_DESC(name, desc)
#define module_param(name, type, perm)
#define MODULE_INFO(tag, info)
/* Placeholder module handle type. */
typedef struct {
int unused;
} module_t;
#endif
@@ -0,0 +1,23 @@
#ifndef _LINUX_MUTEX_H
#define _LINUX_MUTEX_H
#include <linux/types.h>
/* Sleeping lock; storage is opaque, the real state lives in the Rust sync
 * module which implements the extern functions below. */
struct mutex {
unsigned char __opaque[64];
};
extern void mutex_init(struct mutex *lock);
extern void mutex_lock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
extern int mutex_is_locked(struct mutex *lock);
/* STUB: always claims the lock was acquired. NOTE(review): unsafe if two
 * threads genuinely race on trylock -- confirm single-threaded use or wire
 * this through to the Rust implementation. */
static inline int mutex_trylock(struct mutex *lock)
{
(void)lock;
return 1;
}
#define DEFINE_MUTEX(name) struct mutex name = { .__opaque = {0} }
#endif
@@ -0,0 +1,71 @@
#ifndef _LINUX_PCI_H
#define _LINUX_PCI_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <stddef.h>
/*
 * PCI vendor IDs, matching Linux's include/linux/pci_ids.h.
 * BUG FIX: PCI_VENDOR_ID_AMD was 0x1002, but in Linux 0x1002 is
 * PCI_VENDOR_ID_ATI (AMD/ATI GPUs -- what amdgpu's id tables use) and
 * PCI_VENDOR_ID_AMD is 0x1022 (CPUs/chipsets). Ported Linux driver code
 * compares against these names, so the values must agree with Linux.
 */
#define PCI_VENDOR_ID_ATI 0x1002U
#define PCI_VENDOR_ID_AMD 0x1022U
#define PCI_VENDOR_ID_INTEL 0x8086U
#define PCI_VENDOR_ID_NVIDIA 0x10DEU
#define PCI_ANY_ID (~0U)
/* One row of a driver's supported-device match table. */
struct pci_device_id {
    u32 vendor;
    u32 device;
    u32 subvendor;
    u32 subdevice;
    u32 class;
    u32 class_mask;
    unsigned long driver_data;
};
/* A discovered PCI function: IDs, location, BARs, and the embedded device. */
struct pci_dev {
    u16 vendor;
    u16 device;
    u8 bus_number;
    u8 dev_number;
    u8 func_number;
    u8 revision;
    u32 irq;
    u64 resource_start[6];
    u64 resource_len[6];
    void *driver_data;
    struct device device;
};
/* PCI driver callbacks, invoked by the Rust pci module on match/unplug. */
struct pci_driver {
    const char *name;
    const struct pci_device_id *id_table;
    int (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
    void (*remove)(struct pci_dev *dev);
    int (*suspend)(struct pci_dev *dev, u32 state);
    int (*resume)(struct pci_dev *dev);
    void (*shutdown)(struct pci_dev *dev);
};
/* Implemented by the Rust pci module. */
extern int pci_enable_device(struct pci_dev *dev);
extern void pci_disable_device(struct pci_dev *dev);
extern void pci_set_master(struct pci_dev *dev);
extern void *pci_iomap(struct pci_dev *dev, unsigned int bar, size_t max_len);
extern void pci_iounmap(struct pci_dev *dev, void *addr, size_t size);
extern int pci_read_config_dword(struct pci_dev *dev, unsigned int offset, u32 *val);
extern int pci_write_config_dword(struct pci_dev *dev, unsigned int offset, u32 val);
extern u64 pci_resource_start(struct pci_dev *dev, unsigned int bar);
extern u64 pci_resource_len(struct pci_dev *dev, unsigned int bar);
extern int pci_register_driver(struct pci_driver *drv);
extern void pci_unregister_driver(struct pci_driver *drv);
/* Also defined (identically) in <linux/module.h>; guard to avoid clashes. */
#ifndef MODULE_DEVICE_TABLE
#define MODULE_DEVICE_TABLE(type, name)
#endif
/* Designated-initializer helper for match-table entries. */
#define PCI_DEVICE(vend, dev) \
    .vendor = (vend), .device = (dev), \
    .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
#endif
@@ -0,0 +1,56 @@
#ifndef _LINUX_PRINTK_H
#define _LINUX_PRINTK_H
#include <stdio.h>
/* Kernel log-level prefixes (kept for source compatibility; the shim
 * routes by macro instead of parsing these). */
#define KERN_SOH "\001"
#define KERN_EMERG KERN_SOH "0"
#define KERN_ALERT KERN_SOH "1"
#define KERN_CRIT KERN_SOH "2"
#define KERN_ERR KERN_SOH "3"
#define KERN_WARNING KERN_SOH "4"
#define KERN_NOTICE KERN_SOH "5"
#define KERN_INFO KERN_SOH "6"
#define KERN_DEBUG KERN_SOH "7"
#define KERN_DEFAULT KERN_SOH "d"
/* pr_* map to stdio with a bracketed level tag; info/notice go to stdout,
 * warnings and worse to stderr. A newline is appended automatically. */
#define pr_info(fmt, ...) \
fprintf(stdout, "[INFO] " fmt "\n", ##__VA_ARGS__)
#define pr_warn(fmt, ...) \
fprintf(stderr, "[WARN] " fmt "\n", ##__VA_ARGS__)
#define pr_err(fmt, ...) \
fprintf(stderr, "[ERR] " fmt "\n", ##__VA_ARGS__)
/* Debug output is compiled out entirely. */
#define pr_debug(fmt, ...) \
((void)0)
#define pr_emerg(fmt, ...) \
fprintf(stderr, "[EMERG] " fmt "\n", ##__VA_ARGS__)
#define pr_alert(fmt, ...) \
fprintf(stderr, "[ALERT] " fmt "\n", ##__VA_ARGS__)
#define pr_crit(fmt, ...) \
fprintf(stderr, "[CRIT] " fmt "\n", ##__VA_ARGS__)
#define pr_notice(fmt, ...) \
fprintf(stdout, "[NOTE] " fmt "\n", ##__VA_ARGS__)
/* Raw printk: no tag and no implicit newline, matching Linux semantics. */
#define printk(fmt, ...) \
fprintf(stdout, fmt, ##__VA_ARGS__)
/* dev_* variants currently ignore the device argument. */
#define dev_info(dev, fmt, ...) \
pr_info(fmt, ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) \
pr_warn(fmt, ##__VA_ARGS__)
#define dev_err(dev, fmt, ...) \
pr_err(fmt, ##__VA_ARGS__)
#define dev_dbg(dev, fmt, ...) \
pr_debug(fmt, ##__VA_ARGS__)
#endif
@@ -0,0 +1,33 @@
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
#include <linux/types.h>
#include <stddef.h>
#include <stdint.h>
/* GFP flags: distinct values so the Rust allocator can tell them apart
 * (GFP_DMA32 selects the 32-bit-addressable path in the devres tracker). */
#define GFP_KERNEL 0U
#define GFP_ATOMIC 1U
#define GFP_DMA32 2U
#define GFP_HIGHUSER 3U
#define GFP_NOWAIT 4U
#define GFP_DMA 5U
#define __GFP_NOWARN 0U
#define __GFP_ZERO 0U
/* Implemented by the Rust memory module; kzalloc returns zeroed storage. */
extern void *kmalloc(size_t size, gfp_t flags);
extern void *kzalloc(size_t size, gfp_t flags);
extern void kfree(const void *ptr);
/*
 * BUG FIX: array allocators now reject n * size overflow (returning NULL)
 * as Linux's kmalloc_array/kcalloc do, instead of silently allocating a
 * wrapped-around, too-small buffer. Note `n`/`size` are evaluated more
 * than once, as in the original macros.
 */
#define kmalloc_array(n, size, flags) \
    (((n) != 0 && (size) > SIZE_MAX / (n)) ? (void *)0 : kmalloc((n) * (size), flags))
#define kcalloc(n, size, flags) \
    (((n) != 0 && (size) > SIZE_MAX / (n)) ? (void *)0 : kzalloc((n) * (size), flags))
/* Duplicate `len` bytes of `src` into a fresh allocation (NULL on failure). */
#define kmemdup(src, len, flags) ({ \
    void *__p = kmalloc(len, flags); \
    if (__p) __builtin_memcpy(__p, src, len); \
    __p; \
})
#endif
@@ -0,0 +1,28 @@
#ifndef _LINUX_SPINLOCK_H
#define _LINUX_SPINLOCK_H
#include <linux/types.h>
/* Busy-wait lock; the extern operations are implemented in the Rust sync
 * module. Only the byte of state lives here. */
typedef struct spinlock {
volatile unsigned char __locked;
} spinlock_t;
extern void spin_lock_init(spinlock_t *lock);
extern void spin_lock(spinlock_t *lock);
extern void spin_unlock(spinlock_t *lock);
/* NOTE(review): Linux's spin_lock_irqsave is a macro writing into a flags
 * lvalue; here flags is returned AND passed by pointer -- confirm which one
 * the Rust implementation honors. */
extern unsigned long spin_lock_irqsave(spinlock_t *lock, unsigned long *flags);
extern void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);
/* In userspace there are no local IRQs to mask: _irq variants just lock. */
static inline void spin_lock_irq(spinlock_t *lock)
{
spin_lock(lock);
}
static inline void spin_unlock_irq(spinlock_t *lock)
{
spin_unlock(lock);
}
#define DEFINE_SPINLOCK(name) spinlock_t name = { .__locked = 0 }
#endif
@@ -0,0 +1,51 @@
#ifndef _LINUX_TIMER_H
#define _LINUX_TIMER_H
#include <linux/types.h>
#include <linux/compiler.h>
/*
 * Kernel-timer shim using the legacy (pre-4.15) Linux timer API shape
 * (callback takes an unsigned long cookie). Currently a STUB: timers are
 * recorded but never fire -- see the notes on each operation.
 */
struct timer_list {
void (*function)(unsigned long data);
unsigned long data;
unsigned long expires;
unsigned char __opaque[64];
};
/* Initialize callback/cookie; does not arm the timer. */
static inline void setup_timer(struct timer_list *timer,
void (*function)(unsigned long),
unsigned long data)
{
timer->function = function;
timer->data = data;
timer->expires = 0;
}
/* STUB: pretends to (re)arm and reports "was not pending" (0). */
static inline int mod_timer(struct timer_list *timer, unsigned long expires)
{
(void)timer;
(void)expires;
return 0;
}
/* STUB: reports "was not pending" (0). */
static inline int del_timer(struct timer_list *timer)
{
(void)timer;
return 0;
}
static inline int del_timer_sync(struct timer_list *timer)
{
(void)timer;
return 0;
}
/* STUB: no timer is ever pending. */
static inline int timer_pending(const struct timer_list *timer)
{
(void)timer;
return 0;
}
#define DEFINE_TIMER(_name, _function, _flags, _data) \
struct timer_list _name = { .function = (_function), .data = (_data) }
#endif
@@ -0,0 +1,29 @@
#ifndef _LINUX_TYPES_H
#define _LINUX_TYPES_H
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/types.h>
/* Linux fixed-width integer aliases. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;
/* Physical and DMA (bus) addresses; 64-bit on all supported targets. */
typedef u64 phys_addr_t;
typedef u64 dma_addr_t;
/* Sparse/annotation qualifiers: no-ops without the checker. */
#define __iomem
#define __user
#define __force
#define __must_check
/* Allocation-flags type used by the slab/dma shims. */
typedef unsigned int gfp_t;
#endif
@@ -0,0 +1,47 @@
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
#include <linux/types.h>
#include <linux/compiler.h>
/*
 * Wait-queue shim. The queue itself carries no state yet: waiting is a
 * busy-spin on the condition, wake-ups are no-ops, and the _timeout/
 * _interruptible variants do not actually wait (they just sample the
 * condition once). NOTE(review): the `pause` instruction is x86-specific.
 */
struct wait_queue_head {
    unsigned char __opaque[128];
};
static inline void init_waitqueue_head(struct wait_queue_head *wq)
{
    (void)wq;
}
/*
 * BUG FIX: the spin's asm now carries a "memory" clobber. Without it the
 * compiler was free to hoist the `condition` load out of the loop, turning
 * a wait on a flag set by another thread into an infinite loop.
 */
#define wait_event(wq, condition) \
    do { while (!(condition)) { __asm__ volatile("pause" : : : "memory"); } } while(0)
/* Sample-once stubs: return value mimics the Linux convention
 * (nonzero/0 for timeout variants, 0/-ERESTARTSYS for interruptible). */
#define wait_event_timeout(wq, condition, timeout) \
    ({ (void)(wq); (condition) ? 1 : 0; })
#define wait_event_interruptible(wq, condition) \
    ({ (void)(wq); (condition) ? 0 : -512; })
#define wait_event_interruptible_timeout(wq, condition, timeout) \
    ({ (void)(wq); (condition) ? 1 : 0; })
static inline void wake_up(struct wait_queue_head *wq)
{
    (void)wq;
}
static inline void wake_up_interruptible(struct wait_queue_head *wq)
{
    (void)wq;
}
#define DEFINE_WAIT(name) \
    int name = 0
#define finish_wait(wq, wait) \
    do { (void)(wq); (void)(wait); } while(0)
#define prepare_to_wait(wq, wait, state) \
    do { (void)(wq); (void)(wait); (void)(state); } while(0)
#endif
@@ -0,0 +1,42 @@
#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H
#include <linux/types.h>
/* Deferred-work shim; execution is provided by the Rust workqueue module. */
/* A unit of deferred work: `func` runs later on a workqueue thread. */
struct work_struct {
void (*func)(struct work_struct *work);
unsigned char __opaque[64];
};
/* Work plus an associated delay timer. */
struct delayed_work {
struct work_struct work;
unsigned char __timer_opaque[64];
};
struct workqueue_struct {
unsigned char __opaque[128];
};
typedef void (*work_func_t)(struct work_struct *work);
/* Implemented by the Rust workqueue module. */
extern struct workqueue_struct *alloc_workqueue(const char *name,
unsigned int flags,
int max_active);
extern void destroy_workqueue(struct workqueue_struct *wq);
extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern void flush_workqueue(struct workqueue_struct *wq);
/* Bind the callback; the opaque state is left untouched (zero-init the
 * struct before use). */
#define INIT_WORK(_work, _func) \
do { (_work)->func = (_func); } while(0)
#define INIT_DELAYED_WORK(_work, _func) \
do { (_work)->work.func = (_func); } while(0)
/* System-default-queue variants. */
extern int schedule_work(struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
extern void flush_scheduled_work(void);
#define create_singlethread_workqueue(name) alloc_workqueue(name, 0, 1)
#define create_workqueue(name) alloc_workqueue(name, 0, 0)
#endif
@@ -0,0 +1,14 @@
//! Linux Kernel API compatibility layer for Redox OS (LinuxKPI-style).
//!
//! Provides C headers and Rust FFI implementations that translate Linux kernel APIs
//! to Redox OS primitives, enabling porting of Linux C drivers as Redox userspace daemons.

pub mod rust_impl;

// Re-export the shim modules at the crate root for convenience.
pub use rust_impl::device;
pub use rust_impl::dma;
pub use rust_impl::drm_shim;
pub use rust_impl::firmware;
pub use rust_impl::io;
pub use rust_impl::irq;
pub use rust_impl::memory;
pub use rust_impl::pci;
pub use rust_impl::sync;
pub use rust_impl::workqueue;
@@ -0,0 +1,103 @@
use std::alloc::Layout;
use std::collections::HashMap;
use std::sync::Mutex;
// Must stay in sync with GFP_DMA32 in memory.rs: the flag bit that routes an
// allocation through the <4 GiB DMA path (and thus a different Layout).
const GFP_DMA32: u32 = 2;
/// Wrapper to make raw pointers `Send`, required because `DEVRES_MAP` is a
/// global `Mutex` (which needs `T: Send`). Raw pointers are not `Send` by
/// default since the compiler can't prove thread-safety. Here each `(ptr,
/// Layout)` pair is exclusively owned by the device that allocated it — only
/// freed via `devm_kfree` or `devres_free_all` — so sending across threads is
/// safe.
struct TrackedAlloc(*mut u8, Layout);
unsafe impl Send for TrackedAlloc {}
lazy_static::lazy_static! {
    /// Per-device (keyed by the device pointer as usize) list of devm
    /// allocations, so `devres_free_all` can release everything a device
    /// ever devm-allocated.
    static ref DEVRES_MAP: Mutex<HashMap<usize, Vec<TrackedAlloc>>> =
        Mutex::new(HashMap::new());
}
/// Round `size` up to the next multiple of `align`.
/// `align` must be a power of two for the mask trick to be valid.
fn align_up(size: usize, align: usize) -> usize {
    let mask = align - 1;
    (size + mask) & !mask
}
/// Compute the `Layout` that `memory::kzalloc` used for this request so the
/// devres bookkeeping mirrors the real allocation: page-aligned for
/// GFP_DMA32, otherwise 16-byte aligned with the size rounded up to 16.
/// Returns `None` for zero-sized requests.
fn tracked_layout(size: usize, flags: u32) -> Option<Layout> {
    if size == 0 {
        return None;
    }
    let (alloc_size, alignment) = if flags & GFP_DMA32 != 0 {
        (size, 4096)
    } else {
        (align_up(size, 16), 16)
    };
    Layout::from_size_align(alloc_size, alignment).ok()
}
#[no_mangle]
/// Device-managed zeroed allocation: like `kzalloc`, but the pointer is also
/// recorded against `dev` so `devres_free_all(dev)` can release it later.
/// With a null `dev` (or an untrackable layout) it degrades to a plain
/// `kzalloc` — the caller must free it with `devm_kfree`/`kfree` itself.
pub extern "C" fn devm_kzalloc(dev: *mut u8, size: usize, flags: u32) -> *mut u8 {
    let ptr = super::memory::kzalloc(size, flags);
    if ptr.is_null() || dev.is_null() {
        return ptr;
    }
    if let Some(layout) = tracked_layout(size, flags) {
        if let Ok(mut devres_map) = DEVRES_MAP.lock() {
            devres_map
                .entry(dev as usize)
                .or_default()
                .push(TrackedAlloc(ptr, layout));
        }
    }
    ptr
}
#[no_mangle]
pub extern "C" fn devm_kfree(dev: *mut u8, ptr: *mut u8) {
if ptr.is_null() {
return;
}
if !dev.is_null() {
if let Ok(mut devres_map) = DEVRES_MAP.lock() {
let dev_key = dev as usize;
let should_remove = if let Some(entries) = devres_map.get_mut(&dev_key) {
if let Some(index) = entries.iter().position(|alloc| alloc.0 == ptr) {
entries.swap_remove(index);
}
entries.is_empty()
} else {
false
};
if should_remove {
devres_map.remove(&dev_key);
}
}
}
super::memory::kfree(ptr);
}
#[no_mangle]
/// Release every devm allocation recorded for `dev` (called on device
/// teardown). The tracking list is detached under the lock, then freed with
/// the lock released so `kfree` never runs while `DEVRES_MAP` is held.
pub extern "C" fn devres_free_all(dev: *mut u8) {
    if dev.is_null() {
        return;
    }
    let detached = DEVRES_MAP
        .lock()
        .ok()
        .and_then(|mut devres_map| devres_map.remove(&(dev as usize)));
    for alloc in detached.into_iter().flatten() {
        super::memory::kfree(alloc.0);
    }
}
@@ -0,0 +1,93 @@
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::ptr;
use syscall::CallFlags;
lazy_static::lazy_static! {
    /// File descriptor for the memory-translation scheme, opened once for
    /// the process lifetime; `None` when the scheme is unavailable.
    /// (Dropped the previous redundant identity `.map(|fd| fd)`.)
    static ref TRANSLATION_FD: Option<usize> = {
        libredox::call::open("/scheme/memory/translation",
            syscall::flag::O_CLOEXEC as i32, 0)
            .ok()
    };
}
/// Translate a virtual address to a physical one via the translation scheme.
/// Returns 0 on failure.
///
/// Fix: the previous version discarded the `call_ro` error, in which case
/// `buf` still held the input and the *virtual* address was returned as a
/// bogus non-zero "physical" address.
fn virt_to_phys(virt: usize) -> usize {
    let raw = match *TRANSLATION_FD {
        Some(fd) => fd,
        None => return 0,
    };
    let mut buf = virt.to_ne_bytes();
    match libredox::call::call_ro(raw, &mut buf, CallFlags::empty(), &[]) {
        Ok(_) => usize::from_ne_bytes(buf),
        Err(_) => 0,
    }
}
#[no_mangle]
/// Allocate a zeroed, page-aligned buffer and report its physical address in
/// `*dma_handle` for device DMA. Returns null on bad arguments, allocation
/// failure, or failed virt->phys translation (the buffer is freed again in
/// the translation-failure case).
///
/// NOTE(review): only the base address is translated; physical contiguity of
/// a multi-page allocation is not verified — confirm for buffers > 4 KiB.
pub extern "C" fn dma_alloc_coherent(
    _dev: *mut u8,
    size: usize,
    dma_handle: *mut u64,
    _flags: u32,
) -> *mut u8 {
    if size == 0 || dma_handle.is_null() {
        return ptr::null_mut();
    }
    // This exact (size, 4096) layout must be recomputed by dma_free_coherent.
    let layout = match Layout::from_size_align(size, 4096) {
        Ok(l) => l,
        Err(_) => return ptr::null_mut(),
    };
    let vaddr = unsafe { alloc_zeroed(layout) };
    if vaddr.is_null() {
        return ptr::null_mut();
    }
    let phys = virt_to_phys(vaddr as usize);
    if phys == 0 {
        // Without a physical address the device cannot DMA here; undo.
        unsafe { dealloc(vaddr, layout) };
        return ptr::null_mut();
    }
    unsafe { *dma_handle = phys as u64 };
    log::debug!(
        "dma_alloc_coherent: {} bytes at virt={:#x} phys={:#x}",
        size,
        vaddr as usize,
        phys
    );
    vaddr
}
#[no_mangle]
/// Free a buffer from `dma_alloc_coherent`. `size` must be the original
/// request size: the `Layout` (size, align 4096) is recomputed here and must
/// match the one used at allocation time or `dealloc` is undefined behavior.
pub extern "C" fn dma_free_coherent(_dev: *mut u8, size: usize, vaddr: *mut u8, _dma_handle: u64) {
    if vaddr.is_null() || size == 0 {
        return;
    }
    let layout = match Layout::from_size_align(size, 4096) {
        Ok(l) => l,
        Err(_) => return,
    };
    unsafe { dealloc(vaddr, layout) };
}
#[no_mangle]
/// Map a buffer for streaming DMA. On this port that is just a virt->phys
/// translation: the returned "DMA address" is the physical address, or 0 for
/// a null pointer / failed translation.
pub extern "C" fn dma_map_single(_dev: *mut u8, ptr: *mut u8, _size: usize, _dir: u32) -> u64 {
    if ptr.is_null() {
        0
    } else {
        virt_to_phys(ptr as usize) as u64
    }
}
#[no_mangle]
/// No-op: dma_map_single keeps no per-mapping state, so there is nothing to
/// tear down.
pub extern "C" fn dma_unmap_single(_dev: *mut u8, _addr: u64, _size: usize, _dir: u32) {}
#[no_mangle]
/// Accepts any DMA mask and reports success; the mask is not enforced here.
pub extern "C" fn dma_set_mask(_dev: *mut u8, _mask: u64) -> i32 {
    0
}
#[no_mangle]
/// Accepts any coherent DMA mask and reports success (not enforced).
pub extern "C" fn dma_set_coherent_mask(_dev: *mut u8, _mask: u64) -> i32 {
    0
}
@@ -0,0 +1,265 @@
use std::collections::{BTreeMap, HashMap};
use std::ptr;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Mutex;
// Monotonic source of GEM handle values; starts at 1 so 0 can mean "none".
static NEXT_GEM_HANDLE: AtomicU32 = AtomicU32::new(1);
/// Prefix of the caller's C-side GEM object that this shim mirrors state
/// into (handle_count and size).
/// NOTE(review): field offsets are assumed to match the C header's struct —
/// confirm against the drm C definitions.
#[repr(C)]
struct CallerGemObject {
    dev: *mut u8,
    handle_count: u32,
    _pad: u32,
    size: usize,
    driver_private: *mut u8,
}
/// Mirror `count` into the caller-visible object.
/// SAFETY: `obj` must point at a live, writable, CallerGemObject-compatible
/// struct.
unsafe fn write_handle_count(obj: *mut u8, count: u32) {
    let cobj = obj as *mut CallerGemObject;
    unsafe {
        (*cobj).handle_count = count;
    }
}
/// Mirror `size` into the caller-visible object. Same safety contract as
/// `write_handle_count`.
unsafe fn write_size(obj: *mut u8, size: usize) {
    let cobj = obj as *mut CallerGemObject;
    unsafe {
        (*cobj).size = size;
    }
}
/// Shim-side bookkeeping per GEM object.
struct ObjectState {
    size: usize,
    handle_count: u32,
    handles: Vec<u32>,
}
// Object table: object pointer (as usize) -> state. Lazily initialized under
// the lock by with_objects.
static OBJECTS: Mutex<Option<HashMap<usize, ObjectState>>> = Mutex::new(None);
// Handle table: handle -> object pointer (as usize).
static HANDLES: Mutex<Option<BTreeMap<u32, usize>>> = Mutex::new(None);
/// Run `f` with the global object table locked, creating the table on first
/// use. A poisoned lock is recovered, since the table holds only plain data.
fn with_objects<F, R>(f: F) -> R
where
    F: FnOnce(&mut HashMap<usize, ObjectState>) -> R,
{
    let mut guard = OBJECTS.lock().unwrap_or_else(|e| e.into_inner());
    f(guard.get_or_insert_with(HashMap::new))
}
/// Run `f` with the global handle table locked; same lazy-init and
/// poison-recovery behavior as `with_objects`.
fn with_handles<F, R>(f: F) -> R
where
    F: FnOnce(&mut BTreeMap<u32, usize>) -> R,
{
    let mut guard = HANDLES.lock().unwrap_or_else(|e| e.into_inner());
    f(guard.get_or_insert_with(BTreeMap::new))
}
/// Hand out the next GEM handle; values only ever increase, so handles are
/// never reused within a process.
fn next_gem_handle() -> u32 {
    NEXT_GEM_HANDLE.fetch_add(1, Ordering::Relaxed)
}
#[no_mangle]
/// Stub: device registration always succeeds; no global DRM device list is
/// kept in this shim.
pub extern "C" fn drm_dev_register(_dev: *mut u8, _flags: u64) -> i32 {
    0
}
#[no_mangle]
/// Stub counterpart of `drm_dev_register`; nothing to undo.
pub extern "C" fn drm_dev_unregister(_dev: *mut u8) {}
#[no_mangle]
pub extern "C" fn drm_gem_object_init(_dev: *mut u8, obj: *mut u8, size: usize) -> i32 {
    // Register `obj` with the shim's object table and mirror `size` plus a
    // zero handle count into the caller's struct. Always returns 0.
    let obj_key = obj as usize;
    // SAFETY: caller passes a valid GEM object; only the mirrored fields are
    // touched.
    unsafe {
        write_size(obj, size);
        write_handle_count(obj, 0);
    }
    let state = ObjectState {
        size,
        handle_count: 0,
        handles: Vec::new(),
    };
    with_objects(|objects| {
        objects.insert(obj_key, state);
    });
    log::debug!("drm_gem_object_init: obj={:#x} size={}", obj_key, size);
    0
}
#[no_mangle]
/// Release a GEM object: drop its shim state and invalidate every handle
/// that still refers to it.
///
/// Fix: the previous version removed handles inside the `with_objects`
/// closure, acquiring the HANDLES lock while holding OBJECTS — nested locks,
/// and one HANDLES acquisition *per handle*. The state is now detached
/// first, then all stale handles are dropped under a single HANDLES lock.
pub extern "C" fn drm_gem_object_release(obj: *mut u8) {
    let key = obj as usize;
    let removed = with_objects(|objects| objects.remove(&key));
    if let Some(state) = removed {
        with_handles(|handles| {
            for h in &state.handles {
                handles.remove(h);
            }
        });
        log::debug!(
            "drm_gem_object_release: obj={:#x} handles_dropped={}",
            key,
            state.handles.len()
        );
    }
}
#[no_mangle]
/// Create a userspace handle for `obj`: bumps the object's handle count
/// (mirrored into the caller's struct), records the handle, and returns it
/// via `*handlep`. Returns 0 on success, -22 (-EINVAL) for a null
/// out-pointer or an object never passed to `drm_gem_object_init`.
///
/// NOTE(review): the object table is updated before the handle table, so a
/// concurrent lookup can briefly miss a handle the object already counts.
pub extern "C" fn drm_gem_handle_create(_file: *mut u8, obj: *mut u8, handlep: *mut u32) -> i32 {
    if handlep.is_null() {
        return -22;
    }
    let key = obj as usize;
    let handle = with_objects(|objects| match objects.get_mut(&key) {
        Some(state) => {
            let handle = next_gem_handle();
            state.handle_count += 1;
            // SAFETY: obj was registered live in drm_gem_object_init.
            unsafe {
                write_handle_count(obj, state.handle_count);
            }
            state.handles.push(handle);
            Some(handle)
        }
        None => {
            log::error!(
                "drm_gem_handle_create: obj={:#x} not initialized (drm_gem_object_init not called)",
                key
            );
            None
        }
    });
    let handle = match handle {
        Some(h) => h,
        None => return -22,
    };
    with_handles(|handles| {
        handles.insert(handle, key);
    });
    unsafe { *handlep = handle };
    log::debug!("drm_gem_handle_create: handle={} obj={:#x}", handle, key);
    0
}
#[no_mangle]
/// Drop `handle` from the handle table and decrement the owning object's
/// handle count, mirroring the new count back into the caller's struct.
/// Unknown handles are ignored (still logged).
pub extern "C" fn drm_gem_handle_delete(_file: *mut u8, handle: u32) {
    let owner = with_handles(|handles| handles.remove(&handle));
    if let Some(obj_key) = owner {
        with_objects(|objects| {
            if let Some(state) = objects.get_mut(&obj_key) {
                state.handles.retain(|h| *h != handle);
                state.handle_count = state.handle_count.saturating_sub(1);
                // SAFETY: obj_key was recorded from a live object pointer at
                // init time.
                unsafe {
                    write_handle_count(obj_key as *mut u8, state.handle_count);
                }
            }
        });
    }
    log::debug!("drm_gem_handle_delete: handle={}", handle);
}
#[no_mangle]
/// Resolve `handle` to its object pointer *without* touching reference
/// counts. Returns null (with a warning) for unknown handles or objects
/// that were already released.
pub extern "C" fn drm_gem_handle_lookup(_file: *mut u8, handle: u32) -> *mut u8 {
    let obj_key = match with_handles(|handles| handles.get(&handle).copied()) {
        Some(key) => key,
        None => {
            log::warn!("drm_gem_handle_lookup: handle={} not found", handle);
            return ptr::null_mut();
        }
    };
    if with_objects(|objects| objects.contains_key(&obj_key)) {
        obj_key as *mut u8
    } else {
        log::warn!(
            "drm_gem_handle_lookup: handle={} maps to obj={:#x} but object released",
            handle,
            obj_key
        );
        ptr::null_mut()
    }
}
#[no_mangle]
/// Resolve `handle` to its object pointer and take a reference: the mirrored
/// `handle_count` is incremented, to be balanced by `drm_gem_object_put`.
/// Returns null with a warning if the handle is unknown or the object was
/// released.
pub extern "C" fn drm_gem_object_lookup(_file: *mut u8, handle: u32) -> *mut u8 {
    let obj_key = with_handles(|handles| handles.get(&handle).copied());
    match obj_key {
        Some(key) => {
            let found = with_objects(|objects| {
                if let Some(state) = objects.get_mut(&key) {
                    state.handle_count += 1;
                    // SAFETY: key was recorded from a live object pointer at
                    // init time.
                    unsafe {
                        write_handle_count(key as *mut u8, state.handle_count);
                    }
                    true
                } else {
                    false
                }
            });
            if found {
                key as *mut u8
            } else {
                log::warn!(
                    "drm_gem_object_lookup: handle={} maps to obj={:#x} but object released",
                    handle,
                    key
                );
                ptr::null_mut()
            }
        }
        None => {
            log::warn!("drm_gem_object_lookup: handle={} not found", handle);
            ptr::null_mut()
        }
    }
}
#[no_mangle]
/// Drop one reference taken by `drm_gem_object_lookup`. This shim only
/// decrements the mirrored count — it never frees the object; that is the
/// caller's job via `drm_gem_object_release`.
pub extern "C" fn drm_gem_object_put(obj: *mut u8) {
    if obj.is_null() {
        return;
    }
    let obj_key = obj as usize;
    with_objects(|objects| {
        let state = match objects.get_mut(&obj_key) {
            Some(state) => state,
            None => return,
        };
        state.handle_count = state.handle_count.saturating_sub(1);
        // SAFETY: a non-null obj registered with the shim is assumed live.
        unsafe {
            write_handle_count(obj, state.handle_count);
        }
    });
}
#[no_mangle]
/// Stub: every ioctl "succeeds" without doing anything; only traced.
pub extern "C" fn drm_ioctl(_dev: *mut u8, cmd: u32, _data: *mut u8, _file: *mut u8) -> i32 {
    log::trace!("drm_ioctl: cmd={:#x}", cmd);
    0
}
#[no_mangle]
/// Stub: mode config state is not modeled here.
pub extern "C" fn drm_mode_config_reset(_dev: *mut u8) {}
#[no_mangle]
/// Stub: connector registration always succeeds.
pub extern "C" fn drm_connector_register(_connector: *mut u8) -> i32 {
    0
}
#[no_mangle]
/// Stub: reports "no vblank event handled" (0).
pub extern "C" fn drm_crtc_handle_vblank(_crtc: *mut u8) -> u32 {
    0
}
@@ -0,0 +1,95 @@
use std::ptr;
/// C-visible firmware blob descriptor: a byte count and a pointer to the
/// blob, allocated and owned by `request_firmware`.
#[repr(C)]
pub struct Firmware {
    pub size: usize,
    pub data: *const u8,
}
impl Default for Firmware {
    // Empty blob: null data, zero size (the representation Drop ignores).
    fn default() -> Self {
        Firmware {
            size: 0,
            data: ptr::null(),
        }
    }
}
impl Drop for Firmware {
    /// Frees the blob. The `Layout` (size, align 1) must match the one used
    /// by `request_firmware` when it allocated `data`; null/empty blobs are
    /// left alone.
    fn drop(&mut self) {
        if !self.data.is_null() && self.size > 0 {
            let layout = match std::alloc::Layout::from_size_align(self.size, 1) {
                Ok(l) => l,
                Err(_) => return,
            };
            unsafe { std::alloc::dealloc(self.data as *mut u8, layout) };
            self.data = ptr::null();
            self.size = 0;
        }
    }
}
#[no_mangle]
/// Load firmware blob `name` from the Redox firmware scheme.
///
/// On success stores a heap-allocated `Firmware` in `*fw` and returns 0; the
/// caller releases it with `release_firmware`. Returns -22 (-EINVAL) for
/// null/non-UTF-8 arguments, -2 (-ENOENT) when the blob cannot be read, and
/// -12 (-ENOMEM) on allocation failure.
///
/// Changes: the hand-rolled strlen loop is replaced with `CStr::from_ptr`,
/// and a zero-length blob no longer calls `alloc` with a zero-size layout
/// (which is undefined behavior for the global allocator).
pub extern "C" fn request_firmware(fw: *mut *mut Firmware, name: *const u8, _dev: *mut u8) -> i32 {
    if fw.is_null() || name.is_null() {
        return -22;
    }
    // SAFETY: name is non-null and, per the C contract, NUL-terminated.
    let name_str = match unsafe { std::ffi::CStr::from_ptr(name.cast()) }.to_str() {
        Ok(s) => s,
        Err(_) => return -22,
    };
    let firmware_path = format!("/scheme/firmware/{}", name_str);
    log::info!(
        "request_firmware: loading '{}' via {}",
        name_str,
        firmware_path
    );
    let data = match std::fs::read(&firmware_path) {
        Ok(d) => d,
        Err(e) => {
            log::error!("request_firmware: failed to load '{}': {}", name_str, e);
            return -2;
        }
    };
    let size = data.len();
    // Copy the blob into a raw allocation whose (size, align 1) layout
    // matches what Firmware::drop deallocates. An empty blob is represented
    // as a null pointer, which Drop leaves alone.
    let data_ptr = if size == 0 {
        ptr::null()
    } else {
        let layout = match std::alloc::Layout::from_size_align(size, 1) {
            Ok(l) => l,
            Err(_) => return -12,
        };
        let raw = unsafe { std::alloc::alloc(layout) };
        if raw.is_null() {
            return -12;
        }
        unsafe { ptr::copy_nonoverlapping(data.as_ptr(), raw, size) };
        raw as *const u8
    };
    let firmware = Box::new(Firmware {
        size,
        data: data_ptr,
    });
    unsafe { *fw = Box::into_raw(firmware) };
    log::info!("request_firmware: loaded {} bytes for '{}'", size, name_str);
    0
}
#[no_mangle]
/// Free a `Firmware` returned by `request_firmware`; null is a no-op.
pub extern "C" fn release_firmware(fw: *mut Firmware) {
    if fw.is_null() {
        return;
    }
    // SAFETY: fw came from Box::into_raw in request_firmware; dropping the
    // box also runs Firmware::drop, which frees the data blob.
    unsafe { drop(Box::from_raw(fw)) };
}
@@ -0,0 +1,151 @@
use std::collections::HashMap;
use std::ptr;
const EINVAL: i32 = 22;
const ENOSPC: i32 = 28;
/// IDs handed out by `idr_alloc` must be non-negative `i32`s, as in Linux;
/// an `end` of 0 means "no upper bound" and is capped here (i32::MAX + 1).
const ID_LIMIT: u32 = 0x8000_0000;
/// Minimal ID allocator mirroring Linux `struct idr`: maps small integer IDs
/// to caller-owned raw pointers (stored as `usize`). `next_id` is a hint for
/// the next candidate, giving cyclic allocation within a range.
#[repr(C)]
pub struct Idr {
    map: HashMap<u32, usize>,
    next_id: u32,
}
#[no_mangle]
/// Initialize an `Idr` in place; safe on uninitialized memory, mirroring
/// Linux `idr_init` which fills in a caller-provided struct.
pub extern "C" fn idr_init(idr: *mut Idr) {
    if idr.is_null() {
        return;
    }
    // SAFETY: caller provides a valid, writable location; `ptr::write` does
    // not drop any (possibly garbage) previous contents.
    unsafe {
        ptr::write(
            idr,
            Idr {
                map: HashMap::new(),
                next_id: 0,
            },
        );
    }
}
/// Convert a caller-supplied `i32` bound to `u32`, rejecting negatives.
fn normalize_id(value: i32) -> Option<u32> {
    if value < 0 {
        None
    } else {
        Some(value as u32)
    }
}
/// Try to claim `candidate`: on success record `value`, advance the
/// `next_id` hint (wrapping back to `start` at `end`), and return true.
fn try_claim(idr_ref: &mut Idr, candidate: u32, value: usize, start: u32, end: u32) -> bool {
    if let std::collections::hash_map::Entry::Vacant(entry) = idr_ref.map.entry(candidate) {
        entry.insert(value);
        idr_ref.next_id = candidate + 1;
        if idr_ref.next_id >= end {
            idr_ref.next_id = start;
        }
        true
    } else {
        false
    }
}
#[no_mangle]
/// Allocate an unused ID in `[start, end)` (`end == 0` meaning "unbounded",
/// capped at i32::MAX) and associate it with `ptr`. Returns the ID, -EINVAL
/// for bad bounds, or -ENOSPC when the range is exhausted.
///
/// Fixes two defects of the previous version: the wrap-around scan could
/// hand out an ID >= `end` when the `next_id` hint had grown past the
/// requested range, and the unbounded path could return IDs above i32::MAX,
/// which collapsed into negative, error-looking values.
pub extern "C" fn idr_alloc(idr: *mut Idr, ptr: *mut u8, start: i32, end: i32, _gfp: u32) -> i32 {
    if idr.is_null() {
        return -EINVAL;
    }
    let start = match normalize_id(start) {
        Some(start) => start,
        None => return -EINVAL,
    };
    let end = match end {
        0 => ID_LIMIT,
        value if value > 0 => value as u32,
        _ => return -EINVAL,
    };
    if start >= end {
        return -EINVAL;
    }
    // SAFETY: caller guarantees `idr` points at an initialized Idr.
    let idr_ref = unsafe { &mut *idr };
    // Clamp the hint into [start, end] so the wrap-around scan below can
    // never produce a candidate outside the requested range.
    let initial = idr_ref.next_id.clamp(start, end);
    let value = ptr as usize;
    for candidate in initial..end {
        if try_claim(idr_ref, candidate, value, start, end) {
            // candidate < end <= ID_LIMIT, so it fits in a non-negative i32.
            return candidate as i32;
        }
    }
    for candidate in start..initial {
        if try_claim(idr_ref, candidate, value, start, end) {
            return candidate as i32;
        }
    }
    -ENOSPC
}
#[no_mangle]
/// Look up the pointer stored under `id`; null if absent or `idr` is null.
pub extern "C" fn idr_find(idr: *mut Idr, id: u32) -> *mut u8 {
    if idr.is_null() {
        return ptr::null_mut();
    }
    // SAFETY: caller guarantees `idr` points at an initialized Idr.
    let idr_ref = unsafe { &*idr };
    match idr_ref.map.get(&id) {
        Some(value) => *value as *mut u8,
        None => ptr::null_mut(),
    }
}
#[no_mangle]
/// Remove `id`, lowering the allocation hint so freed IDs are reused early.
pub extern "C" fn idr_remove(idr: *mut Idr, id: u32) {
    if idr.is_null() {
        return;
    }
    // SAFETY: caller guarantees `idr` points at an initialized Idr.
    let idr_ref = unsafe { &mut *idr };
    idr_ref.map.remove(&id);
    if id < idr_ref.next_id {
        idr_ref.next_id = id;
    }
}
#[no_mangle]
/// Drop every mapping and reset the hint; the Idr stays usable afterwards.
pub extern "C" fn idr_destroy(idr: *mut Idr) {
    if idr.is_null() {
        return;
    }
    // SAFETY: caller guarantees `idr` points at an initialized Idr.
    let idr_ref = unsafe { &mut *idr };
    idr_ref.map.clear();
    idr_ref.next_id = 0;
}
@@ -0,0 +1,126 @@
use std::collections::HashMap;
use std::ptr;
use std::sync::Mutex;
// Physical addresses are 64-bit on this port.
type PhysAddr = u64;
/// Size of a live MMIO mapping, keyed in the tracker by its virtual base.
struct MappedRegion {
    size: usize,
}
lazy_static::lazy_static! {
    /// Live ioremap'd regions, so iounmap can recover the true mapped size.
    static ref MMIO_MAP_TRACKER: Mutex<HashMap<usize, MappedRegion>> = Mutex::new(HashMap::new());
}
#[no_mangle]
/// Map the physical MMIO range `phys..phys+size` into this process via the
/// Redox memory scheme. Returns null on failure. The mapping is recorded in
/// `MMIO_MAP_TRACKER` so `iounmap` can recover its real length.
///
/// Changes: tracking now recovers from a poisoned lock (previously the
/// region was `forget`-leaked untracked, making it permanently unmappable),
/// and the redundant `let ptr = …; ptr` binding is gone.
pub extern "C" fn ioremap(phys: PhysAddr, size: usize) -> *mut u8 {
    if size == 0 || phys == 0 {
        return ptr::null_mut();
    }
    log::info!(
        "ioremap(phys={:#x}, size={}) — mapping via scheme:memory",
        phys,
        size
    );
    match redox_driver_sys::memory::MmioRegion::map(
        phys,
        size,
        redox_driver_sys::memory::CacheType::DeviceMemory,
        redox_driver_sys::memory::MmioProt::READ_WRITE,
    ) {
        Ok(region) => {
            let p = region.as_ptr() as *mut u8;
            let s = region.size();
            // Track even through a poisoned lock: an untracked region could
            // never be released by iounmap.
            MMIO_MAP_TRACKER
                .lock()
                .unwrap_or_else(|poisoned| poisoned.into_inner())
                .insert(p as usize, MappedRegion { size: s });
            // Leak the RAII wrapper: lifetime is managed manually by iounmap.
            std::mem::forget(region);
            p
        }
        Err(e) => {
            log::error!("ioremap: failed to map {:#x}+{:#x}: {:?}", phys, size, e);
            ptr::null_mut()
        }
    }
}
#[no_mangle]
/// Unmap a region previously returned by `ioremap`. `size` is accepted for
/// Linux API shape but the tracked size from `ioremap` is what is actually
/// unmapped; unknown addresses are ignored.
///
/// Change: recovers from a poisoned tracker lock (previously a poisoned lock
/// silently leaked the mapping).
pub extern "C" fn iounmap(addr: *mut u8, size: usize) {
    if addr.is_null() || size == 0 {
        return;
    }
    let region = MMIO_MAP_TRACKER
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
        .remove(&(addr as usize));
    if let Some(region) = region {
        // SAFETY: addr and region.size describe the tracked ioremap mapping.
        let _ = unsafe { libredox::call::munmap(addr as *mut (), region.size) };
    }
}
#[no_mangle]
/// MMIO 32-bit read; a null address yields 0 instead of faulting.
/// NOTE(review): `addr` must be 4-byte aligned — `read_volatile` on a
/// misaligned pointer is UB. Register offsets normally guarantee this, but
/// nothing here checks it.
pub extern "C" fn readl(addr: *const u8) -> u32 {
    if addr.is_null() {
        return 0;
    }
    unsafe { ptr::read_volatile(addr as *const u32) }
}
#[no_mangle]
/// MMIO 32-bit write; silently dropped for a null address. Same alignment
/// requirement as `readl`.
pub extern "C" fn writel(val: u32, addr: *mut u8) {
    if addr.is_null() {
        return;
    }
    unsafe { ptr::write_volatile(addr as *mut u32, val) };
}
#[no_mangle]
/// MMIO 64-bit read (8-byte alignment required); null yields 0.
pub extern "C" fn readq(addr: *const u8) -> u64 {
    if addr.is_null() {
        return 0;
    }
    unsafe { ptr::read_volatile(addr as *const u64) }
}
#[no_mangle]
/// MMIO 64-bit write (8-byte alignment required); null is dropped.
pub extern "C" fn writeq(val: u64, addr: *mut u8) {
    if addr.is_null() {
        return;
    }
    unsafe { ptr::write_volatile(addr as *mut u64, val) };
}
#[no_mangle]
/// MMIO 8-bit read; null yields 0.
pub extern "C" fn readb(addr: *const u8) -> u8 {
    if addr.is_null() {
        return 0;
    }
    unsafe { ptr::read_volatile(addr) }
}
#[no_mangle]
/// MMIO 8-bit write; null is dropped.
pub extern "C" fn writeb(val: u8, addr: *mut u8) {
    if addr.is_null() {
        return;
    }
    unsafe { ptr::write_volatile(addr, val) };
}
#[no_mangle]
/// MMIO 16-bit read (2-byte alignment required); null yields 0.
pub extern "C" fn readw(addr: *const u8) -> u16 {
    if addr.is_null() {
        return 0;
    }
    unsafe { ptr::read_volatile(addr as *const u16) }
}
#[no_mangle]
/// MMIO 16-bit write (2-byte alignment required); null is dropped.
pub extern "C" fn writew(val: u16, addr: *mut u8) {
    if addr.is_null() {
        return;
    }
    unsafe { ptr::write_volatile(addr as *mut u16, val) };
}
@@ -0,0 +1,126 @@
use std::collections::HashMap;
use std::fs::File;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
/// `Send` wrapper so the raw `dev_id` cookie can move into the IRQ thread;
/// the pointer is only handed back to the C handler, never dereferenced here.
struct SendU8Ptr(*mut u8);
impl SendU8Ptr {
    fn as_ptr(&self) -> *mut u8 {
        self.0
    }
}
unsafe impl Send for SendU8Ptr {}
/// C-side interrupt handler: (irq, dev_id) -> IRQ_* result (result ignored).
pub type IrqHandler = extern "C" fn(i32, *mut u8) -> u32;
/// Per-IRQ bookkeeping: cancellation flag shared with the handler thread,
/// our own clone of the irq-scheme fd, and the thread's join handle.
struct IrqEntry {
    cancel: Arc<AtomicBool>,
    fd: Option<File>,
    handle: Option<std::thread::JoinHandle<()>>,
}
lazy_static::lazy_static! {
    /// Registered IRQ handlers, keyed by IRQ number.
    static ref IRQ_TABLE: Mutex<HashMap<u32, IrqEntry>> = Mutex::new(HashMap::new());
}
#[no_mangle]
/// Register `handler` for `irq`, Linux `request_irq`-style.
///
/// Opens `/scheme/irq/<irq>` and spawns a thread that blocks reading that
/// fd; each successful read is treated as one interrupt delivery and invokes
/// `handler(irq, dev_id)` on the thread (not in interrupt context). Returns
/// 0 on success, -22 (-EINVAL) on any failure.
///
/// NOTE(review): registering the same IRQ twice replaces the table entry
/// without cancelling the previous thread — confirm callers never
/// re-register a live IRQ.
pub extern "C" fn request_irq(
    irq: u32,
    handler: IrqHandler,
    _flags: u32,
    _name: *const u8,
    dev_id: *mut u8,
) -> i32 {
    let path = format!("/scheme/irq/{}", irq);
    let fd = match std::fs::File::open(&path) {
        Ok(f) => f,
        Err(e) => {
            log::error!("request_irq: failed to open {} : {}", path, e);
            return -22;
        }
    };
    // The thread gets its own clone; the table keeps `fd` so free_irq can
    // drop it independently of the thread.
    let thread_fd = match fd.try_clone() {
        Ok(f) => f,
        Err(e) => {
            log::error!("request_irq: failed to clone {} : {}", path, e);
            return -22;
        }
    };
    let cancel = Arc::new(AtomicBool::new(false));
    let cancel_clone = Arc::clone(&cancel);
    let send_dev_id = SendU8Ptr(dev_id);
    let handle = std::thread::spawn(move || {
        use std::io::Read;
        let mut fd = thread_fd;
        let mut buf = [0u8; 8];
        loop {
            // Cancellation is checked before and after each blocking read so
            // free_irq's flag takes effect at the next wakeup.
            if cancel_clone.load(Ordering::Acquire) {
                break;
            }
            match fd.read(&mut buf) {
                Ok(0) | Err(_) => break,
                Ok(_) => {
                    if cancel_clone.load(Ordering::Acquire) {
                        break;
                    }
                    handler(irq as i32, send_dev_id.as_ptr());
                }
            }
        }
    });
    let entry = IrqEntry {
        cancel: Arc::clone(&cancel),
        fd: Some(fd),
        handle: Some(handle),
    };
    if let Ok(mut table) = IRQ_TABLE.lock() {
        table.insert(irq, entry);
    } else {
        // Poisoned table: undo — cancel the thread and wait for it to exit.
        cancel.store(true, Ordering::Release);
        let mut entry = entry;
        let _ = entry.fd.take();
        if let Some(handle) = entry.handle.take() {
            let _ = handle.join();
        }
        log::error!("request_irq: failed to record handler for IRQ {}", irq);
        return -22;
    }
    log::info!("request_irq: registered handler for IRQ {}", irq);
    0
}
#[no_mangle]
/// Unregister the handler for `irq`: set the cancel flag, drop our fd clone,
/// and join the handler thread.
///
/// NOTE(review): the thread blocks in `read` on its *own* fd clone, so the
/// cancel flag is only observed after the next interrupt or a read error;
/// confirm the irq scheme wakes readers on teardown, otherwise this join can
/// stall for a long time.
pub extern "C" fn free_irq(irq: u32, _dev_id: *mut u8) {
    let entry = if let Ok(mut table) = IRQ_TABLE.lock() {
        let mut entry = table.remove(&irq);
        if let Some(ref mut entry_ref) = entry {
            entry_ref.cancel.store(true, Ordering::Release);
            let _ = entry_ref.fd.take();
        }
        entry
    } else {
        None
    };
    if let Some(mut entry) = entry {
        if let Some(handle) = entry.handle.take() {
            let _ = handle.join();
        }
    }
    // Logged unconditionally, even when no entry existed for `irq`.
    log::info!("free_irq: released IRQ {}", irq);
}
#[no_mangle]
/// No-op: per-IRQ masking is not implemented in this shim.
pub extern "C" fn enable_irq(_irq: u32) {}
#[no_mangle]
/// No-op: per-IRQ masking is not implemented in this shim.
pub extern "C" fn disable_irq(_irq: u32) {}
@@ -0,0 +1,253 @@
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::collections::HashMap;
use std::ptr;
use std::sync::Mutex;
use syscall::{flag, CallFlags};
/// `Send` wrapper so raw pointers can key the global trackers; the pointers
/// are only compared/hashed here, never dereferenced.
struct SendU8Ptr(*mut u8);
impl SendU8Ptr {
    #[allow(dead_code)]
    fn as_ptr(&self) -> *mut u8 {
        self.0
    }
}
unsafe impl Send for SendU8Ptr {}
// An allocation's identity is its address.
impl PartialEq for SendU8Ptr {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
impl Eq for SendU8Ptr {}
impl std::hash::Hash for SendU8Ptr {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        (self.0 as usize).hash(state);
    }
}
lazy_static::lazy_static! {
    /// ptr -> Layout for regular kmalloc allocations; kfree needs the exact
    /// Layout to call `dealloc` soundly.
    static ref ALLOC_TRACKER: Mutex<HashMap<SendU8Ptr, Layout>> = Mutex::new(HashMap::new());
    /// ptr -> Layout for GFP_DMA32 allocations (page-aligned, phys < 4 GiB).
    static ref DMA32_TRACKER: Mutex<HashMap<SendU8Ptr, Layout>> = Mutex::new(HashMap::new());
}
/// Round `size` up to the next multiple of `align` (power of two required
/// for the mask arithmetic to be correct).
fn align_up(size: usize, align: usize) -> usize {
    let mask = align - 1;
    (size + mask) & !mask
}
/// Translate virtual address to physical address via scheme:memory/translation.
/// Returns 0 on failure.
///
/// Fix: the previous version discarded the `call_ro` error; on failure `buf`
/// still held the input, so the *virtual* address was returned as a bogus
/// non-zero "physical" address, contradicting the documented contract.
fn virt_to_phys(virt: usize) -> usize {
    let fd = match libredox::Fd::open("/scheme/memory/translation", flag::O_CLOEXEC as i32, 0) {
        Ok(f) => f,
        Err(_) => return 0,
    };
    let mut buf = virt.to_ne_bytes();
    match libredox::call::call_ro(fd.raw(), &mut buf, CallFlags::empty(), &[]) {
        Ok(_) => usize::from_ne_bytes(buf),
        Err(_) => 0,
    }
}
// How many fresh allocations to try before giving up on landing below 4 GiB.
const GFP_DMA32_RETRIES: usize = 8;
// First address above the 32-bit DMA window.
const DMA32_LIMIT: u64 = 0x1_0000_0000;
/// Allocate memory with physical address below 4GB (GFP_DMA32).
/// Tries up to GFP_DMA32_RETRIES allocations; if none land below 4GB,
/// returns null rather than giving a buffer the device can't DMA to.
fn dma32_alloc(size: usize) -> *mut u8 {
    // Page-aligned layout; DMA32_TRACKER records it so kfree can dealloc.
    let layout = match Layout::from_size_align(size, 4096) {
        Ok(l) => l,
        Err(_) => return ptr::null_mut(),
    };
    // Placement is not controllable from userspace, so allocate, check where
    // the page actually landed, and retry if it is above the 4 GiB line.
    for attempt in 0..GFP_DMA32_RETRIES {
        let candidate = unsafe { alloc_zeroed(layout) };
        if candidate.is_null() {
            return ptr::null_mut();
        }
        let phys = virt_to_phys(candidate as usize);
        if phys == 0 {
            log::warn!(
                "dma32_alloc: virt_to_phys failed for {:#x}",
                candidate as usize
            );
            unsafe { dealloc(candidate, layout) };
            continue;
        }
        if phys as u64 >= DMA32_LIMIT {
            log::debug!(
                "dma32_alloc: attempt {} phys={:#x} >= 4GB, retrying",
                attempt,
                phys
            );
            unsafe { dealloc(candidate, layout) };
            continue;
        }
        log::debug!(
            "dma32_alloc: {} bytes at virt={:#x} phys={:#x} (< 4GB)",
            size,
            candidate as usize,
            phys
        );
        if let Ok(mut tracker) = DMA32_TRACKER.lock() {
            tracker.insert(SendU8Ptr(candidate), layout);
        } else {
            // Untrackable allocation could never be freed; undo and fail.
            unsafe { dealloc(candidate, layout) };
            return ptr::null_mut();
        }
        return candidate;
    }
    log::warn!(
        "dma32_alloc: failed to get <4GB physical address after {} retries for {} bytes",
        GFP_DMA32_RETRIES,
        size
    );
    ptr::null_mut()
}
// GFP allocation flags (subset of Linux's; GFP_ATOMIC is accepted but not
// treated specially — userspace allocation never sleeps anyway).
const GFP_KERNEL: u32 = 0;
const GFP_ATOMIC: u32 = 1;
const GFP_DMA32: u32 = 2;
#[no_mangle]
/// Allocate kernel memory.
/// GFP_DMA32 flag routes through a dedicated path with physical address verification
/// to ensure allocations are suitable for devices with 32-bit DMA limitations.
///
/// Unlike Linux `kmalloc`, this always returns zeroed memory (`alloc_zeroed`)
/// and returns null for `size == 0`.
///
/// Fix: a poisoned `ALLOC_TRACKER` lock previously skipped tracking, making
/// the pointer permanently unfreeable by `kfree`; the lock is now recovered.
pub extern "C" fn kmalloc(size: usize, flags: u32) -> *mut u8 {
    if size == 0 {
        return ptr::null_mut();
    }
    // Handle GFP_DMA32 allocations via dedicated path
    if flags & GFP_DMA32 != 0 {
        return dma32_alloc(size);
    }
    let aligned_size = align_up(size, 16);
    let layout = match Layout::from_size_align(aligned_size, 16) {
        Ok(l) => l,
        Err(_) => return ptr::null_mut(),
    };
    let ptr = unsafe { alloc_zeroed(layout) };
    if ptr.is_null() {
        return ptr::null_mut();
    }
    ALLOC_TRACKER
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
        .insert(SendU8Ptr(ptr), layout);
    ptr
}
#[no_mangle]
/// Allocate zeroed memory. `kmalloc` already uses `alloc_zeroed`, so the
/// explicit `write_bytes` below is redundant today — kept as cheap defense
/// in case `kmalloc` ever stops zeroing.
pub extern "C" fn kzalloc(size: usize, flags: u32) -> *mut u8 {
    let ptr = kmalloc(size, flags);
    if !ptr.is_null() {
        unsafe { ptr::write_bytes(ptr, 0, size) };
    }
    ptr
}
#[no_mangle]
/// Free memory returned by `kmalloc`/`kzalloc`. The pointer is looked up in
/// the DMA32 tracker first, then the regular tracker; unknown pointers are
/// ignored rather than risking an unsound `dealloc`.
///
/// Fix: a poisoned tracker lock previously made kfree return early, turning
/// every subsequent free into a leak; the lock is now recovered.
pub extern "C" fn kfree(ptr: *const u8) {
    if ptr.is_null() {
        return;
    }
    let key = SendU8Ptr(ptr as *mut u8);
    // Check DMA32 tracker first
    {
        let mut dma32_tracker = DMA32_TRACKER
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        if let Some(layout) = dma32_tracker.remove(&key) {
            // SAFETY: (ptr, layout) was recorded by dma32_alloc for exactly
            // this allocation, and the entry was just removed (no double free).
            unsafe { dealloc(ptr as *mut u8, layout) };
            return;
        }
    }
    // Check regular allocator tracker
    let layout = {
        let mut tracker = ALLOC_TRACKER
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        match tracker.remove(&key) {
            Some(l) => l,
            None => return,
        }
    };
    // SAFETY: as above — the layout was recorded by kmalloc at allocation.
    unsafe { dealloc(ptr as *mut u8, layout) };
}
#[cfg(test)]
mod tests {
    use super::*;
    // NOTE(review): the GFP_DMA32 tests depend on the Redox memory
    // translation scheme; on other hosts virt_to_phys fails, dma32_alloc
    // returns null, and test_kmalloc_dma32_basic/_multiple will fail.
    #[test]
    fn test_kmalloc_basic() {
        let p = kmalloc(64, GFP_KERNEL);
        assert!(!p.is_null());
        kfree(p);
    }
    #[test]
    fn test_kzalloc_zeroed() {
        let p = kzalloc(64, GFP_KERNEL);
        assert!(!p.is_null());
        for i in 0..64 {
            assert_eq!(unsafe { *p.add(i) }, 0);
        }
        kfree(p);
    }
    #[test]
    fn test_kfree_null() {
        kfree(ptr::null());
    }
    #[test]
    fn test_kmalloc_zero_size() {
        assert!(kmalloc(0, GFP_KERNEL).is_null());
    }
    #[test]
    fn test_kmalloc_dma32_basic() {
        let p = kmalloc(64, GFP_DMA32);
        assert!(!p.is_null(), "GFP_DMA32 allocation should succeed");
        kfree(p);
    }
    #[test]
    fn test_kmalloc_dma32_zero_size() {
        assert!(
            kmalloc(0, GFP_DMA32).is_null(),
            "GFP_DMA32 with size 0 should return null"
        );
    }
    #[test]
    fn test_kfree_dma32_null() {
        // kfree(null) should not crash
        kfree(ptr::null());
    }
    #[test]
    fn test_kmalloc_dma32_multiple() {
        // Allocate and free multiple DMA32 buffers
        let p1 = kmalloc(128, GFP_DMA32);
        let p2 = kmalloc(256, GFP_DMA32);
        assert!(!p1.is_null());
        assert!(!p2.is_null());
        kfree(p1);
        kfree(p2);
    }
}
@@ -0,0 +1,13 @@
// Rust implementations backing the LinuxKPI C headers; each module mirrors a
// Linux kernel subsystem (devres, DMA, DRM/GEM, firmware loading, IDR, MMIO,
// IRQ, allocator, PCI, locking, timers, waitqueues, workqueues).
pub mod device;
pub mod dma;
pub mod drm_shim;
pub mod firmware;
pub mod idr;
pub mod io;
pub mod irq;
pub mod memory;
pub mod pci;
pub mod sync;
pub mod timer;
pub mod wait;
pub mod workqueue;
@@ -0,0 +1,443 @@
use std::os::raw::c_ulong;
use std::ptr;
use std::sync::Mutex;
use redox_driver_sys::pci::{
enumerate_pci_class, PciDevice, PciDeviceInfo, PciLocation, PCI_CLASS_DISPLAY,
};
// Linux errno values, returned negated through the C ABI.
const EINVAL: i32 = 22;
const ENODEV: i32 = 19;
const EIO: i32 = 5;
// Wildcard in PCI ID tables (Linux PCI_ANY_ID: all bits set).
const PCI_ANY_ID: u32 = !0;
/// Minimal mirror of Linux `struct device`, embedded in `PciDev`.
///
/// Fix: the previous `#[derive(Default)]` cannot compile — raw pointers
/// (`*mut u8`) do not implement `Default` — so the impl is written manually
/// with explicit nulls.
#[repr(C)]
pub struct Device {
    driver: *mut u8,
    driver_data: *mut u8,
    platform_data: *mut u8,
    of_node: *mut u8,
    dma_mask: u64,
}
impl Default for Device {
    fn default() -> Self {
        Device {
            driver: ptr::null_mut(),
            driver_data: ptr::null_mut(),
            platform_data: ptr::null_mut(),
            of_node: ptr::null_mut(),
            dma_mask: 0,
        }
    }
}
/// Mirror of Linux `struct pci_dev` as seen by ported drivers.
/// `bars`/`bar_sizes` hold up to six BAR base addresses and lengths filled
/// from enumeration; `enabled` is toggled by pci_enable_device /
/// pci_disable_device; `driver_data` carries the matched ID-table entry's
/// cookie.
#[repr(C)]
pub struct PciDev {
    pub vendor: u16,
    pub device: u16,
    bus: u8,
    dev: u8,
    func: u8,
    revision: u8,
    irq: u32,
    bars: [u64; 6],
    bar_sizes: [u64; 6],
    driver_data: *mut u8,
    device_obj: Device,
    pub enabled: bool,
}
/// Mirror of Linux `struct pci_device_id`. Driver ID tables are arrays of
/// these terminated by an all-zero entry (see `matching_id_entry`);
/// `driver_data` is an opaque per-entry cookie handed back to the driver.
#[repr(C)]
pub struct PciDeviceId {
    vendor: u32,
    device: u32,
    subvendor: u32,
    subdevice: u32,
    class: u32,
    class_mask: u32,
    driver_data: c_ulong,
}
// All-zero / null / disabled PciDev; populated for real by build_pci_dev
// during pci_register_driver.
impl Default for PciDev {
    fn default() -> Self {
        PciDev {
            vendor: 0,
            device: 0,
            bus: 0,
            dev: 0,
            func: 0,
            revision: 0,
            irq: 0,
            bars: [0; 6],
            bar_sizes: [0; 6],
            driver_data: ptr::null_mut(),
            device_obj: Device::default(),
            enabled: false,
        }
    }
}
/// The single device this shim is currently bound to: its PCI location and
/// the `Box::into_raw`'d `PciDev` handed to the driver (stored as usize).
#[derive(Clone, Copy, Debug)]
struct CurrentDevice {
    location: PciLocation,
    ptr: usize,
}
lazy_static::lazy_static! {
    // Single-device model: only one probed device is tracked at a time.
    static ref CURRENT_DEVICE: Mutex<Option<CurrentDevice>> = Mutex::new(None);
    // Probe callback of the driver registered via pci_register_driver.
    static ref REGISTERED_PROBE: Mutex<Option<PciDriverProbe>> = Mutex::new(None);
}
// NOTE(review): 0x1002 is the ATI/AMD *graphics* vendor ID (Linux names it
// PCI_VENDOR_ID_ATI); Linux's PCI_VENDOR_ID_AMD is 0x1022. The value is what
// amdgpu devices report, but the name diverges from the Linux KPI — confirm
// callers expect 0x1002 here.
pub const PCI_VENDOR_ID_AMD: u16 = 0x1002;
pub const PCI_VENDOR_ID_INTEL: u16 = 0x8086;
/// Resolve the PCI location to operate on: prefer the device recorded by
/// `pci_register_driver`; otherwise fall back to the BDF stored in the
/// caller's `PciDev` (segment 0). Errors with -EINVAL only when there is no
/// current device *and* `dev` is null.
fn current_location_from_state(dev: *mut PciDev) -> Result<PciLocation, i32> {
    let cached = CURRENT_DEVICE
        .lock()
        .ok()
        .and_then(|state| state.as_ref().map(|current| current.location));
    if let Some(location) = cached {
        return Ok(location);
    }
    if dev.is_null() {
        return Err(-EINVAL);
    }
    // SAFETY: dev checked non-null; caller guarantees it points at a PciDev.
    let dev_ref = unsafe { &*dev };
    Ok(PciLocation {
        segment: 0,
        bus: dev_ref.bus,
        device: dev_ref.dev,
        function: dev_ref.func,
    })
}
/// Open a scheme handle for the current device's config space, mapping any
/// open failure to -ENODEV (with a warning).
fn open_current_device(dev: *mut PciDev) -> Result<PciDevice, i32> {
    let location = current_location_from_state(dev)?;
    match PciDevice::open_location(&location) {
        Ok(pci) => Ok(pci),
        Err(error) => {
            log::warn!("pci: failed to open PCI device {}: {}", location, error);
            Err(-ENODEV)
        }
    }
}
/// Check `info` against one ID-table entry, Linux-style: PCI_ANY_ID
/// wildcards vendor/device; subsystem IDs are only accepted as wildcards
/// (the enumeration data carries no subsystem IDs); class matching applies
/// `class_mask` over the 24-bit class/subclass/prog-if triple.
fn matches_id(info: &PciDeviceInfo, id: &PciDeviceId) -> bool {
    let class =
        ((info.class_code as u32) << 16) | ((info.subclass as u32) << 8) | info.prog_if as u32;
    (id.vendor == PCI_ANY_ID || id.vendor == info.vendor_id as u32)
        && (id.device == PCI_ANY_ID || id.device == info.device_id as u32)
        && id.subvendor == PCI_ANY_ID
        && id.subdevice == PCI_ANY_ID
        && (id.class_mask == 0 || (class & id.class_mask) == (id.class & id.class_mask))
}
/// Walk a `pci_device_id` table and return the first entry matching `info`,
/// or `None` (also for a null table).
///
/// SAFETY relies on the table being terminated by an all-zero sentinel
/// entry, as Linux driver ID tables are; without the sentinel the pointer
/// walk reads out of bounds.
fn matching_id_entry(
    info: &PciDeviceInfo,
    mut id: *const PciDeviceId,
) -> Option<*const PciDeviceId> {
    if id.is_null() {
        return None;
    }
    loop {
        let current = unsafe { &*id };
        // All-zero entry terminates the table.
        if current.vendor == 0
            && current.device == 0
            && current.subvendor == 0
            && current.subdevice == 0
            && current.class == 0
            && current.class_mask == 0
            && current.driver_data == 0
        {
            return None;
        }
        if matches_id(info, current) {
            return Some(id);
        }
        id = unsafe { id.add(1) };
    }
}
fn build_pci_dev(info: &PciDeviceInfo, id: &PciDeviceId) -> PciDev {
let mut dev = PciDev {
vendor: info.vendor_id,
device: info.device_id,
bus: info.location.bus,
dev: info.location.device,
func: info.location.function,
revision: info.revision,
irq: info.irq.unwrap_or(0),
bars: [0; 6],
bar_sizes: [0; 6],
driver_data: id.driver_data as usize as *mut u8,
device_obj: Device::default(),
enabled: false,
};
for bar in &info.bars {
if bar.index < dev.bars.len() {
dev.bars[bar.index] = bar.addr;
dev.bar_sizes[bar.index] = bar.size;
}
}
dev
}
/// Record `dev_ptr` (a `Box::into_raw`'d PciDev) as the current device,
/// freeing any previously recorded one.
/// NOTE(review): if the lock is poisoned, `dev_ptr` is neither stored nor
/// freed (a leak) — tolerable only because poisoning implies a prior panic.
fn replace_current_device(location: PciLocation, dev_ptr: *mut PciDev) {
    if let Ok(mut state) = CURRENT_DEVICE.lock() {
        if let Some(previous) = state.replace(CurrentDevice {
            location,
            ptr: dev_ptr as usize,
        }) {
            // SAFETY: previous.ptr was produced by Box::into_raw and is only
            // reclaimed here or in clear_current_device.
            unsafe { drop(Box::from_raw(previous.ptr as *mut PciDev)) };
        }
    }
}
/// Forget the current device and free its `PciDev` box (no-op when nothing
/// is recorded or the lock is poisoned).
fn clear_current_device() {
    if let Ok(mut state) = CURRENT_DEVICE.lock() {
        if let Some(previous) = state.take() {
            // SAFETY: previous.ptr was produced by Box::into_raw.
            unsafe { drop(Box::from_raw(previous.ptr as *mut PciDev)) };
        }
    }
}
#[no_mangle]
/// Mark the device enabled. Only the `enabled` flag is set — no config-space
/// command bits are touched. Returns 0, or -EINVAL for a null dev.
pub extern "C" fn pci_enable_device(dev: *mut PciDev) -> i32 {
    if dev.is_null() {
        return -EINVAL;
    }
    // SAFETY: dev checked non-null; caller guarantees it points at a PciDev.
    let dev_ref = unsafe { &mut *dev };
    log::info!(
        "pci_enable_device: vendor=0x{:04x} device=0x{:04x}",
        dev_ref.vendor,
        dev_ref.device
    );
    dev_ref.enabled = true;
    0
}
#[no_mangle]
/// Clear the `enabled` flag; null dev is a no-op.
pub extern "C" fn pci_disable_device(dev: *mut PciDev) {
    if dev.is_null() {
        return;
    }
    log::info!("pci_disable_device");
    // SAFETY: dev checked non-null; caller guarantees it points at a PciDev.
    unsafe {
        (*dev).enabled = false;
    }
}
#[no_mangle]
/// Map BAR `bar` of `dev`; `max_len == 0` means "the whole BAR". Delegates
/// to `ioremap`, which maps the BAR's physical address through the Redox
/// memory scheme. Returns null for a null dev, an out-of-range BAR index, or
/// a zero-length mapping request.
///
/// Fix: the old log line claimed a "heap fallback", but this path performs a
/// real MMIO mapping via ioremap — the message now says so, at debug level
/// since this is not a warning condition.
pub extern "C" fn pci_iomap(dev: *mut PciDev, bar: u32, max_len: usize) -> *mut u8 {
    if dev.is_null() || bar >= 6 {
        return ptr::null_mut();
    }
    // SAFETY: dev checked non-null; caller guarantees it points at a PciDev.
    let dev_ref = unsafe { &*dev };
    let len = if max_len > 0 {
        max_len
    } else {
        dev_ref.bar_sizes[bar as usize] as usize
    };
    if len == 0 {
        return ptr::null_mut();
    }
    log::debug!("pci_iomap: bar={} len={} — mapping via ioremap", bar, len);
    super::io::ioremap(dev_ref.bars[bar as usize], len)
}
#[no_mangle]
/// Unmap a BAR mapping from `pci_iomap`; thin wrapper over `io::iounmap`.
pub extern "C" fn pci_iounmap(_dev: *mut PciDev, addr: *mut u8, size: usize) {
    super::io::iounmap(addr, size);
}
#[no_mangle]
/// Read a 32-bit PCI config register at `offset` into `*val`. The device is
/// re-opened through the PCI scheme on every call (no cached handle).
/// Returns 0, -EINVAL for null args, -ENODEV if the device can't be opened,
/// -EIO on a failed read.
/// NOTE(review): logs at info level per access — config reads can be hot
/// during bring-up; consider debug.
pub extern "C" fn pci_read_config_dword(dev: *mut PciDev, offset: u32, val: *mut u32) -> i32 {
    if dev.is_null() || val.is_null() {
        return -EINVAL;
    }
    let mut pci = match open_current_device(dev) {
        Ok(pci) => pci,
        Err(error) => return error,
    };
    match pci.read_config_dword(offset as u64) {
        Ok(read) => {
            unsafe { *val = read };
            log::info!(
                "pci_read_config_dword: offset=0x{:x} -> 0x{:08x}",
                offset,
                read
            );
            0
        }
        Err(error) => {
            log::warn!(
                "pci_read_config_dword: failed at offset=0x{:x}: {}",
                offset,
                error
            );
            -EIO
        }
    }
}
#[no_mangle]
/// Write a 32-bit PCI config register at `offset`. Same open-per-call and
/// error-code behavior as `pci_read_config_dword`.
pub extern "C" fn pci_write_config_dword(dev: *mut PciDev, offset: u32, val: u32) -> i32 {
    if dev.is_null() {
        return -EINVAL;
    }
    let mut pci = match open_current_device(dev) {
        Ok(pci) => pci,
        Err(error) => return error,
    };
    match pci.write_config_dword(offset as u64, val) {
        Ok(()) => {
            log::info!(
                "pci_write_config_dword: offset=0x{:x} val=0x{:08x}",
                offset,
                val
            );
            0
        }
        Err(error) => {
            log::warn!(
                "pci_write_config_dword: failed at offset=0x{:x} val=0x{:08x}: {}",
                offset,
                val,
                error
            );
            -EIO
        }
    }
}
#[no_mangle]
/// Stub: logs only. NOTE(review): the PCI command register's bus-master bit
/// is *not* set here — confirm the platform enables bus mastering elsewhere,
/// or DMA from the device will not work.
pub extern "C" fn pci_set_master(dev: *mut PciDev) {
    if dev.is_null() {
        return;
    }
    log::info!("pci_set_master");
}
#[no_mangle]
/// BAR base address for `bar` (0 for null dev or an out-of-range index).
pub extern "C" fn pci_resource_start(dev: *const PciDev, bar: u32) -> u64 {
    if dev.is_null() {
        return 0;
    }
    // SAFETY: dev checked non-null; caller guarantees it points at a PciDev.
    unsafe { (*dev).bars.get(bar as usize).copied().unwrap_or(0) }
}
#[no_mangle]
/// BAR length for `bar` (0 for null dev or an out-of-range index).
pub extern "C" fn pci_resource_len(dev: *const PciDev, bar: u32) -> u64 {
    if dev.is_null() {
        return 0;
    }
    // SAFETY: dev checked non-null; caller guarantees it points at a PciDev.
    unsafe { (*dev).bar_sizes.get(bar as usize).copied().unwrap_or(0) }
}
/// Probe callback: (pci_dev, matched id-table entry) -> 0 or negative errno.
pub type PciDriverProbe = extern "C" fn(*mut PciDev, *const PciDeviceId) -> i32;
/// Remove callback, invoked on driver unregistration.
pub type PciDriverRemove = extern "C" fn(*mut PciDev);
/// Mirror of Linux `struct pci_driver`: a name, a sentinel-terminated ID
/// table, and optional probe/remove callbacks.
#[repr(C)]
pub struct PciDriver {
    name: *const u8,
    id_table: *const PciDeviceId,
    probe: Option<PciDriverProbe>,
    remove: Option<PciDriverRemove>,
}
#[no_mangle]
/// Linux-style driver registration, specialized for this single-GPU shim:
/// enumerates *display-class* devices only (PCI_CLASS_DISPLAY), binds the
/// first one matching the driver's ID table, records it as the global
/// "current" device, and calls `probe` immediately. Returns the probe
/// status, or -EINVAL / -ENODEV / -EIO for the respective setup failures.
/// On probe failure the global state is unwound so a later registration
/// starts clean.
pub extern "C" fn pci_register_driver(drv: *mut PciDriver) -> i32 {
    if drv.is_null() {
        return -EINVAL;
    }
    // SAFETY: drv checked non-null; caller guarantees it points at a
    // PciDriver that outlives this call.
    let driver = unsafe { &*drv };
    let probe = match driver.probe {
        Some(probe) => probe,
        None => {
            log::warn!("pci_register_driver: missing probe callback");
            return -EINVAL;
        }
    };
    let devices = match enumerate_pci_class(PCI_CLASS_DISPLAY) {
        Ok(devices) => devices,
        Err(error) => {
            log::warn!("pci_register_driver: PCI enumeration failed: {}", error);
            return -ENODEV;
        }
    };
    // First candidate whose info matches an ID-table entry wins.
    let Some((info, id_ptr)) = devices.into_iter().find_map(|candidate| {
        matching_id_entry(&candidate, driver.id_table).map(|id_ptr| (candidate, id_ptr))
    }) else {
        log::info!("pci_register_driver: no matching PCI display device found");
        return -ENODEV;
    };
    let mut pci = match PciDevice::from_info(&info) {
        Ok(pci) => pci,
        Err(error) => {
            log::warn!(
                "pci_register_driver: failed to open {}: {}",
                info.location,
                error
            );
            return -ENODEV;
        }
    };
    // Re-read full info (BARs, IRQ, revision) for the PciDev handed to probe.
    let full_info = match pci.full_info() {
        Ok(full_info) => full_info,
        Err(error) => {
            log::warn!(
                "pci_register_driver: failed to read PCI info for {}: {}",
                info.location,
                error
            );
            return -EIO;
        }
    };
    // SAFETY: id_ptr comes from matching_id_entry walking the caller's table.
    let id = unsafe { &*id_ptr };
    let dev_ptr = Box::into_raw(Box::new(build_pci_dev(&full_info, id)));
    replace_current_device(full_info.location, dev_ptr);
    if let Ok(mut registered_probe) = REGISTERED_PROBE.lock() {
        *registered_probe = Some(probe);
    }
    log::info!(
        "pci_register_driver: probing {:04x}:{:04x} at {}",
        full_info.vendor_id,
        full_info.device_id,
        full_info.location
    );
    let status = probe(dev_ptr, id_ptr);
    if status != 0 {
        // Unwind global state so a later registration starts clean.
        log::warn!("pci_register_driver: probe failed with status {}", status);
        clear_current_device();
        if let Ok(mut registered_probe) = REGISTERED_PROBE.lock() {
            *registered_probe = None;
        }
    }
    status
}
/// Unregister a PCI driver (Linux `pci_unregister_driver`).
///
/// Invokes the driver's remove callback on the currently bound device (if
/// any), then clears the module-level current-device and registered-probe
/// state. A NULL driver still clears the global state.
#[no_mangle]
pub extern "C" fn pci_unregister_driver(drv: *mut PciDriver) {
    if !drv.is_null() {
        let driver = unsafe { &*drv };
        if let Some(remove) = driver.remove {
            // The lock guard is dropped at the end of the chain, so the
            // lock is NOT held while calling into driver code below.
            let current_ptr = CURRENT_DEVICE
                .lock()
                .ok()
                .and_then(|state| state.as_ref().map(|current| current.ptr as *mut PciDev));
            if let Some(dev_ptr) = current_ptr {
                remove(dev_ptr);
            }
        }
    }
    clear_current_device();
    if let Ok(mut registered_probe) = REGISTERED_PROBE.lock() {
        *registered_probe = None;
    }
    log::info!("pci_unregister_driver: cleared registered PCI driver state");
}
@@ -0,0 +1,177 @@
use std::sync::atomic::{AtomicU8, Ordering};
// Lock states for the spin-based mutex shim.
const UNLOCKED: u8 = 0;
const LOCKED: u8 = 1;
/// C-visible mutex for Linux `struct mutex` call sites.
///
/// Implemented as a spinning test-and-set byte: there is no sleeping or
/// wait queue, so contended sections busy-wait until the holder releases.
#[repr(C)]
pub struct LinuxMutex {
    state: AtomicU8,
}
/// Initialize `m` to the unlocked state. NULL is ignored.
#[no_mangle]
pub extern "C" fn mutex_init(m: *mut LinuxMutex) {
    if m.is_null() {
        return;
    }
    // SAFETY: the C caller guarantees `m` points to writable storage.
    unsafe {
        (*m).state = AtomicU8::new(UNLOCKED);
    }
}
/// Acquire `m`, spinning until it becomes available. NULL is ignored.
#[no_mangle]
pub extern "C" fn mutex_lock(m: *mut LinuxMutex) {
    if m.is_null() {
        return;
    }
    // SAFETY: non-NULL `m` is guaranteed live by the caller.
    let state = unsafe { &(*m).state };
    loop {
        if state
            .compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            break;
        }
        std::hint::spin_loop();
    }
}
/// Release `m`. NULL is ignored; unlocking an unlocked mutex is a no-op.
#[no_mangle]
pub extern "C" fn mutex_unlock(m: *mut LinuxMutex) {
    if m.is_null() {
        return;
    }
    // SAFETY: non-NULL `m` is guaranteed live by the caller.
    unsafe { &(*m).state }.store(UNLOCKED, Ordering::Release);
}
/// Report whether `m` is currently held. NULL reports false.
#[no_mangle]
pub extern "C" fn mutex_is_locked(m: *mut LinuxMutex) -> bool {
    // SAFETY: as_ref yields None for NULL; otherwise the caller guarantees
    // the pointer is live.
    match unsafe { m.as_ref() } {
        Some(mutex) => mutex.state.load(Ordering::Acquire) == LOCKED,
        None => false,
    }
}
/// Raw C-visible spinlock: a single test-and-set byte.
#[repr(C)]
#[derive(Default)]
pub struct Spinlock {
    locked: AtomicU8,
}
/// Reset `lock` to the released state. NULL is ignored.
#[no_mangle]
pub extern "C" fn spin_lock_init(lock: *mut Spinlock) {
    if lock.is_null() {
        return;
    }
    // SAFETY: non-NULL `lock` is guaranteed live by the caller.
    unsafe { &(*lock).locked }.store(0, Ordering::SeqCst);
}
/// Busy-wait until `lock` is acquired. NULL is ignored.
#[no_mangle]
pub extern "C" fn spin_lock(lock: *mut Spinlock) {
    if lock.is_null() {
        return;
    }
    // SAFETY: non-NULL `lock` is guaranteed live by the caller.
    let flag = unsafe { &(*lock).locked };
    loop {
        if flag
            .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            return;
        }
        std::hint::spin_loop();
    }
}
/// Release `lock`. NULL is ignored.
#[no_mangle]
pub extern "C" fn spin_unlock(lock: *mut Spinlock) {
    if lock.is_null() {
        return;
    }
    // SAFETY: non-NULL `lock` is guaranteed live by the caller.
    unsafe { &(*lock).locked }.store(0, Ordering::Release);
}
// Global emulated interrupt-disable depth shared by the *_irqsave helpers.
// There is no real interrupt masking here; the counter only feeds
// irqs_disabled().
static IRQ_DEPTH: std::sync::atomic::AtomicU32 = std::sync::atomic::AtomicU32::new(0);
/// Emulated `spin_lock_irqsave`: bump the IRQ depth, take the lock, and
/// report the previous depth both through `flags` (when non-NULL) and the
/// return value, for callers using either convention.
#[no_mangle]
pub extern "C" fn spin_lock_irqsave(lock: *mut Spinlock, flags: *mut u64) -> u64 {
    let prev_depth = IRQ_DEPTH.fetch_add(1, Ordering::Acquire);
    spin_lock(lock);
    if !flags.is_null() {
        unsafe { *flags = prev_depth as u64 };
    }
    prev_depth as u64
}
/// Emulated `spin_unlock_irqrestore`: release the lock, then restore the
/// depth captured by the matching `spin_lock_irqsave`.
#[no_mangle]
pub extern "C" fn spin_unlock_irqrestore(lock: *mut Spinlock, flags: u64) {
    spin_unlock(lock);
    IRQ_DEPTH.store(flags as u32, Ordering::Release);
}
/// Emulated `local_irq_save`: bumps the global depth counter and reports
/// the previous depth through `flags` when it is non-NULL.
#[no_mangle]
pub extern "C" fn local_irq_save(flags: *mut u64) {
    let previous = IRQ_DEPTH.fetch_add(1, Ordering::Acquire) as u64;
    // SAFETY: as_mut yields None for NULL; otherwise the caller guarantees
    // the pointer is writable.
    if let Some(out) = unsafe { flags.as_mut() } {
        *out = previous;
    }
}
/// Emulated `local_irq_restore`: restores the depth captured by the
/// matching `local_irq_save`.
#[no_mangle]
pub extern "C" fn local_irq_restore(flags: u64) {
    IRQ_DEPTH.store(flags as u32, Ordering::Release);
}
/// True while at least one emulated IRQ-disabled section is active.
#[no_mangle]
pub extern "C" fn irqs_disabled() -> bool {
    IRQ_DEPTH.load(Ordering::Acquire) != 0
}
use std::ptr;
/// C-visible one-shot completion flag (Linux `struct completion` shim).
///
/// Only the `done` byte is meaningful; the padding presumably reserves
/// space to match the C-side struct size — confirm against the headers.
#[repr(C)]
pub struct Completion {
    done: AtomicU8,
    _padding: [u8; 63],
}
/// Initialize `c` to the not-done state. NULL is ignored.
#[no_mangle]
pub extern "C" fn init_completion(c: *mut Completion) {
    if c.is_null() {
        return;
    }
    let fresh = Completion {
        done: AtomicU8::new(0),
        _padding: [0; 63],
    };
    // SAFETY: the caller's storage may be uninitialized, so ptr::write is
    // used to avoid dropping whatever bytes were there before.
    unsafe { ptr::write(c, fresh) };
}
/// Mark `c` done, releasing any spinning waiters. NULL is ignored.
#[no_mangle]
pub extern "C" fn complete(c: *mut Completion) {
    // SAFETY: as_ref yields None for NULL; otherwise the caller guarantees
    // the pointer is live.
    if let Some(completion) = unsafe { c.as_ref() } {
        completion.done.store(1, Ordering::Release);
    }
}
/// Spin until `c` is marked done. NULL returns immediately.
#[no_mangle]
pub extern "C" fn wait_for_completion(c: *mut Completion) {
    // SAFETY: see `complete`.
    let Some(completion) = (unsafe { c.as_ref() }) else {
        return;
    };
    while completion.done.load(Ordering::Acquire) == 0 {
        std::hint::spin_loop();
    }
}
/// Reset `c` to not-done so it can be awaited again. NULL is ignored.
#[no_mangle]
pub extern "C" fn reinit_completion(c: *mut Completion) {
    // SAFETY: see `complete`.
    if let Some(completion) = unsafe { c.as_ref() } {
        completion.done.store(0, Ordering::Release);
    }
}
@@ -0,0 +1,256 @@
use std::collections::HashMap;
use std::mem;
use std::os::raw::c_int;
use std::ptr;
use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering};
use std::sync::{Arc, Mutex, OnceLock};
use std::thread::JoinHandle;
use std::time::Duration;
/// Minimal `struct timespec` mirror for the raw `clock_gettime` FFI call.
/// NOTE(review): assumes the platform timespec uses two 64-bit fields —
/// confirm against relibc's definition.
#[repr(C)]
struct Timespec {
    tv_sec: i64,
    tv_nsec: i64,
}
unsafe extern "C" {
    // Presumably provided by the C library; used instead of std::time to
    // read the monotonic clock directly.
    fn clock_gettime(clock_id: c_int, tp: *mut Timespec) -> c_int;
}
// Clock id passed to clock_gettime (matches Linux CLOCK_MONOTONIC == 1).
const CLOCK_MONOTONIC: c_int = 1;
/// Shadow bookkeeping for one C `TimerList`, keyed by the timer's address.
struct TimerEntry {
    generation: AtomicU64,               // bumped to invalidate older schedules
    active: AtomicBool,                  // true while a callback is pending
    function: AtomicPtr<()>,             // callback stored as a type-erased pointer
    data: AtomicPtr<u8>,                 // opaque argument passed to the callback
    handles: Mutex<Vec<JoinHandle<()>>>, // sleeper threads; joined by del_timer_sync
}
/// C-visible mirror of the subset of Linux `struct timer_list` used here.
#[repr(C)]
pub struct TimerList {
    expires: AtomicU64,      // absolute expiry in shim jiffies (milliseconds)
    function: AtomicPtr<()>, // callback taking the `data` pointer
    data: AtomicPtr<u8>,
    active: AtomicBool,      // mirrors the shadow entry's active flag
}
/// Global registry mapping a `TimerList` address to its shadow entry.
fn timer_entries() -> &'static Mutex<HashMap<usize, Arc<TimerEntry>>> {
    static TIMER_ENTRIES: OnceLock<Mutex<HashMap<usize, Arc<TimerEntry>>>> = OnceLock::new();
    TIMER_ENTRIES.get_or_init(Default::default)
}
/// Milliseconds of monotonic clock time — the shim's "jiffies" unit.
///
/// Returns 0 when the clock read fails or reports negative components.
fn current_jiffies() -> u64 {
    let mut ts = Timespec {
        tv_sec: 0,
        tv_nsec: 0,
    };
    // SAFETY: `ts` is valid, writable storage for the duration of the call.
    if unsafe { clock_gettime(CLOCK_MONOTONIC, &mut ts) } != 0 {
        return 0;
    }
    if ts.tv_sec < 0 || ts.tv_nsec < 0 {
        return 0;
    }
    let millis_from_secs = (ts.tv_sec as u64).saturating_mul(1_000);
    millis_from_secs.saturating_add((ts.tv_nsec as u64) / 1_000_000)
}
/// Lock the global timer registry, recovering from poisoning so a panicking
/// timer callback cannot permanently wedge the registry.
fn lock_timer_entries() -> std::sync::MutexGuard<'static, HashMap<usize, Arc<TimerEntry>>> {
    timer_entries()
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
}
/// Lock one entry's join-handle list, recovering from poisoning.
fn lock_timer_handles(entry: &TimerEntry) -> std::sync::MutexGuard<'_, Vec<JoinHandle<()>>> {
    entry
        .handles
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
}
/// Fetch (or lazily create) the shadow entry for `timer`, keyed by address.
fn timer_entry(timer: *mut TimerList) -> Arc<TimerEntry> {
    let key = timer as usize;
    let mut entries = lock_timer_entries();
    Arc::clone(entries.entry(key).or_insert_with(|| {
        Arc::new(TimerEntry {
            generation: AtomicU64::new(0),
            active: AtomicBool::new(false),
            function: AtomicPtr::new(ptr::null_mut()),
            data: AtomicPtr::new(ptr::null_mut()),
            handles: Mutex::new(Vec::new()),
        })
    }))
}
/// Replace the shadow entry for `timer` with a fresh one carrying the given
/// callback and argument.
///
/// The old entry (if any) is deactivated and its generation bumped so its
/// sleeper threads become no-ops. NOTE(review): the old entry's join
/// handles are dropped here, detaching those threads instead of joining.
fn reset_timer_entry(timer: *mut TimerList, function: *mut (), data: *mut u8) {
    let mut entries = lock_timer_entries();
    if let Some(entry) = entries.get(&(timer as usize)) {
        entry.active.store(false, Ordering::Release);
        entry.generation.fetch_add(1, Ordering::AcqRel);
    }
    entries.insert(
        timer as usize,
        Arc::new(TimerEntry {
            generation: AtomicU64::new(0),
            active: AtomicBool::new(false),
            function: AtomicPtr::new(function),
            data: AtomicPtr::new(data),
            handles: Mutex::new(Vec::new()),
        }),
    );
}
/// Join every spawned sleeper thread for `entry`, draining the handle list
/// first so the lock is not held across the joins.
fn join_all_handles(entry: &TimerEntry) {
    let mut guard = lock_timer_handles(entry);
    let handles = mem::take(&mut *guard);
    drop(guard);
    for handle in handles {
        let _ = handle.join();
    }
}
/// Initialize a C-owned `TimerList` with its callback and argument
/// (Linux `setup_timer`). NULL timers are ignored.
#[no_mangle]
pub extern "C" fn setup_timer(
    timer: *mut TimerList,
    function: extern "C" fn(*mut u8),
    data: *mut u8,
) {
    if timer.is_null() {
        return;
    }
    // Erase the fn type so it fits in an AtomicPtr.
    let function_ptr = function as usize as *mut ();
    unsafe {
        // ptr::write: the caller's storage may be uninitialized, so the
        // previous bytes must not be dropped as a TimerList.
        ptr::write(
            timer,
            TimerList {
                expires: AtomicU64::new(0),
                function: AtomicPtr::new(function_ptr),
                data: AtomicPtr::new(data),
                active: AtomicBool::new(false),
            },
        );
    }
    // (Re)create the shadow entry so stale state from a previous timer at
    // the same address cannot leak into this one.
    reset_timer_entry(timer, function_ptr, data);
}
/// (Re)arm `timer` to fire at absolute time `expires` (shim jiffies, i.e.
/// milliseconds). Returns 1 if the timer was already pending, 0 otherwise,
/// matching Linux `mod_timer`.
///
/// Each call spawns a dedicated sleeper thread; the generation counter
/// ensures only the latest schedule actually runs the callback.
#[no_mangle]
pub extern "C" fn mod_timer(timer: *mut TimerList, expires: u64) -> i32 {
    if timer.is_null() {
        return 0;
    }
    let timer_ref = unsafe { &*timer };
    let entry = timer_entry(timer);
    // Mirror callback/data from the C struct into the shadow entry.
    entry.function.store(
        timer_ref.function.load(Ordering::Acquire),
        Ordering::Release,
    );
    entry
        .data
        .store(timer_ref.data.load(Ordering::Acquire), Ordering::Release);
    let was_active = entry.active.swap(true, Ordering::AcqRel);
    timer_ref.active.store(true, Ordering::Release);
    timer_ref.expires.store(expires, Ordering::Release);
    // Bump the generation; older sleeper threads see a mismatch and bail.
    let generation = entry
        .generation
        .fetch_add(1, Ordering::AcqRel)
        .wrapping_add(1);
    // Already-expired deadlines yield a zero delay and fire immediately.
    let delay = expires.saturating_sub(current_jiffies());
    let function_addr = entry.function.load(Ordering::Acquire) as usize;
    let data_addr = entry.data.load(Ordering::Acquire) as usize;
    let entry_for_thread = entry.clone();
    let handle = std::thread::spawn(move || {
        std::thread::sleep(Duration::from_millis(delay));
        // Cancelled (del_timer) or superseded (newer mod_timer)? Do nothing.
        if !entry_for_thread.active.load(Ordering::Acquire) {
            return;
        }
        if entry_for_thread.generation.load(Ordering::Acquire) != generation {
            return;
        }
        if function_addr == 0 {
            entry_for_thread.active.store(false, Ordering::Release);
            return;
        }
        // SAFETY: function_addr was produced from an `extern "C" fn(*mut u8)`
        // stored by setup_timer/mod_timer, so transmuting back is sound.
        let function =
            unsafe { std::mem::transmute::<usize, extern "C" fn(*mut u8)>(function_addr) };
        function(data_addr as *mut u8);
        // Only clear `active` if no newer schedule raced in meanwhile.
        if entry_for_thread.generation.load(Ordering::Acquire) == generation {
            entry_for_thread.active.store(false, Ordering::Release);
        }
    });
    // Handles accumulate until del_timer_sync joins and drains them.
    lock_timer_handles(&entry).push(handle);
    if was_active {
        1
    } else {
        0
    }
}
/// Deactivate `timer` without waiting for in-flight callbacks.
///
/// Returns 1 if the timer was pending, 0 otherwise (Linux semantics).
#[no_mangle]
pub extern "C" fn del_timer(timer: *mut TimerList) -> i32 {
    if timer.is_null() {
        return 0;
    }
    let entry = timer_entry(timer);
    let was_active = entry.active.swap(false, Ordering::AcqRel);
    // Invalidate any sleeper threads spawned for older schedules.
    entry.generation.fetch_add(1, Ordering::AcqRel);
    // SAFETY: non-NULL `timer` is guaranteed live by the caller.
    unsafe { &*timer }.active.store(false, Ordering::Release);
    if was_active { 1 } else { 0 }
}
/// Deactivate `timer` and wait for all of its sleeper threads to finish
/// (Linux `del_timer_sync`). Returns 1 if it was pending, 0 otherwise.
///
/// NOTE(review): joining waits out each sleeper's full sleep even though
/// the generation bump already prevents the callback from running, so this
/// can block for up to the pending delay.
#[no_mangle]
pub extern "C" fn del_timer_sync(timer: *mut TimerList) -> i32 {
    if timer.is_null() {
        return 0;
    }
    let timer_ref = unsafe { &*timer };
    let entry = timer_entry(timer);
    let was_active = entry.active.swap(false, Ordering::AcqRel);
    // Invalidate sleeping timer threads before joining them.
    entry.generation.fetch_add(1, Ordering::AcqRel);
    timer_ref.active.store(false, Ordering::Release);
    join_all_handles(&entry);
    if was_active {
        1
    } else {
        0
    }
}
/// Return 1 while `timer` has a scheduled, not-yet-fired callback.
#[no_mangle]
pub extern "C" fn timer_pending(timer: *const TimerList) -> i32 {
    if timer.is_null() {
        return 0;
    }
    let pending = lock_timer_entries()
        .get(&(timer as usize))
        .map_or(false, |entry| entry.active.load(Ordering::Acquire));
    if pending { 1 } else { 0 }
}
@@ -0,0 +1,186 @@
use std::ptr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Condvar, Mutex};
use std::time::{Duration, Instant};
use std::collections::HashMap;
use std::sync::{Arc, OnceLock};
/// Per-waitqueue wake counter; each `wake_up` bumps `generation` so waiters
/// can detect wakeups that raced with their condition check.
struct WaitState {
    generation: AtomicU64,
}
/// C-visible wait queue head; the mutex guards a "notified" flag that is
/// paired with the condvar for wake delivery.
#[repr(C)]
pub struct WaitQueueHead {
    condvar: Condvar,
    mutex: Mutex<bool>,
}
/// Global registry mapping a wait-queue address to its wake-generation state.
fn wait_states() -> &'static Mutex<HashMap<usize, Arc<WaitState>>> {
    static WAIT_STATES: OnceLock<Mutex<HashMap<usize, Arc<WaitState>>>> = OnceLock::new();
    WAIT_STATES.get_or_init(Default::default)
}
/// Lock the wait-state registry, recovering from poisoning so one panicked
/// waiter cannot wedge every wait queue.
fn lock_wait_states() -> std::sync::MutexGuard<'static, HashMap<usize, Arc<WaitState>>> {
    wait_states()
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
}
/// Install a fresh wake-generation record for `wq`, discarding any old one.
fn reset_wait_state(wq: *mut WaitQueueHead) {
    let fresh = Arc::new(WaitState {
        generation: AtomicU64::new(0),
    });
    lock_wait_states().insert(wq as usize, fresh);
}
/// Fetch (or lazily create) the wake-generation state for `wq`.
fn wait_state(wq: *mut WaitQueueHead) -> Arc<WaitState> {
    let key = wq as usize;
    let mut states = lock_wait_states();
    Arc::clone(states.entry(key).or_insert_with(|| {
        Arc::new(WaitState {
            generation: AtomicU64::new(0),
        })
    }))
}
/// Core of `wait_event`: block until `condition` returns true.
///
/// Waiters sleep on the condvar until either the condition flips or a
/// `wake_up` bumps the generation counter (which it does while holding the
/// same mutex). Spurious condvar wakes are handled by the re-check loop.
fn wait_event_impl<F>(wq: *mut WaitQueueHead, condition: F)
where
    F: Fn() -> bool,
{
    if wq.is_null() {
        return;
    }
    let wq_ref = unsafe { &*wq };
    let state = wait_state(wq);
    loop {
        if condition() {
            return;
        }
        let mut notified = match wq_ref.mutex.lock() {
            Ok(guard) => guard,
            Err(e) => e.into_inner(),
        };
        // Snapshot the generation under the mutex; wake_up increments it
        // under the same mutex before notifying.
        let generation = state.generation.load(Ordering::Acquire);
        while state.generation.load(Ordering::Acquire) == generation && !condition() {
            notified = match wq_ref.condvar.wait(notified) {
                Ok(guard) => guard,
                Err(e) => e.into_inner(),
            };
        }
        // Consume the notification flag before looping to re-test.
        *notified = false;
    }
}
/// Core of `wait_event_timeout`: wait for `condition` with a deadline.
///
/// Returns 1 when the condition became true, 0 on timeout. `timeout_ms` is
/// converted to an absolute deadline up front so repeated condvar waits
/// (spurious wakes, unrelated wake_ups) do not extend the total wait.
fn wait_event_timeout_impl<F>(wq: *mut WaitQueueHead, condition: F, timeout_ms: u64) -> i32
where
    F: Fn() -> bool,
{
    if wq.is_null() {
        return 0;
    }
    let deadline = Instant::now() + Duration::from_millis(timeout_ms);
    let wq_ref = unsafe { &*wq };
    let state = wait_state(wq);
    loop {
        if condition() {
            return 1;
        }
        let now = Instant::now();
        if now >= deadline {
            return 0;
        }
        let remaining = deadline.saturating_duration_since(now);
        let notified = match wq_ref.mutex.lock() {
            Ok(guard) => guard,
            Err(e) => e.into_inner(),
        };
        // Snapshot the generation under the mutex (wake_up bumps it under
        // the same mutex before notifying).
        let generation = state.generation.load(Ordering::Acquire);
        let (mut notified, wait_result) = match wq_ref.condvar.wait_timeout(notified, remaining) {
            Ok(result) => result,
            Err(e) => e.into_inner(),
        };
        // Consume the notification flag if a waker set it.
        if *notified {
            *notified = false;
        }
        if condition() {
            return 1;
        }
        // A generation change means a wake raced in; loop and re-evaluate
        // against the deadline rather than trusting this round's timeout.
        if state.generation.load(Ordering::Acquire) != generation {
            continue;
        }
        if wait_result.timed_out() && !condition() {
            return 0;
        }
    }
}
/// Initialize a C-owned wait queue head. NULL is ignored.
#[no_mangle]
pub extern "C" fn init_waitqueue_head(wq: *mut WaitQueueHead) {
    if wq.is_null() {
        return;
    }
    unsafe {
        // ptr::write: the caller's storage may be uninitialized, so the old
        // bytes must not be dropped as a Condvar/Mutex.
        ptr::write(
            wq,
            WaitQueueHead {
                condvar: Condvar::new(),
                mutex: Mutex::new(false),
            },
        );
    }
    // Fresh generation counter; discards state from any prior queue that
    // lived at the same address.
    reset_wait_state(wq);
}
/// FFI wrapper over `wait_event_impl`: block until `condition` reports true.
#[no_mangle]
pub extern "C" fn wait_event(wq: *mut WaitQueueHead, condition: extern "C" fn() -> bool) {
    let check = move || condition();
    wait_event_impl(wq, check);
}
/// Wake all waiters on `wq` (Linux `wake_up`). NULL is ignored.
#[no_mangle]
pub extern "C" fn wake_up(wq: *mut WaitQueueHead) {
    if wq.is_null() {
        return;
    }
    let wq_ref = unsafe { &*wq };
    let state = wait_state(wq);
    {
        // Set the flag and bump the generation while holding the mutex so
        // waiters that sample the generation under the same mutex observe a
        // consistent ordering with this wake.
        let mut notified = match wq_ref.mutex.lock() {
            Ok(guard) => guard,
            Err(e) => e.into_inner(),
        };
        *notified = true;
        state.generation.fetch_add(1, Ordering::AcqRel);
    }
    // Notify after releasing the mutex so woken threads can immediately
    // re-acquire it.
    wq_ref.condvar.notify_all();
}
/// FFI wrapper over `wait_event_timeout_impl`: returns 1 when the condition
/// became true within `timeout_ms` milliseconds, 0 on timeout.
#[no_mangle]
pub extern "C" fn wait_event_timeout(
    wq: *mut WaitQueueHead,
    condition: extern "C" fn() -> bool,
    timeout_ms: u64,
) -> i32 {
    let check = move || condition();
    wait_event_timeout_impl(wq, check, timeout_ms)
}
@@ -0,0 +1,290 @@
use std::collections::VecDeque;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, Condvar, Mutex};
/// Wrapper that lets a raw `WorkStruct` pointer be moved across threads.
struct SendWorkPtr(*mut WorkStruct);
impl SendWorkPtr {
    // Recover the raw pointer for the worker that executes the item.
    fn as_ptr(&self) -> *mut WorkStruct {
        self.0
    }
}
// SAFETY(review): raw pointers are not `Send` by default; this asserts that
// the C caller keeps the WorkStruct alive and does not mutate it while it
// is queued — confirm that contract holds for all call sites.
unsafe impl Send for SendWorkPtr {}
/// C-visible mirror of Linux `struct work_struct`; only `func` is
/// interpreted here, the opaque tail reserves space for the C side.
#[repr(C)]
pub struct WorkStruct {
    pub func: Option<extern "C" fn(*mut WorkStruct)>,
    pub __opaque: [u8; 64],
}
/// C-visible mirror of Linux `struct delayed_work`: the WorkStruct comes
/// first, so a DelayedWork pointer may be cast to a WorkStruct pointer.
#[repr(C)]
pub struct DelayedWork {
    pub work: WorkStruct,
    pub __timer_opaque: [u8; 64],
}
/// Shared state for one workqueue: the FIFO of raw work pointers plus the
/// accounting used by flush/destroy.
struct WorkqueueInner {
    queue: Mutex<VecDeque<SendWorkPtr>>,
    pending_count: AtomicUsize, // queued + currently-executing items
    done_condvar: Condvar,      // signalled when pending_count drains to 0
    shutdown: AtomicBool,       // set by destroy_workqueue to stop workers
    thread_count: usize,        // number of worker threads spawned
}
/// Owner handle returned to C by `alloc_workqueue`.
pub struct WorkqueueStruct {
    inner: Arc<WorkqueueInner>,
    _name: String, // retained from the C name argument; not otherwise read
    handles: Vec<std::thread::JoinHandle<()>>,
}
lazy_static::lazy_static! {
    /// Process-wide default workqueue backing `schedule_work`,
    /// `schedule_delayed_work`, and `flush_scheduled_work`; created on
    /// first use and never torn down.
    static ref DEFAULT_WQ: Arc<WorkqueueInner> = {
        let inner = Arc::new(WorkqueueInner {
            queue: Mutex::new(VecDeque::new()),
            pending_count: AtomicUsize::new(0),
            done_condvar: Condvar::new(),
            shutdown: AtomicBool::new(false),
            thread_count: 4,
        });
        let inner_clone = inner.clone();
        // Workers are detached: the default queue lives for the process.
        for _ in 0..inner.thread_count {
            let ic = inner_clone.clone();
            std::thread::spawn(move || worker_loop(ic));
        }
        inner
    };
}
/// Body of each workqueue worker thread: pop and run items until shutdown.
///
/// NOTE(review): idle workers poll with a 1 ms sleep instead of blocking on
/// a condvar — simple, but adds up to 1 ms of dispatch latency.
fn worker_loop(inner: Arc<WorkqueueInner>) {
    loop {
        if inner.shutdown.load(Ordering::Acquire) {
            break;
        }
        // Pop inside a small scope so the lock is not held while running
        // the callback.
        let work = {
            let mut queue = match inner.queue.lock() {
                Ok(q) => q,
                Err(e) => {
                    log::error!("workqueue: lock poisoned, recovering: {}", e);
                    e.into_inner()
                }
            };
            queue.pop_front()
        };
        if let Some(send_work_ptr) = work {
            let work_ptr = send_work_ptr.as_ptr();
            if let Some(func) = unsafe { (*work_ptr).func } {
                func(work_ptr);
            }
            let prev = inner.pending_count.fetch_sub(1, Ordering::Release);
            if prev == 1 {
                // Last pending item: take and release the queue lock so the
                // notification cannot race ahead of a flusher entering its
                // condvar wait, then wake all flush/destroy waiters.
                let queue = match inner.queue.lock() {
                    Ok(q) => q,
                    Err(e) => {
                        log::error!("workqueue: lock poisoned, recovering: {}", e);
                        e.into_inner()
                    }
                };
                drop(queue);
                inner.done_condvar.notify_all();
            }
        } else {
            std::thread::sleep(std::time::Duration::from_millis(1));
        }
    }
}
/// Queue `work` on `inner`'s FIFO for the worker pool.
///
/// Returns 1 when the work was queued, 0 for a NULL pointer. (Linux's
/// `queue_work` returns false when the item is already pending; this shim
/// does not track per-item pending state and always re-queues.)
fn dispatch_work(inner: &Arc<WorkqueueInner>, work: *mut WorkStruct) -> i32 {
    if work.is_null() {
        return 0;
    }
    // Account for the item BEFORE it becomes visible in the queue: if the
    // push happened first, a worker could pop and finish it (fetch_sub)
    // before our fetch_add, wrapping pending_count below zero and leaving
    // flush_*/destroy_workqueue waiting forever.
    inner.pending_count.fetch_add(1, Ordering::Release);
    {
        let mut queue = match inner.queue.lock() {
            Ok(q) => q,
            Err(e) => {
                log::error!("workqueue: lock poisoned, recovering: {}", e);
                e.into_inner()
            }
        };
        queue.push_back(SendWorkPtr(work));
    }
    1
}
/// Create a dedicated workqueue with its own worker threads
/// (Linux `alloc_workqueue`). A positive `max_active` sets the worker
/// count; otherwise 4 workers are spawned. Returns an owned pointer that
/// must be released with `destroy_workqueue`.
#[no_mangle]
pub extern "C" fn alloc_workqueue(
    name: *const u8,
    _flags: u32,
    max_active: i32,
) -> *mut WorkqueueStruct {
    let name_str = if name.is_null() {
        String::from("unknown")
    } else {
        unsafe {
            // Manual strlen over the C string. NOTE(review): trusts the
            // caller to pass a NUL-terminated buffer; an unterminated one
            // reads out of bounds.
            let mut len = 0;
            while *name.add(len) != 0 {
                len += 1;
            }
            // Non-UTF-8 names fall back to a placeholder rather than fail.
            match std::str::from_utf8(std::slice::from_raw_parts(name, len)) {
                Ok(s) => s.to_string(),
                Err(_) => String::from("unknown"),
            }
        }
    };
    let thread_count = if max_active > 0 {
        max_active as usize
    } else {
        4
    };
    let inner = Arc::new(WorkqueueInner {
        queue: Mutex::new(VecDeque::new()),
        pending_count: AtomicUsize::new(0),
        done_condvar: Condvar::new(),
        shutdown: AtomicBool::new(false),
        thread_count,
    });
    let mut handles = Vec::with_capacity(inner.thread_count);
    for _ in 0..inner.thread_count {
        let ic = inner.clone();
        handles.push(std::thread::spawn(move || worker_loop(ic)));
    }
    // The box is handed to C; destroy_workqueue reclaims it via from_raw.
    let wq = Box::new(WorkqueueStruct {
        inner,
        _name: name_str,
        handles,
    });
    Box::into_raw(wq)
}
/// Drain and tear down a workqueue created by `alloc_workqueue`.
///
/// Waits for all pending work to finish, then signals shutdown and joins
/// every worker thread. The pointer is consumed and must not be reused.
#[no_mangle]
pub extern "C" fn destroy_workqueue(wq: *mut WorkqueueStruct) {
    if wq.is_null() {
        return;
    }
    // Reclaim ownership of the box allocated in alloc_workqueue.
    let mut wq = unsafe { Box::from_raw(wq) };
    {
        let mut queue = match wq.inner.queue.lock() {
            Ok(q) => q,
            Err(e) => {
                log::error!("workqueue: lock poisoned, recovering: {}", e);
                e.into_inner()
            }
        };
        // Condvar wait releases the queue lock, letting workers progress;
        // worker_loop notifies when pending_count drains to zero.
        while wq.inner.pending_count.load(Ordering::Acquire) > 0 {
            queue = match wq.inner.done_condvar.wait(queue) {
                Ok(q) => q,
                Err(e) => {
                    log::error!("workqueue: condvar wait failed, recovering: {}", e);
                    e.into_inner()
                }
            };
        }
    }
    wq.inner.shutdown.store(true, Ordering::Release);
    wq.inner.done_condvar.notify_all();
    // Workers poll `shutdown`, so the joins complete promptly.
    for handle in wq.handles.drain(..) {
        let _ = handle.join();
    }
}
/// Queue `work` on workqueue `wq` (Linux `queue_work`).
/// Returns 1 on success, 0 when either pointer is NULL.
#[no_mangle]
pub extern "C" fn queue_work(wq: *mut WorkqueueStruct, work: *mut WorkStruct) -> i32 {
    // SAFETY: as_ref yields None for NULL; otherwise the caller guarantees
    // the workqueue pointer is live.
    match unsafe { wq.as_ref() } {
        Some(queue) => dispatch_work(&queue.inner, work),
        None => 0,
    }
}
/// Block until every item queued on `wq` has finished executing
/// (Linux `flush_workqueue`). NULL is ignored.
#[no_mangle]
pub extern "C" fn flush_workqueue(wq: *mut WorkqueueStruct) {
    if wq.is_null() {
        return;
    }
    let inner = unsafe { &(*wq).inner };
    let mut queue = match inner.queue.lock() {
        Ok(q) => q,
        Err(e) => {
            log::error!("workqueue: lock poisoned, recovering: {}", e);
            e.into_inner()
        }
    };
    // Condvar wait releases the queue lock so workers can progress;
    // worker_loop notifies when pending_count drains to zero.
    while inner.pending_count.load(Ordering::Acquire) > 0 {
        queue = match inner.done_condvar.wait(queue) {
            Ok(q) => q,
            Err(e) => {
                log::error!("workqueue: condvar wait failed, recovering: {}", e);
                e.into_inner()
            }
        };
    }
}
/// Queue `work` on the shared default workqueue (Linux `schedule_work`).
/// Returns 1 on success, 0 when `work` is NULL.
#[no_mangle]
pub extern "C" fn schedule_work(work: *mut WorkStruct) -> i32 {
    dispatch_work(&DEFAULT_WQ, work)
}
/// Run `dwork` after `delay` milliseconds (Linux `schedule_delayed_work`).
///
/// Bypasses the worker pool: a dedicated thread sleeps and then invokes the
/// callback directly. The default queue's pending count is still bumped, so
/// `flush_scheduled_work` waits for delayed items too — including their
/// remaining sleep time. Returns 1 on success, 0 for NULL input.
/// NOTE(review): there is no cancellation hook for delayed work.
#[no_mangle]
pub extern "C" fn schedule_delayed_work(dwork: *mut DelayedWork, delay: u64) -> i32 {
    if dwork.is_null() {
        return 0;
    }
    // DelayedWork begins with its WorkStruct (repr(C)), so this cast is
    // sound for reading `func`.
    let work_ptr = SendWorkPtr(dwork as *mut WorkStruct);
    let inner = DEFAULT_WQ.clone();
    // Account for the item before the thread starts, mirroring queued work.
    inner.pending_count.fetch_add(1, Ordering::Release);
    std::thread::spawn(move || {
        std::thread::sleep(std::time::Duration::from_millis(delay));
        let ptr = work_ptr.as_ptr();
        if let Some(func) = unsafe { (*ptr).func } {
            func(ptr);
        }
        let prev = inner.pending_count.fetch_sub(1, Ordering::Release);
        if prev == 1 {
            // Same lock-then-notify handshake as worker_loop so flushers do
            // not miss the final wakeup.
            let queue = match inner.queue.lock() {
                Ok(q) => q,
                Err(e) => {
                    log::error!("workqueue: lock poisoned, recovering: {}", e);
                    e.into_inner()
                }
            };
            drop(queue);
            inner.done_condvar.notify_all();
        }
    });
    1
}
/// Block until the shared default workqueue has no pending work
/// (Linux `flush_scheduled_work`), including delayed items still sleeping.
#[no_mangle]
pub extern "C" fn flush_scheduled_work() {
    let mut queue = match DEFAULT_WQ.queue.lock() {
        Ok(q) => q,
        Err(e) => {
            log::error!("workqueue: lock poisoned, recovering: {}", e);
            e.into_inner()
        }
    };
    // Condvar wait releases the queue lock so workers can progress;
    // worker_loop / delayed-work threads notify when the count hits zero.
    while DEFAULT_WQ.pending_count.load(Ordering::Acquire) > 0 {
        queue = match DEFAULT_WQ.done_condvar.wait(queue) {
            Ok(q) => q,
            Err(e) => {
                log::error!("workqueue: condvar wait failed, recovering: {}", e);
                e.into_inner()
            }
        };
    }
}