Red Bear OS — microkernel OS in Rust, based on Redox

Derivative of Redox OS (https://www.redox-os.org) adding:
- AMD GPU driver (amdgpu) via LinuxKPI compat layer
- ext4 filesystem support (ext4d scheme daemon)
- ACPI fixes for AMD bare metal (x2APIC, DMAR, IVRS, MCFG)
- Custom branding (hostname, os-release, boot identity)

Build system is full upstream Redox with RBOS overlay in local/.
Patches for kernel, base, and relibc are symlinked from local/patches/
and protected from make clean/distclean. Custom recipes live in
local/recipes/ with symlinks into the recipes/ search path.

Build:  make all CONFIG_NAME=redbear-full
Sync:   ./local/scripts/sync-upstream.sh
This commit is contained in:
2026-04-12 19:05:00 +01:00
commit 50b731f1b7
3392 changed files with 98327 additions and 0 deletions
@@ -0,0 +1,126 @@
# Cross toolchain targeting x86_64 Redox userspace.
CC = x86_64-unknown-redox-gcc
AR = x86_64-unknown-redox-ar

# Vendored Linux AMDGPU/TTM source trees; every path is ?=-assigned so it
# can be overridden from the command line or the environment.
AMDGPU_SRC ?= ../amdgpu-source/gpu/drm/amd
TTM_SRC ?= ../amdgpu-source/gpu/drm/ttm
AMDGPU_INCLUDES ?= ../amdgpu-source/include
# LinuxKPI compatibility headers supplied by the Redox-side shim crate.
LINUX_KPI ?= ../../drivers/linux-kpi/src/c_headers

# Compile as a pretend kernel build (-D__KERNEL__ plus the CONFIG_* switches
# the AMD display core expects) and add every include directory the
# DC/DML/DCN generation sources reference.
CFLAGS ?= -D__redox__ -D__KERNEL__ -DCONFIG_DRM_AMDGPU -DCONFIG_DRM_AMD_DC \
-DCONFIG_DRM_AMD_DC_FP -DCONFIG_DRM_AMD_ACP \
-I$(LINUX_KPI) \
-I. \
-I$(AMDGPU_INCLUDES) \
-I$(AMDGPU_INCLUDES)/drm \
-I$(AMDGPU_SRC)/include \
-I$(AMDGPU_SRC)/include/asic_reg \
-I$(AMDGPU_SRC)/display \
-I$(AMDGPU_SRC)/display/dc \
-I$(AMDGPU_SRC)/display/dc/dml \
-I$(AMDGPU_SRC)/display/dc/dcn20 \
-I$(AMDGPU_SRC)/display/dc/dcn21 \
-I$(AMDGPU_SRC)/display/dc/dcn30 \
-I$(AMDGPU_SRC)/display/dc/dcn301 \
-I$(AMDGPU_SRC)/display/dc/dcn31 \
-I$(AMDGPU_SRC)/display/dc/dcn32 \
-I$(AMDGPU_SRC)/display/dc/dcn35 \
-I$(AMDGPU_SRC)/display/dc/dml2 \
-I$(AMDGPU_SRC)/display/dmub \
-I$(AMDGPU_SRC)/display/modules \
-I$(AMDGPU_SRC)/display/modules/freesync \
-I$(AMDGPU_SRC)/display/modules/color \
-I$(AMDGPU_SRC)/display/modules/info_packet \
-I$(AMDGPU_SRC)/display/modules/power \
-I$(AMDGPU_SRC)/pm/swsmu \
-I$(AMDGPU_SRC)/pm/swsmu/inc \
-I$(AMDGPU_SRC)/pm/powerplay \
-I$(AMDGPU_SRC)/pm/powerplay/inc \
-I$(AMDGPU_SRC)/pm/powerplay/hwmgr \
-fPIC -O2 -Wall -Wno-unused-function -Wno-unused-variable \
-Wno-address-of-packed-member -Wno-initializer-overrides

# Link a shared library against the Redox driver runtime and LinuxKPI shim.
LDFLAGS ?= -shared
LDLIBS ?= -lredox_driver_sys -llinux_kpi -lm -lpthread
# Redox-side glue translation units built from this directory.
GLUE_OBJS := redox_stubs.o amdgpu_redox_main.o

# Hand-picked subset of core amdgpu files needed for display bring-up.
CORE_SRCS := \
$(AMDGPU_SRC)/amdgpu/amdgpu_device.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_drv.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_i2c.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_atombios.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_atombios_crtc.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_bios.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_mode.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_display.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_fb.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_gem.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_object.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_gmc.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_mmhub.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_irq.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_ring.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_fence.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_ttm.c \
$(AMDGPU_SRC)/amdgpu/amdgpu_bo_list.c

# Objects land flat in the current directory (directory parts stripped).
# NOTE(review): $(notdir) means two sources with the same basename in
# different subdirectories would collide on one .o — confirm the vendored
# trees are collision-free.
CORE_OBJS := $(patsubst %.c,%.o,$(notdir $(CORE_SRCS)))
# Every display-core source except the FPU-heavy DML/DML2 mode libraries.
DISPLAY_SRCS := $(shell find $(AMDGPU_SRC)/display -name '*.c' ! -path '*/dml/*' ! -path '*/dml2/*')
DISPLAY_OBJS := $(patsubst %.c,%.o,$(notdir $(DISPLAY_SRCS)))
TTM_SRCS := $(shell find $(TTM_SRC) -name '*.c')
TTM_OBJS := $(patsubst %.c,%.o,$(notdir $(TTM_SRCS)))
ALL_OBJS := $(GLUE_OBJS) $(DISPLAY_OBJS) $(TTM_OBJS) $(CORE_OBJS)
# Only targets that actually exist in this file are declared phony
# (the old list also named display/core/ttm, which have no rules).
.PHONY: all clean check

all: libamdgpu_dc_redox.so

# Compile every display/TTM/core source into the current directory (flat
# object namespace) and link the shared driver library in one recipe.
# Display and TTM sources must all build; core sources are guarded with
# [ -f ] because some files may be pruned from the vendored tree.
# The old recipe kept a `failed` counter that was dead code (it exited
# immediately on the first error); the counter is gone and the two
# identical display/ttm loops are merged, preserving compile order.
libamdgpu_dc_redox.so: $(GLUE_OBJS)
	@set -e; \
	success=0; \
	for src in $(DISPLAY_SRCS) $(TTM_SRCS); do \
		obj=$$(basename "$${src%.c}.o"); \
		if $(CC) -c $(CFLAGS) "$$src" -o "$$obj"; then \
			success=$$((success + 1)); \
		else \
			echo "ERROR: failed to compile $$src"; \
			exit 1; \
		fi; \
	done; \
	for src in $(CORE_SRCS); do \
		if [ -f "$$src" ]; then \
			obj=$$(basename "$${src%.c}.o"); \
			if $(CC) -c $(CFLAGS) "$$src" -o "$$obj"; then \
				success=$$((success + 1)); \
			else \
				echo "ERROR: failed to compile $$src"; \
				exit 1; \
			fi; \
		fi; \
	done; \
	echo "AMD DC: compiled $$success files successfully"; \
	$(CC) $(LDFLAGS) -o $@ $$(find . -maxdepth 1 -name '*.o' -size +0c) $(LDLIBS)
# Glue objects: each %.o is built from its %.c and both depend on the
# shared glue header. A static pattern rule replaces the two copy-pasted
# rules; prerequisites and commands are unchanged.
$(GLUE_OBJS): %.o: %.c redox_glue.h
	$(CC) -c $(CFLAGS) $< -o $@
# Syntax-only pass over the glue sources; builds the glue objects first so
# header breakage shows up before the long library compile.
check: $(GLUE_OBJS)
	$(CC) -fsyntax-only $(CFLAGS) amdgpu_redox_main.c
	$(CC) -fsyntax-only $(CFLAGS) redox_stubs.c
# Remove everything this Makefile produces ($(RM) is make's built-in rm -f).
clean:
	$(RM) *.o libamdgpu_dc_redox.so
@@ -0,0 +1,427 @@
#include "redox_glue.h"

/* Global state */
/* Single-GPU, single-process driver instance: amdgpu_redox_init() fills
 * these in, amdgpu_redox_cleanup() resets them. */
static struct drm_device g_drm_dev;
static struct device g_device;
static struct pci_dev *g_pci_dev;
static void __iomem *g_mmio_base;  /* CPU mapping of the MMIO BAR */
static size_t g_mmio_size;         /* size of that mapping in bytes */
static u64 g_fb_phys;              /* framebuffer base passed in by redox-drm */
static size_t g_fb_size;
static int g_asic_family = -1;     /* GPU id once probed; -1 = not yet probed */

/* ASIC family definitions based on device IDs */
#define ASIC_FAMILY_NAVI10 0x7310
#define ASIC_FAMILY_NAVI14 0x7340
#define ASIC_FAMILY_NAVI21 0x73A0
#define ASIC_FAMILY_NAVI22 0x73C0
#define ASIC_FAMILY_NAVI23 0x73E0
#define ASIC_FAMILY_NAVI24 0x7420
#define ASIC_FAMILY_NAVI31 0x7440
#define ASIC_FAMILY_NAVI32 0x7480
#define ASIC_FAMILY_NAVI33 0x74A0

/* Register offsets used by this glue layer.
 * NOTE(review): these look like simplified shim offsets rather than real
 * DCN register maps — confirm against the hardware headers before relying
 * on them on bare metal. */
#define AMDGPU_DC_HPD_STATUS_REG 0x4A00      /* hot-plug-detect status bits */
#define AMDGPU_DC_MAX_CONNECTORS 4
#define AMDGPU_DC_BYTES_PER_PIXEL 4U         /* ARGB8888 only */
#define AMDGPU_DC_PIXEL_FORMAT_ARGB8888 3U
/* OTG (output timing generator) block, relative to a per-CRTC base. */
#define AMDGPU_DC_OTG_CONTROL 0x00
#define AMDGPU_DC_OTG_VIEWPORT_SIZE 0x10
#define AMDGPU_DC_OTG_VSYNC_ADJUST 0x14
#define AMDGPU_DC_OTG_H_TOTAL 0x18
#define AMDGPU_DC_OTG_V_TOTAL 0x1C
#define AMDGPU_DC_OTG_VSTARTUP 0x20
/* HUBP (display hub / scanout plane) block, relative to a per-CRTC base. */
#define AMDGPU_DC_HUBP_PRIMARY_ADDR_LOW 0x00
#define AMDGPU_DC_HUBP_PRIMARY_ADDR_HIGH 0x04
#define AMDGPU_DC_HUBP_SURFACE_PITCH 0x08
#define AMDGPU_DC_HUBP_SURFACE_CONFIG 0x0C
#define AMDGPU_DC_HUBP_VIEWPORT_START 0x10
#define AMDGPU_DC_HUBP_VIEWPORT_SIZE 0x14
#define AMDGPU_DC_HUBP_FLIP_CONTROL 0x18
#define AMDGPU_DC_HUBP_FLIP_ADDR_LOW 0x1C
#define AMDGPU_DC_HUBP_FLIP_ADDR_HIGH 0x20

/* Connector record returned to the Rust side.
 * NOTE(review): presumably mirrors a #[repr(C)] struct in redox-drm —
 * confirm field order and types match the Rust definition. */
struct connector_info_ffi {
	int id;
	int connector_type;
	int connector_type_id;
	int connection;
	int mm_width;
	int mm_height;
	int encoder_id;
};

/* Static description of each possible connector; presence is decided at
 * runtime from the HPD status bits. */
struct amdgpu_redox_connector_desc {
	int id;
	u32 hpd_mask;          /* bit in AMDGPU_DC_HPD_STATUS_REG */
	int connector_type;
	int connector_type_id;
	int encoder_id;
	int mm_width;          /* reported physical size in millimetres */
	int mm_height;
};

/* Two type-10 and two type-11 connectors (DRM connector-type codes);
 * sizes are fixed placeholders, not read from EDID. */
static const struct amdgpu_redox_connector_desc g_connector_descs[AMDGPU_DC_MAX_CONNECTORS] = {
	{ .id = 1, .hpd_mask = 0x01, .connector_type = 10, .connector_type_id = 1, .encoder_id = 1, .mm_width = 600, .mm_height = 340 },
	{ .id = 2, .hpd_mask = 0x02, .connector_type = 10, .connector_type_id = 2, .encoder_id = 2, .mm_width = 600, .mm_height = 340 },
	{ .id = 3, .hpd_mask = 0x04, .connector_type = 11, .connector_type_id = 3, .encoder_id = 3, .mm_width = 600, .mm_height = 340 },
	{ .id = 4, .hpd_mask = 0x08, .connector_type = 11, .connector_type_id = 4, .encoder_id = 4, .mm_width = 600, .mm_height = 340 },
};
/* Translate a (block base, register offset) pair into a CPU pointer into
 * the mapped MMIO aperture. No bounds checking here — callers validate
 * through amdgpu_dc_validate_mmio_access() first. */
static inline void __iomem *amdgpu_dc_reg_ptr(u32 base, u32 offset)
{
	u8 __iomem *aperture = (u8 __iomem *)g_mmio_base;

	return aperture + base + offset;
}
/* Check that a 32-bit register access at base+offset lies entirely inside
 * the mapped MMIO aperture. Returns 0 on success, -ENODEV if no mapping
 * exists yet, -EINVAL (with a log line) if the access is out of range.
 * The sum is widened to u64 so base+offset cannot wrap. */
static int amdgpu_dc_validate_mmio_access(u32 base, u32 offset)
{
	const u64 access_end = (u64)base + (u64)offset + sizeof(u32);

	if (g_mmio_base == NULL)
		return -ENODEV;
	if (access_end > g_mmio_size) {
		pr_err("amdgpu_redox: MMIO access %#x+%#x outside aperture %zu\n",
			base, offset, g_mmio_size);
		return -EINVAL;
	}
	return 0;
}
/* Bounds-checked 32-bit MMIO write; out-of-range writes are silently
 * dropped (the validator already logged the error). */
static inline void amdgpu_dc_write_reg(u32 base, u32 offset, u32 value)
{
	if (amdgpu_dc_validate_mmio_access(base, offset) == 0)
		writel(value, amdgpu_dc_reg_ptr(base, offset));
}
/* Bounds-checked 32-bit MMIO read; returns 0 for invalid accesses, which
 * callers cannot distinguish from a register that really reads as zero. */
static inline u32 amdgpu_dc_read_reg(u32 base, u32 offset)
{
	const int valid = (amdgpu_dc_validate_mmio_access(base, offset) == 0);

	return valid ? readl(amdgpu_dc_reg_ptr(base, offset)) : 0;
}
/* Read the hot-plug-detect status register. Delegates to the checked
 * reader (same address, same validation, same 0-on-failure behaviour as
 * the previous open-coded readl). */
static inline u32 amdgpu_dc_hpd_status(void)
{
	return amdgpu_dc_read_reg(0, AMDGPU_DC_HPD_STATUS_REG);
}
/* Initialize AMD Display Core */
/* Probe the GPU id from the first MMIO dword, pick the matching DMCUB
 * firmware image (table-driven instead of the previous nine-way switch),
 * and attempt to load it. Firmware absence is non-fatal. Returns 0 on
 * success or -EINVAL for an unusable MMIO mapping. */
int amdgpu_dc_init(void *mmio_base, size_t mmio_size)
{
	/* Families grouped by the DMCUB image they use. */
	static const u32 dcn20_families[] = {
		ASIC_FAMILY_NAVI10, ASIC_FAMILY_NAVI14,
	};
	static const u32 dcn31_families[] = {
		ASIC_FAMILY_NAVI21, ASIC_FAMILY_NAVI22, ASIC_FAMILY_NAVI23,
		ASIC_FAMILY_NAVI24, ASIC_FAMILY_NAVI31, ASIC_FAMILY_NAVI32,
		ASIC_FAMILY_NAVI33,
	};
	const char *firmware_name = NULL;
	u32 gpu_id;
	size_t i;

	printk("amdgpu_redox: initializing AMD Display Core\n");
	if (!mmio_base || mmio_size < sizeof(u32)) {
		pr_err("amdgpu_redox: invalid MMIO for DC init\n");
		return -EINVAL;
	}

	gpu_id = readl(mmio_base);
	printk("amdgpu_redox: GPU ID = %#010x\n", gpu_id);

	for (i = 0; i < ARRAY_SIZE(dcn20_families) && !firmware_name; ++i) {
		if (gpu_id == dcn20_families[i])
			firmware_name = "dmcub_dcn20.bin";
	}
	for (i = 0; i < ARRAY_SIZE(dcn31_families) && !firmware_name; ++i) {
		if (gpu_id == dcn31_families[i])
			firmware_name = "dmcub_dcn31.bin";
	}
	if (!firmware_name) {
		/* Unknown ASIC: warn but keep going with the newest image. */
		pr_warn("amdgpu_redox: unknown ASIC %#010x, using DCN31 firmware\n", gpu_id);
		firmware_name = "dmcub_dcn31.bin";
	}
	/* In every branch of the old switch the stored family equalled the
	 * probed id, so record it unconditionally. */
	g_asic_family = (int)gpu_id;

	printk("amdgpu_redox: ASIC family identified, loading firmware: %s\n", firmware_name);
	{
		const struct firmware *fw = NULL;
		int fw_ret = request_firmware(&fw, firmware_name, NULL);

		if (fw_ret != 0 || !fw) {
			pr_warn("amdgpu_redox: firmware %s not available (err=%d), continuing without\n",
				firmware_name, fw_ret);
		} else {
			printk("amdgpu_redox: firmware %s loaded (%zu bytes)\n", firmware_name, fw->size);
			release_firmware(fw);
		}
	}
	return 0;
}
/* Initialize AMD GPU hardware for display */
/* Entry point called by the Rust redox-drm side with an already-mapped
 * MMIO aperture and the framebuffer location. Records the mappings in the
 * file-level globals, binds the PCI device from the integration layer,
 * then runs DC init. Returns 0, -EINVAL (bad MMIO), -ENODEV (no AMD PCI
 * device), or whatever amdgpu_dc_init() fails with. On the -ENODEV and DC
 * failure paths the globals are left populated; amdgpu_redox_cleanup()
 * resets them. */
int amdgpu_redox_init(void *mmio_base, size_t mmio_size, uint64_t fb_phys, size_t fb_size)
{
	int ret;

	printk("amdgpu_redox: initializing AMD GPU display\n");
	printk("amdgpu_redox: MMIO base=%p size=%zu\n", mmio_base, mmio_size);
	printk("amdgpu_redox: FB phys=%#llx size=%zu\n", (unsigned long long)fb_phys, fb_size);
	if (!mmio_base || mmio_size == 0) {
		pr_err("amdgpu_redox: invalid MMIO mapping provided by redox-drm\n");
		return -EINVAL;
	}
	/* Start from zeroed device shims on every (re-)init. */
	memset(&g_drm_dev, 0, sizeof(g_drm_dev));
	memset(&g_device, 0, sizeof(g_device));
	g_mmio_base = mmio_base;
	g_mmio_size = mmio_size;
	g_fb_phys = fb_phys;
	g_fb_size = fb_size;
	g_pci_dev = redox_pci_find_amd_gpu();
	if (!g_pci_dev) {
		pr_err("amdgpu_redox: no AMD PCI device available from integration layer\n");
		return -ENODEV;
	}
	/* Mirror the MMIO mapping into BAR0 of the PCI shim so imported
	 * Linux code that reads pci_resource_* sees consistent values. */
	g_pci_dev->mmio_base = g_mmio_base;
	g_pci_dev->resource_start[0] = (phys_addr_t)(uintptr_t)g_mmio_base;
	g_pci_dev->resource_len[0] = g_mmio_size;
	/* Wire up the minimal device hierarchy: drm_device -> device -> pci. */
	g_device.pci_dev = g_pci_dev;
	g_drm_dev.dev = &g_device;
	ret = amdgpu_dc_init(mmio_base, mmio_size);
	if (ret != 0) {
		pr_err("amdgpu_redox: failed to initialize DC\n");
		return ret;
	}
	return 0;
}
/* Cleanup */
/* Release the PCI device reference and return every file-level global to
 * its pristine (pre-init) state. Safe to call more than once. */
void amdgpu_redox_cleanup(void)
{
	printk("amdgpu_redox: cleanup\n");
	if (g_pci_dev != NULL) {
		redox_pci_dev_put(g_pci_dev);
		g_pci_dev = NULL;
	}
	memset(&g_drm_dev, 0, sizeof(g_drm_dev));
	memset(&g_device, 0, sizeof(g_device));
	g_mmio_base = NULL;
	g_mmio_size = 0;
	g_fb_phys = 0;
	g_fb_size = 0;
}
/* Get connector info — called by redox-drm */
/* Count connectors whose HPD bit is set. Returns the count (0..4) or
 * -ENODEV before init. On non-Redox builds this is a stub returning 0. */
int amdgpu_dc_detect_connectors(void)
{
	int num_connectors = 0;

	if (!g_mmio_base) {
		pr_err("amdgpu_redox: detect_connectors called before init\n");
		return -ENODEV;
	}
#ifdef __redox__
	{
		const u32 hpd = amdgpu_dc_hpd_status();
		int i;

		for (i = 0; i < AMDGPU_DC_MAX_CONNECTORS; ++i)
			num_connectors += (hpd & g_connector_descs[i].hpd_mask) ? 1 : 0;
	}
	printk("amdgpu_redox: detected %d connector(s)\n", num_connectors);
#else
	printk("amdgpu_redox: running on Linux, using AMD DC detection\n");
#endif
	return num_connectors;
}
/* Get connector info by index */
int amdgpu_dc_get_connector_info(int idx, void *info)
{
struct connector_info_ffi *ffi_info = (struct connector_info_ffi *)info;
if (!g_mmio_base) {
pr_err("amdgpu_redox: get_connector_info called before init\n");
return -ENODEV;
}
if (idx < 0 || !ffi_info) {
return -EINVAL;
}
#ifdef __redox__
{
u32 hpd_status = amdgpu_dc_hpd_status();
int active_index = 0;
int i;
for (i = 0; i < AMDGPU_DC_MAX_CONNECTORS; ++i) {
const struct amdgpu_redox_connector_desc *desc = &g_connector_descs[i];
if (!(hpd_status & desc->hpd_mask)) {
continue;
}
if (active_index == idx) {
ffi_info->id = desc->id;
ffi_info->connector_type = desc->connector_type;
ffi_info->connector_type_id = desc->connector_type_id;
ffi_info->connection = 1;
ffi_info->mm_width = desc->mm_width;
ffi_info->mm_height = desc->mm_height;
ffi_info->encoder_id = desc->encoder_id;
return 0;
}
active_index++;
}
}
#endif
return -ENOENT;
}
/* Set CRTC mode — called by redox-drm for modesetting */
/* Program OTG timing and HUBP scanout registers for one CRTC and enable
 * it. fb_addr is a GPU-virtual (GTT) address, not a physical one.
 * Returns 0 on success, -ENODEV before init, -EINVAL for bad arguments.
 *
 * Fix vs. previous revision: v_sync_adjust packs two 16-bit fields, but
 * the old code computed (v_sync_end << 16) unmasked and *before* the
 * height validation, so heights >= 0xFFF8 passed validation yet silently
 * truncated the sync-end field. Timing values are now derived only after
 * validation, the packed fields are masked explicitly, and modes whose
 * sync window does not fit 16 bits are rejected. */
int amdgpu_dc_set_crtc(int crtc_id, uint64_t fb_addr, uint32_t width, uint32_t height)
{
	printk("amdgpu_redox: set_crtc(%d, fb=%#llx, %ux%u)\n",
		crtc_id,
		(unsigned long long)fb_addr,
		width,
		height);
	if (!g_mmio_base) {
		pr_err("amdgpu_redox: set_crtc called before amdgpu_redox_init\n");
		return -ENODEV;
	}
#ifdef __redox__
	const u32 bytes_per_pixel = AMDGPU_DC_BYTES_PER_PIXEL;
	u32 pitch;
	u32 viewport_size;
	u32 h_total, v_total, v_sync_start, v_sync_end, v_sync_adjust, vstartup;
	u32 otg_base, hubp_base, otg_control;
	u64 required_bytes;

	/* ---- Validate everything before deriving register values. ---- */
	if (crtc_id < 0 || crtc_id > 3) {
		pr_err("amdgpu_redox: invalid crtc_id %d\n", crtc_id);
		return -EINVAL;
	}
	if (width == 0 || height == 0 || width > 0xFFFFU || height > 0xFFFFU) {
		pr_err("amdgpu_redox: invalid mode %ux%u\n", width, height);
		return -EINVAL;
	}
	if (width > (UINT32_MAX / bytes_per_pixel)) {
		pr_err("amdgpu_redox: pitch overflow for width %u\n", width);
		return -EINVAL;
	}

	/* ---- Derived timing/surface values (fixed blanking margins). ---- */
	pitch = width * bytes_per_pixel;
	viewport_size = (width & 0xFFFFU) | ((height & 0xFFFFU) << 16);
	h_total = width + 160U;
	v_total = height + 45U;
	v_sync_start = height + 3U;
	v_sync_end = v_sync_start + 5U;
	if (v_sync_end > 0xFFFFU) {
		/* Would not fit the 16-bit sync-end field of VSYNC_ADJUST. */
		pr_err("amdgpu_redox: invalid mode %ux%u\n", width, height);
		return -EINVAL;
	}
	v_sync_adjust = (v_sync_start & 0xFFFFU) | ((v_sync_end & 0xFFFFU) << 16);
	vstartup = v_sync_start > 1U ? (v_sync_start - 1U) : 0U;
	required_bytes = (u64)pitch * (u64)height;

	/* The Rust-side allocates scanout buffers via GTT VA space (0..256MiB).
	 * The display controller programs these GPU-virtual addresses directly;
	 * the GTT hardware translates them to physical backing pages at runtime.
	 * Validate only that the address + size fits in a u64 and that the
	 * programmed registers can hold the values. */
	if (required_bytes == 0) {
		pr_err("amdgpu_redox: zero-sized framebuffer for crtc %d\n", crtc_id);
		return -EINVAL;
	}

	/* Per-CRTC register blocks, then range-check the highest offset we
	 * will touch in each block so later writes cannot partially fail. */
	otg_base = 0x4800 + (crtc_id * 0x800);
	hubp_base = 0x5800 + (crtc_id * 0x400);
	if (amdgpu_dc_validate_mmio_access(otg_base, AMDGPU_DC_OTG_VSTARTUP) != 0 ||
	    amdgpu_dc_validate_mmio_access(hubp_base, AMDGPU_DC_HUBP_FLIP_ADDR_HIGH) != 0) {
		return -EINVAL;
	}

	/* Disable the timing generator while reprogramming. */
	otg_control = amdgpu_dc_read_reg(otg_base, AMDGPU_DC_OTG_CONTROL);
	otg_control &= ~0x01U;
	amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_CONTROL, otg_control);
	mb();
	/* Scanout surface: address (split 32/32), pitch, format, viewport. */
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_PRIMARY_ADDR_LOW, (u32)(fb_addr & 0xFFFFFFFFULL));
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_PRIMARY_ADDR_HIGH, (u32)((fb_addr >> 32) & 0xFFFFFFFFULL));
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_SURFACE_PITCH, pitch);
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_SURFACE_CONFIG, AMDGPU_DC_PIXEL_FORMAT_ARGB8888);
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_VIEWPORT_START, 0);
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_VIEWPORT_SIZE, viewport_size);
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_FLIP_ADDR_LOW, (u32)(fb_addr & 0xFFFFFFFFULL));
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_FLIP_ADDR_HIGH, (u32)((fb_addr >> 32) & 0xFFFFFFFFULL));
	amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_FLIP_CONTROL, 0);
	/* Timing generator: geometry, sync window, startup line. */
	amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_VIEWPORT_SIZE, viewport_size);
	amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_VSYNC_ADJUST, v_sync_adjust);
	amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_H_TOTAL, h_total);
	amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_V_TOTAL, v_total);
	amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_VSTARTUP, vstartup);
	mb();
	/* Re-enable the timing generator with the new configuration. */
	otg_control |= 0x01;
	amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_CONTROL, otg_control);
	printk("amdgpu_redox: CRTC %d enabled at %ux%u, fb=%#llx\n",
		crtc_id, width, height, (unsigned long long)fb_addr);
#else
	printk("amdgpu_redox: running on Linux, using AMD DC modesetting\n");
#endif
	return 0;
}
@@ -0,0 +1,548 @@
#ifndef _REDOX_GLUE_H
#define _REDOX_GLUE_H
/*
 * Redox-specific Linux compatibility surface for the AMDGPU display port.
 * The real build enables this via -D__redox__, but the declarations stay
 * visible unconditionally so editor/LSP diagnostics can parse the sources.
 */
/* ---- Standard types ---- */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* Linux annotation keywords become no-ops; upstream they only carry
 * sparse/static-analysis meaning. */
#ifndef __iomem
#define __iomem
#endif
#ifndef __user
#define __user
#endif
#ifndef __force
#define __force
#endif
#ifndef __must_check
#define __must_check
#endif
/* Kernel fixed-width integer aliases mapped onto <stdint.h>. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;
typedef unsigned long ulong;
typedef unsigned long long ullong;
typedef unsigned int uint;
/* NOTE(review): phys_addr_t is size_t, so it narrows to 32 bits on a
 * 32-bit build while dma_addr_t stays 64-bit — confirm that is intended. */
typedef size_t phys_addr_t;
typedef u64 dma_addr_t;
/* Endian-tagged types are plain integers here; no byte swapping happens. */
typedef u32 __be32;
typedef u16 __be16;
typedef u32 __le32;
typedef u16 __le16;
typedef unsigned int gfp_t;
/* ---- Kernel replacements ---- */
/* GFP flags are accepted but carry no allocation semantics here. */
#define GFP_KERNEL 0U
#define GFP_ATOMIC 1U
#define GFP_DMA32 2U
#define GFP_NOWAIT 3U
#define GFP_KERNEL_ACCOUNT 0U
/* Allocator entry points; implementations live in redox_stubs.c
 * (presumably thin wrappers over malloc/free — confirm there). */
extern void *kmalloc(size_t size, unsigned int flags);
extern void *kzalloc(size_t size, unsigned int flags);
extern void kfree(const void *ptr);
extern void *vmalloc(unsigned long size);
extern void vfree(const void *addr);
extern void *krealloc(const void *ptr, size_t new_size, unsigned int flags);
/* printk → stderr */
/* Note: KERN_* level prefixes, if passed by imported code, are printed
 * verbatim rather than interpreted. */
#define printk(fmt, ...) fprintf(stderr, "[amdgpu] " fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...) fprintf(stderr, "[amdgpu ERR] " fmt, ##__VA_ARGS__)
#define pr_warn(fmt, ...) fprintf(stderr, "[amdgpu WARN] " fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) fprintf(stderr, "[amdgpu INFO] " fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) fprintf(stderr, "[amdgpu DBG] " fmt, ##__VA_ARGS__)
/* dev_* variants ignore the device argument entirely. */
#define dev_err(dev, fmt, ...) fprintf(stderr, "[amdgpu ERR] " fmt, ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) fprintf(stderr, "[amdgpu WARN] " fmt, ##__VA_ARGS__)
#define dev_info(dev, fmt, ...) fprintf(stderr, "[amdgpu INFO] " fmt, ##__VA_ARGS__)
#define dev_dbg(dev, fmt, ...) fprintf(stderr, "[amdgpu DBG] " fmt, ##__VA_ARGS__)
/* ---- Module system replacement ---- */
/* No module loader on Redox userspace; all module metadata vanishes. */
#define module_init(fn) /* noop */
#define module_exit(fn) /* noop */
#define module_param(name, type, perm) /* noop */
#define MODULE_PARM_DESC(name, desc) /* noop */
#define MODULE_LICENSE(license) /* noop */
#define MODULE_AUTHOR(author) /* noop */
#define MODULE_DESCRIPTION(desc) /* noop */
#define MODULE_DEVICE_TABLE(type, table) /* noop */
#define EXPORT_SYMBOL(sym) /* noop */
#define EXPORT_SYMBOL_GPL(sym) /* noop */
#define MODULE_FIRMWARE(fw) /* noop */
#define THIS_MODULE NULL
/* ---- Atomic operations ---- */
typedef struct {
	volatile int counter;
} atomic_t;
typedef struct {
	volatile long counter;
} atomic_long_t;
typedef struct {
	volatile u64 counter;
} atomic64_t;
/* read/set are plain volatile accesses (no fences); the RMW ops below use
 * GCC __sync builtins, which are fully fenced. */
#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) ((v)->counter = (i))
#define atomic_inc(v) __sync_add_and_fetch(&(v)->counter, 1)
#define atomic_dec(v) __sync_sub_and_fetch(&(v)->counter, 1)
#define atomic_add(i, v) __sync_add_and_fetch(&(v)->counter, (i))
#define atomic_sub(i, v) __sync_sub_and_fetch(&(v)->counter, (i))
#define atomic_inc_return(v) __sync_add_and_fetch(&(v)->counter, 1)
#define atomic_dec_return(v) __sync_sub_and_fetch(&(v)->counter, 1)
/* Returns the *old* value, matching Linux atomic_cmpxchg semantics. */
#define atomic_cmpxchg(v, oldv, newv) __sync_val_compare_and_swap(&(v)->counter, (oldv), (newv))
/* ---- Locking ---- */
/* Mutexes map 1:1 onto pthread mutexes. */
typedef pthread_mutex_t mutex_t;
#define DEFINE_MUTEX(name) pthread_mutex_t name = PTHREAD_MUTEX_INITIALIZER
#define mutex_init(m) pthread_mutex_init((m), NULL)
#define mutex_lock(m) pthread_mutex_lock((m))
#define mutex_unlock(m) pthread_mutex_unlock((m))
#define mutex_destroy(m) pthread_mutex_destroy((m))
/* Fixed: the previous definition was `pthread_mutex_trylock(m) != 0`,
 * which *acquired and leaked* the mutex whenever it was unlocked — the
 * next mutex_lock() would then deadlock. Probe with trylock, release
 * immediately on success, and report busy state only. (Still inherently
 * racy, like the kernel's mutex_is_locked — debug/assert use only.) */
#define mutex_is_locked(m) ({ \
	int __mil_busy = (pthread_mutex_trylock((m)) != 0); \
	if (!__mil_busy) \
		pthread_mutex_unlock((m)); \
	__mil_busy; \
})
/* Spinlocks: userspace test-and-set busy-wait (no yield, no IRQ masking;
 * the *_irqsave variants only pretend to save flags). */
typedef struct {
	volatile int lock;
} spinlock_t;
#define spin_lock_init(l) ((l)->lock = 0)
#define spin_lock(l) while (__sync_lock_test_and_set(&(l)->lock, 1)) {}
#define spin_unlock(l) __sync_lock_release(&(l)->lock)
#define spin_lock_irqsave(l, flags) do { (flags) = 0; spin_lock((l)); } while (0)
#define spin_unlock_irqrestore(l, flags) do { (void)(flags); spin_unlock((l)); } while (0)
#define spin_lock_irq(l) spin_lock((l))
#define spin_unlock_irq(l) spin_unlock((l))
/* ---- Power management stubs ---- */
/* Runtime PM always "succeeds"; the device is treated as permanently on. */
#define pm_runtime_get_sync(dev) 0
#define pm_runtime_put_autosuspend(dev) 0
#define pm_runtime_allow(dev) 0
#define pm_runtime_forbid(dev) 0
#define pm_runtime_set_active(dev) 0
#define pm_runtime_enable(dev) 0
#define pm_runtime_disable(dev) 0
#define pm_runtime_idle(dev) 0
#define pm_runtime_put_noidle(dev) 0
#define pm_runtime_get_noresume(dev) 0
#define pm_suspend_ignore_children(dev, enable) /* noop */
/* ---- I/O memory — maps to redox-driver-sys MmioRegion ---- */
/* ioremap/io{read,write}* go through the Redox driver runtime... */
extern void __iomem *redox_ioremap(phys_addr_t offset, size_t size);
extern void redox_iounmap(void __iomem *addr);
extern void redox_iowrite32(u32 val, void __iomem *addr);
extern u32 redox_ioread32(const void __iomem *addr);
extern void redox_iowrite16(u16 val, void __iomem *addr);
extern u16 redox_ioread16(const void __iomem *addr);
extern void redox_iowrite8(u8 val, void __iomem *addr);
extern u8 redox_ioread8(const void __iomem *addr);
extern void redox_mmio_write32(void *base, u32 offset, u32 val);
extern u32 redox_mmio_read32(void *base, u32 offset);
#define ioremap(offset, size) redox_ioremap((offset), (size))
#define ioremap_wc(offset, size) redox_ioremap((offset), (size))
#define ioremap_np(offset, size) redox_ioremap((offset), (size))
#define iounmap(addr) redox_iounmap((addr))
#define iowrite32(val, addr) redox_iowrite32((val), (addr))
#define ioread32(addr) redox_ioread32((addr))
#define iowrite16(val, addr) redox_iowrite16((val), (addr))
#define ioread16(addr) redox_ioread16((addr))
#define iowrite8(val, addr) redox_iowrite8((val), (addr))
#define ioread8(addr) redox_ioread8((addr))
/* ...whereas read*/write* are plain volatile dereferences with no barrier
 * and no wrapper call — note the asymmetry with ioread*/iowrite* above. */
#define writel(val, addr) (*(volatile u32 *)(addr) = (val))
#define readl(addr) (*(volatile const u32 *)(addr))
#define writew(val, addr) (*(volatile u16 *)(addr) = (val))
#define readw(addr) (*(volatile const u16 *)(addr))
#define writeb(val, addr) (*(volatile u8 *)(addr) = (val))
#define readb(addr) (*(volatile const u8 *)(addr))
#define writeq(val, addr) (*(volatile u64 *)(addr) = (val))
#define readq(addr) (*(volatile const u64 *)(addr))
/* ---- Memory barriers ---- */
/* Every barrier flavour collapses to a full __sync_synchronize() fence,
 * except compiler-only barrier(). */
#define mb() __sync_synchronize()
#define rmb() __sync_synchronize()
#define wmb() __sync_synchronize()
#define smp_mb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()
#define smp_wmb() __sync_synchronize()
#define barrier() __asm__ __volatile__("" : : : "memory")
/* ---- DMA mapping — maps to redox-driver-sys DmaBuffer ---- */
extern void *redox_dma_alloc_coherent(size_t size, dma_addr_t *dma_handle);
extern void redox_dma_free_coherent(size_t size, void *vaddr, dma_addr_t dma_handle);
#define dma_alloc_coherent(dev, size, dma_handle, flags) redox_dma_alloc_coherent((size), (dma_handle))
#define dma_free_coherent(dev, size, vaddr, dma_handle) redox_dma_free_coherent((size), (vaddr), (dma_handle))
/* NOTE(review): dma_map_page returns 0 and dma_map_single returns the CPU
 * pointer — callers that hand these to hardware get no real translation.
 * Confirm no imported path relies on streaming DMA. */
#define dma_map_page(dev, page, offset, size, dir) ((dma_addr_t)0)
#define dma_unmap_page(dev, addr, size, dir) /* noop */
#define dma_map_single(dev, ptr, size, dir) ((dma_addr_t)(uintptr_t)(ptr))
#define dma_unmap_single(dev, addr, size, dir) /* noop */
#define dma_mapping_error(dev, addr) 0
/* ---- PCI — maps to redox-driver-sys PCI ---- */
/* Minimal PCI device shim; filled in by the Rust integration layer. */
struct pci_dev {
	u16 vendor;
	u16 device;
	u8 revision;
	u8 irq;
	phys_addr_t resource_start[6];  /* BAR base addresses */
	u64 resource_len[6];            /* BAR sizes */
	u32 resource_flags[6];          /* IORESOURCE_* bits */
	void *driver_data;
	void __iomem *mmio_base;        /* Redox extension: mapped BAR0 */
	int is_amdgpu;
};
extern struct pci_dev *redox_pci_find_amd_gpu(void);
extern void redox_pci_dev_put(struct pci_dev *pdev);
extern int redox_pci_enable_device(struct pci_dev *pdev);
extern void redox_pci_set_master(struct pci_dev *pdev);
extern int redox_pci_request_regions(struct pci_dev *pdev, const char *name);
extern void redox_pci_release_regions(struct pci_dev *pdev);
/* NOTE(review): pci_get_device ignores vendor/device/from and always
 * returns the single AMD GPU — fine for this driver, but any imported
 * code probing for a *different* device will get the wrong one. */
#define pci_get_device(vendor, device, from) redox_pci_find_amd_gpu()
#define pci_dev_put(pdev) redox_pci_dev_put((pdev))
#define pci_enable_device(pdev) redox_pci_enable_device((pdev))
#define pci_set_master(pdev) redox_pci_set_master((pdev))
#define pci_request_regions(pdev, name) redox_pci_request_regions((pdev), (name))
#define pci_release_regions(pdev) redox_pci_release_regions((pdev))
#define pci_resource_start(pdev, bar) ((pdev)->resource_start[(bar)])
#define pci_resource_len(pdev, bar) ((pdev)->resource_len[(bar)])
#define pci_resource_flags(pdev, bar) ((pdev)->resource_flags[(bar)])
#define pci_resource_end(pdev, bar) ((pdev)->resource_start[(bar)] + (pdev)->resource_len[(bar)] - 1)
#define IORESOURCE_MEM 0x00000200U
#define IORESOURCE_IO 0x00000100U
#define IORESOURCE_MEM_64 0x00040000U
#define IORESOURCE_PREFETCH 0x00001000U
/* ---- Firmware loading — maps to scheme:firmware ---- */
struct firmware {
	size_t size;      /* image length in bytes */
	const u8 *data;   /* image contents, owned by the loader */
};
extern int redox_request_firmware(const struct firmware **fw, const char *name, void *dev);
extern void redox_release_firmware(const struct firmware *fw);
#define request_firmware(fw, name, dev) redox_request_firmware((fw), (name), (dev))
#define release_firmware(fw) redox_release_firmware((fw))
/* ---- Device model ---- */
/* Barest possible struct device: drvdata plus a link to the PCI shim. */
struct device {
	void *driver_data;
	struct pci_dev *pci_dev;
};
#define dev_get_drvdata(dev) ((dev)->driver_data)
#define dev_set_drvdata(dev, data) ((dev)->driver_data = (data))
/* ---- Interrupts ---- */
typedef int (*irq_handler_t)(int irq, void *dev_id);
extern int redox_request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev);
extern void redox_free_irq(unsigned int irq, void *dev_id);
#define IRQF_SHARED 0x00000080UL
#define IRQF_TRIGGER_FALLING 0x00000002UL
/* ---- Workqueue ---- */
struct work_struct {
	void (*func)(struct work_struct *work);
};
struct delayed_work {
	struct work_struct work;
	unsigned long delay;
};
#define INIT_WORK(w, fn) ((w)->func = (fn))
#define INIT_DELAYED_WORK(w, fn) INIT_WORK(&(w)->work, (fn))
/* There is no worker thread: schedule_work runs the callback immediately
 * and synchronously in the caller's context, and delayed work ignores its
 * delay — code that relies on deferral semantics will misbehave. */
#define schedule_work(w) do { if ((w)->func) { (w)->func((w)); } } while (0)
#define schedule_delayed_work(w, delayv) do { (void)(delayv); if ((w)->work.func) { (w)->work.func(&(w)->work); } } while (0)
/* Nothing is ever pending, so cancel/flush are no-ops. */
#define cancel_work_sync(w) /* noop */
#define cancel_delayed_work_sync(w) /* noop */
#define flush_workqueue(wq) /* noop */
#define flush_scheduled_work() /* noop */
/* ---- Completion ---- */
/* Condition-variable-backed completion; done is level-triggered, so a
 * complete() before wait_for_completion() is not lost. */
struct completion {
	volatile int done;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
};
#define init_completion(c) do { \
	(c)->done = 0; \
	pthread_mutex_init(&(c)->mutex, NULL); \
	pthread_cond_init(&(c)->cond, NULL); \
} while (0)
/* NOTE(review): clears done without taking the mutex — racy if waiters
 * are concurrently active; confirm callers only reinit when quiescent. */
#define reinit_completion(c) do { (c)->done = 0; } while (0)
#define complete(c) do { \
	pthread_mutex_lock(&(c)->mutex); \
	(c)->done = 1; \
	pthread_cond_broadcast(&(c)->cond); \
	pthread_mutex_unlock(&(c)->mutex); \
} while (0)
#define wait_for_completion(c) do { \
	pthread_mutex_lock(&(c)->mutex); \
	while (!(c)->done) { \
		pthread_cond_wait(&(c)->cond, &(c)->mutex); \
	} \
	pthread_mutex_unlock(&(c)->mutex); \
} while (0)
/* Timeout variant waits forever and always reports "completed" (1). */
#define wait_for_completion_timeout(c, timeout) ({ (void)(timeout); wait_for_completion((c)); 1UL; })
/* ---- Error helpers ---- */
#ifndef EOPNOTSUPP
#define EOPNOTSUPP 95
#endif
/* Linux ERR_PTR convention: the top 4095 addresses encode -errno values. */
#define IS_ERR(ptr) ((unsigned long)(uintptr_t)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
#define ERR_PTR(err) ((void *)(intptr_t)(err))
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))
/* ---- Min/Max ---- */
/* Plain-macro versions: arguments are evaluated twice — avoid passing
 * expressions with side effects. */
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define clamp(val, lo, hi) min(max((val), (lo)), (hi))
#define clamp_t(type, val, lo, hi) ((type)clamp((val), (lo), (hi)))
#define clamp_val(val, lo, hi) clamp((val), (lo), (hi))
#define swap(a, b) do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
/* ---- DIV_ROUND_UP, alignment ---- */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_UP_ULL(n, d) DIV_ROUND_UP((n), (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + ((d) / 2)) / (d))
/* ALIGN/IS_ALIGNED require a power-of-two alignment value. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define PAGE_SHIFT 12
#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) ALIGN((x), PAGE_SIZE)
/* ---- msleep, udelay — implemented in redox_stubs.c ---- */
extern void msleep(unsigned int msecs);
extern void udelay(unsigned long usecs);
extern void mdelay(unsigned long msecs);
extern unsigned long jiffies;
extern unsigned long msecs_to_jiffies(unsigned int msecs);
extern unsigned long usecs_to_jiffies(unsigned int usecs);
/* ---- Kconfig macros ---- */
/* NOTE(review): IS_ENABLED is hardwired to 0, so runtime checks like
 * `if (IS_ENABLED(CONFIG_DRM_AMD_DC))` compile out even for the options
 * defined as 1 below — confirm this pruning is intended. */
#define IS_ENABLED(option) 0
#define IS_REACHABLE(option) 0
/* Defaults for options the build may not pass on the command line;
 * CFLAGS -D values (see the Makefile) take precedence via the guards. */
#ifndef CONFIG_DRM_AMDGPU
#define CONFIG_DRM_AMDGPU 1
#endif
#ifndef CONFIG_DRM_AMD_DC
#define CONFIG_DRM_AMD_DC 1
#endif
#ifndef CONFIG_DRM_AMD_DC_FP
#define CONFIG_DRM_AMD_DC_FP 1
#endif
#ifndef CONFIG_DRM_AMD_ACP
#define CONFIG_DRM_AMD_ACP 0
#endif
#ifndef CONFIG_DRM_AMD_SECURE_DISPLAY
#define CONFIG_DRM_AMD_SECURE_DISPLAY 0
#endif
#ifndef CONFIG_DRM_AMDGPU_SI
#define CONFIG_DRM_AMDGPU_SI 0
#endif
#ifndef CONFIG_DRM_AMDGPU_CIK
#define CONFIG_DRM_AMDGPU_CIK 0
#endif
#ifndef CONFIG_DEBUG_FS
#define CONFIG_DEBUG_FS 0
#endif
#ifndef CONFIG_FAULT_INJECTION
#define CONFIG_FAULT_INJECTION 0
#endif
#ifndef CONFIG_ACPI
#define CONFIG_ACPI 0
#endif
#ifndef CONFIG_HWMON
#define CONFIG_HWMON 0
#endif
#ifndef CONFIG_PM
#define CONFIG_PM 0
#endif
#ifndef CONFIG_SLEEP
#define CONFIG_SLEEP 0
#endif
#ifndef CONFIG_BACKLIGHT_CLASS_DEVICE
#define CONFIG_BACKLIGHT_CLASS_DEVICE 0
#endif
#ifndef CONFIG_BACKLIGHT_LCD_SUPPORT
#define CONFIG_BACKLIGHT_LCD_SUPPORT 0
#endif
#ifndef CONFIG_DRM_AMD_DC_HDCP
#define CONFIG_DRM_AMD_DC_HDCP 0
#endif
#ifndef CONFIG_DRM_AMD_DC_DSC
#define CONFIG_DRM_AMD_DC_DSC 1
#endif
#ifndef CONFIG_DRM_AMD_DC_DCN
#define CONFIG_DRM_AMD_DC_DCN 1
#endif
#ifndef CONFIG_DRM_AMD_DC_DML2
#define CONFIG_DRM_AMD_DC_DML2 0
#endif
#ifndef CONFIG_DRM_AMD_DC_SMU
#define CONFIG_DRM_AMD_DC_SMU 0
#endif
/* ---- Linked list ---- */
/* Circular doubly-linked list, API-compatible with the kernel's list.h. */
struct list_head {
	struct list_head *next;
	struct list_head *prev;
};
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)
/* Make a node point at itself — the empty-list sentinel state. */
static inline void INIT_LIST_HEAD(struct list_head *list) {
	list->prev = list;
	list->next = list;
}
/* Insert new_entry right after head (stack/LIFO order). */
static inline void list_add(struct list_head *new_entry, struct list_head *head) {
	struct list_head *first = head->next;

	new_entry->next = first;
	new_entry->prev = head;
	first->prev = new_entry;
	head->next = new_entry;
}
/* Insert new_entry right before head (queue/FIFO order). */
static inline void list_add_tail(struct list_head *new_entry, struct list_head *head) {
	struct list_head *last = head->prev;

	new_entry->prev = last;
	new_entry->next = head;
	last->next = new_entry;
	head->prev = new_entry;
}
/* Unlink entry and poison its pointers so use-after-delete faults fast. */
static inline void list_del(struct list_head *entry) {
	struct list_head *before = entry->prev;
	struct list_head *after = entry->next;

	after->prev = before;
	before->next = after;
	entry->next = (struct list_head *)(uintptr_t)0xDEADBEEF;
	entry->prev = (struct list_head *)(uintptr_t)0xDEADBEEF;
}
static inline int list_empty(const struct list_head *head) {
	return head == head->next;
}
#define list_entry(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each(pos, head) for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
#define list_for_each_safe(pos, n, head) for ((pos) = (head)->next, (n) = (pos)->next; (pos) != (head); (pos) = (n), (n) = (pos)->next)
#define list_for_each_entry(pos, head, member) \
	for ((pos) = list_entry((head)->next, typeof(*(pos)), member); \
	     &(pos)->member != (head); \
	     (pos) = list_entry((pos)->member.next, typeof(*(pos)), member))
/* ---- IDR ----
 * Tiny stand-in for the kernel ID allocator.  Hands out monotonically
 * increasing IDs; no pointer table is kept, so idr_find() always misses.
 */
struct idr {
    int next_id; /* next candidate ID to hand out */
};
#define DEFINE_IDR(name) struct idr name = { .next_id = 1 }
/*
 * Allocate an unused ID in [start, end), following the Linux idr_alloc()
 * contract: end <= 0 means "no upper bound", and -ENOSPC is returned when
 * the range is exhausted.  The original stub ignored both bounds; this
 * honours them while still returning the same 1,2,3,... sequence for the
 * common start<=1, end<=0 call pattern.  `ptr` is accepted but not stored.
 */
static inline int idr_alloc(struct idr *idr, void *ptr, int start, int end, int flags) {
    (void)ptr;
    (void)flags;
    if (idr->next_id < start) {
        idr->next_id = start; /* honour the caller's lower bound */
    }
    if (end > 0 && idr->next_id >= end) {
        return -ENOSPC; /* [start, end) exhausted */
    }
    return idr->next_id++;
}
/* Lookup is unimplemented: no allocation is ever recorded. */
static inline void *idr_find(struct idr *idr, int id) {
    (void)idr;
    (void)id;
    return NULL;
}
/* No state per ID, so removal is a no-op. */
static inline void idr_remove(struct idr *idr, int id) {
    (void)idr;
    (void)id;
}
static inline void idr_destroy(struct idr *idr) {
    (void)idr;
}
/* Terminates immediately because idr_find() always returns NULL. */
#define idr_for_each_entry(idr, entry, id) for ((id) = 0; ((entry) = idr_find((idr), (id))) != NULL; (id)++)
/* ---- Misc ----
 * Small kernel utility macros used throughout the amdgpu sources.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define BITS_PER_LONG (sizeof(long) * 8)
#define BIT(n) (1UL << (n))
/* Mask with bits h..l (inclusive) set, e.g. GENMASK(3, 1) == 0xE. */
#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - 1 - (h))) & (~0UL << (l)))
#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & (~0ULL << (l)))
/* Recover a pointer to the enclosing struct from a pointer to a member. */
#define container_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))
/* Branch-prediction hints (GCC builtins). */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* Non-fatal check: logs to stderr and evaluates to !!(condition). */
#define WARN_ON(condition) ({ int __ret = !!(condition); if (__ret) fprintf(stderr, "WARN_ON: %s at %s:%d\n", #condition, __FILE__, __LINE__); __ret; })
/* NOTE(review): no once-only latch — this warns on every hit, unlike Linux. */
#define WARN_ON_ONCE(condition) WARN_ON(condition)
/* Fatal check: logs and aborts the process. */
#define BUG_ON(condition) do { if (condition) { fprintf(stderr, "BUG: %s at %s:%d\n", #condition, __FILE__, __LINE__); abort(); } } while (0)
/* Compile-time check: negative array size if condition is true. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
/* ---- Enum constants ----
 * DPMS power-state codes and connector hotplug-polling flag bits, with
 * the same numeric values as the DRM UAPI they stand in for.
 */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
/* ---- Minimal DRM structures ----
 * Bare-bones stand-ins for DRM core types: only the members this port
 * actually touches are present, so the layout does not (and need not)
 * match upstream Linux.
 */
struct drm_device {
    void *dev_private;  /* driver-private payload */
    struct device *dev; /* backing device */
};
struct drm_file {
    int filp; /* NOTE(review): presumably an open file descriptor — confirm at call sites */
};
struct drm_mode_object {
    int id;   /* mode object ID */
    int type; /* object type code */
};
/* ---- DRM logging helpers ----
 * Debug variants compile to nothing; err/info/warn print to stderr with a
 * level prefix.  The `dev` argument is accepted but ignored by all of them.
 */
#define drm_dbg_core(dev, fmt, ...) /* noop */
#define drm_dbg_kms(dev, fmt, ...) /* noop */
#define drm_err(dev, fmt, ...) fprintf(stderr, "[drm ERR] " fmt, ##__VA_ARGS__)
#define drm_info(dev, fmt, ...) fprintf(stderr, "[drm INFO] " fmt, ##__VA_ARGS__)
#define drm_warn(dev, fmt, ...) fprintf(stderr, "[drm WARN] " fmt, ##__VA_ARGS__)
#define drm_dbg(dev, fmt, ...) /* noop */
#endif /* _REDOX_GLUE_H */
@@ -0,0 +1,380 @@
#include "redox_glue.h"
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
/* Emulated kernel tick counter (1 tick == 1 ms in this port; see
 * msecs_to_jiffies below).  Advanced atomically by the sleep shims.
 * NOTE(review): readers elsewhere use plain loads — confirm no reader
 * needs stronger ordering than that. */
unsigned long jiffies;
/* One tracked redox_ioremap() mapping: base address, length, and the
 * backing scheme fd (-1 for the anonymous-memory fallback). */
struct redox_mapped_region {
    void *addr;
    size_t size;
    int fd;
    struct redox_mapped_region *next; /* singly linked list link */
};
/* g_region_lock guards g_regions, the head of the tracked-mapping list. */
static pthread_mutex_t g_region_lock = PTHREAD_MUTEX_INITIALIZER;
static struct redox_mapped_region *g_regions;
/* Atomically credit `delta` ticks to the emulated jiffies counter. */
static void redox_jiffies_advance(unsigned long delta)
{
    /* fetch-and-add has the identical side effect; the return is unused */
    (void)__sync_fetch_and_add(&jiffies, delta);
}
/* kmalloc() shim: GFP flags are meaningless in userspace, so this is a
 * plain heap allocation. */
void *kmalloc(size_t size, unsigned int flags)
{
    void *ptr = malloc(size);
    (void)flags;
    return ptr;
}
/* kzalloc() shim: zero-initialized allocation (calloc-equivalent). */
void *kzalloc(size_t size, unsigned int flags)
{
    void *ptr = malloc(size);
    (void)flags;
    if (ptr) {
        memset(ptr, 0, size);
    }
    return ptr;
}
/* kfree() shim: releases memory from kmalloc/kzalloc; NULL is harmless. */
void kfree(const void *ptr)
{
    void *mutable_ptr = (void *)ptr; /* drop const for free(3) */
    free(mutable_ptr);
}
/* vmalloc() shim: no separate vmalloc arena in userspace — plain malloc. */
void *vmalloc(unsigned long size)
{
    size_t bytes = (size_t)size;
    return malloc(bytes);
}
/* vfree() shim: counterpart of vmalloc(); NULL is harmless. */
void vfree(const void *addr)
{
    void *mutable_addr = (void *)addr; /* drop const for free(3) */
    free(mutable_addr);
}
/* krealloc() shim: resize an allocation, preserving contents, via
 * realloc(3).  GFP flags are ignored. */
void *krealloc(const void *ptr, size_t new_size, unsigned int flags)
{
    void *old = (void *)ptr;
    (void)flags;
    return realloc(old, new_size);
}
/*
 * Record an active mapping so redox_iounmap() can later recover its size
 * and backing fd.  Takes ownership of `fd` (pass -1 if there is none).
 *
 * On allocation failure the fd is closed and the region is simply not
 * tracked: the mapping stays usable by the caller, but a later
 * redox_iounmap() will not find it and the pages are leaked.  Best-effort
 * by design — the caller has no way to report the failure.
 */
static void redox_track_region(void *addr, size_t size, int fd)
{
    struct redox_mapped_region *region = malloc(sizeof(*region));
    if (!region) {
        if (fd >= 0) {
            close(fd);
        }
        return;
    }
    region->addr = addr;
    region->size = size;
    region->fd = fd;
    pthread_mutex_lock(&g_region_lock);
    region->next = g_regions; /* push at list head */
    g_regions = region;
    pthread_mutex_unlock(&g_region_lock);
}
/*
 * Remove and return the tracking record whose base address is `addr`, or
 * NULL if that address was never tracked.  The caller takes ownership of
 * the returned record.
 */
static struct redox_mapped_region *redox_untrack_region(const void *addr)
{
    struct redox_mapped_region **link;
    struct redox_mapped_region *found = NULL;

    pthread_mutex_lock(&g_region_lock);
    /* Walk via pointer-to-pointer so head and interior removal share code. */
    for (link = &g_regions; *link != NULL; link = &(*link)->next) {
        if ((*link)->addr == addr) {
            found = *link;
            *link = found->next; /* unlink in place */
            break;
        }
    }
    pthread_mutex_unlock(&g_region_lock);
    return found;
}
/*
 * Map `size` bytes of physical address space starting at `offset`.
 *
 * Primary path: mmap through the Redox physical-memory scheme; the scheme
 * fd is handed to the tracking record so iounmap can close it.  If the
 * scheme is unavailable or the mapping fails, fall back to a zero-filled
 * anonymous mapping so callers still get usable memory.
 * NOTE(review): the fallback never reaches real hardware — confirm probe
 * code tolerates reading zeros from "registers".
 * Returns NULL only when the fallback mapping itself fails.
 */
void __iomem *redox_ioremap(phys_addr_t offset, size_t size)
{
    int fd = open("/scheme/memory/physical", O_RDWR);
    void *addr;
    if (fd >= 0) {
        addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)offset);
        if (addr != MAP_FAILED) {
            redox_track_region(addr, size, fd); /* fd ownership transfers */
            return addr;
        }
        close(fd);
    }
    /* Fallback: plain anonymous memory, zeroed like fresh MMIO space. */
    addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        pr_err("ioremap fallback failed for %#llx (%zu bytes): %s\n",
            (unsigned long long)offset, size, strerror(errno));
        return NULL;
    }
    memset(addr, 0, size);
    redox_track_region(addr, size, -1); /* -1: no backing scheme fd */
    return addr;
}
/*
 * Undo redox_ioremap(): look up the tracked region for `addr`, unmap it,
 * and close its backing fd if one was kept.  NULL and unknown addresses
 * are ignored (unknown can legitimately happen if tracking failed at map
 * time — see redox_track_region).
 */
void redox_iounmap(void __iomem *addr)
{
    struct redox_mapped_region *region;
    if (!addr) {
        return;
    }
    region = redox_untrack_region(addr);
    if (!region) {
        return; /* never tracked: size unknown, nothing safe to do */
    }
    munmap(region->addr, region->size);
    if (region->fd >= 0) {
        close(region->fd);
    }
    free(region);
}
/* MMIO register accessors: one volatile load or store at the requested
 * width.  `addr` should point into a region from redox_ioremap(). */
void redox_iowrite32(u32 val, void __iomem *addr)
{
    volatile u32 *reg = (volatile u32 *)addr;
    *reg = val;
}
u32 redox_ioread32(const void __iomem *addr)
{
    const volatile u32 *reg = (const volatile u32 *)addr;
    return *reg;
}
void redox_iowrite16(u16 val, void __iomem *addr)
{
    volatile u16 *reg = (volatile u16 *)addr;
    *reg = val;
}
u16 redox_ioread16(const void __iomem *addr)
{
    const volatile u16 *reg = (const volatile u16 *)addr;
    return *reg;
}
void redox_iowrite8(u8 val, void __iomem *addr)
{
    volatile u8 *reg = (volatile u8 *)addr;
    *reg = val;
}
u8 redox_ioread8(const void __iomem *addr)
{
    const volatile u8 *reg = (const volatile u8 *)addr;
    return *reg;
}
/* Store a 32-bit value at byte offset `offset` from `base`.  A NULL base
 * is silently ignored (tolerates an unmapped BAR). */
void redox_mmio_write32(void *base, u32 offset, u32 val)
{
    volatile u32 *reg;
    if (!base) {
        return;
    }
    reg = (volatile u32 *)((u8 *)base + offset);
    *reg = val;
}
/* Load a 32-bit value at byte offset `offset` from `base`.  A NULL base
 * reads as 0 (tolerates an unmapped BAR). */
u32 redox_mmio_read32(void *base, u32 offset)
{
    const volatile u32 *reg;
    if (!base) {
        return 0;
    }
    reg = (const volatile u32 *)((u8 *)base + offset);
    return *reg;
}
/*
 * Allocate page-aligned, zeroed memory standing in for a DMA-coherent
 * buffer.  The "bus address" reported through *dma_handle is simply the
 * virtual address.  Returns NULL on allocation failure.
 */
void *redox_dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
{
    size_t aligned = PAGE_ALIGN(size);
    void *ptr = NULL;
    if (posix_memalign(&ptr, PAGE_SIZE, aligned) != 0) {
        return NULL;
    }
    memset(ptr, 0, aligned);
    if (dma_handle) {
        *dma_handle = (dma_addr_t)(uintptr_t)ptr;
    }
    return ptr;
}
/* Release a buffer from redox_dma_alloc_coherent().  Size and bus handle
 * are irrelevant here because the allocation is ordinary heap memory. */
void redox_dma_free_coherent(size_t size, void *vaddr, dma_addr_t dma_handle)
{
    free(vaddr);
    (void)size;
    (void)dma_handle;
}
/*
 * Return a singleton pci_dev describing "the AMD GPU".  Only the vendor
 * ID (0x1002, AMD/ATI) and one memory BAR flag are pre-set; device,
 * revision, IRQ, and BAR addresses are zero.
 * NOTE(review): confirm the probe path fills in resource_start/len and
 * mmio_base before anything dereferences them.
 * The pointer refers to static storage — never free it; the matching
 * redox_pci_dev_put() is a no-op.
 */
struct pci_dev *redox_pci_find_amd_gpu(void)
{
    static struct pci_dev dev = {
        .vendor = 0x1002, /* PCI vendor ID: AMD/ATI */
        .device = 0,
        .revision = 0,
        .irq = 0,
        .resource_start = {0},
        .resource_len = {0},
        .resource_flags = {IORESOURCE_MEM, 0, 0, 0, 0, 0},
        .driver_data = NULL,
        .mmio_base = NULL,
        .is_amdgpu = 1,
    };
    return &dev;
}
/* ---- PCI shims: no-ops beyond argument validation ---- */

/* Reference counting is not modelled; nothing to release. */
void redox_pci_dev_put(struct pci_dev *pdev)
{
    (void)pdev;
}
/* "Enable" succeeds for any non-NULL device. */
int redox_pci_enable_device(struct pci_dev *pdev)
{
    if (!pdev) {
        return -ENODEV;
    }
    return 0;
}
/* Bus mastering is not modelled here. */
void redox_pci_set_master(struct pci_dev *pdev)
{
    (void)pdev;
}
/* Region reservation is not modelled; succeed for any non-NULL device. */
int redox_pci_request_regions(struct pci_dev *pdev, const char *name)
{
    (void)name;
    if (!pdev) {
        return -ENODEV;
    }
    return 0;
}
/* Nothing was reserved, so nothing to release. */
void redox_pci_release_regions(struct pci_dev *pdev)
{
    (void)pdev;
}
/*
 * Load firmware image `name` from the Redox firmware scheme into a newly
 * allocated struct firmware.  On success *fw owns the image and must be
 * released with redox_release_firmware(); returns 0.  On failure returns
 * -EINVAL (bad arguments), -ENOENT (no such image), -ENOMEM, or -EIO.
 *
 * Fixes vs. the original: read(2) may legally return short counts, so the
 * image is read in a loop (retrying on EINTR) instead of assuming one
 * full-size read; and a zero-length file no longer trips the NULL check
 * when malloc(0) returns NULL.
 */
int redox_request_firmware(const struct firmware **fw, const char *name, void *dev)
{
    char path[512];
    int fd;
    struct stat st;
    struct firmware *image;
    u8 *data;
    size_t want;
    size_t total;
    (void)dev;
    if (!fw || !name) {
        return -EINVAL;
    }
    snprintf(path, sizeof(path), "/scheme/firmware/amdgpu/%s", name);
    fd = open(path, O_RDONLY);
    if (fd < 0) {
        return -ENOENT;
    }
    if (fstat(fd, &st) != 0 || st.st_size < 0) {
        close(fd);
        return -EIO;
    }
    want = (size_t)st.st_size;
    image = calloc(1, sizeof(*image));
    /* malloc(0) may return NULL; over-allocate one byte so an empty
     * firmware file is not mistaken for out-of-memory. */
    data = malloc(want ? want : 1);
    if (!image || !data) {
        free(image);
        free(data);
        close(fd);
        return -ENOMEM;
    }
    /* Read the whole image, tolerating short reads and EINTR. */
    total = 0;
    while (total < want) {
        ssize_t nread = read(fd, data + total, want - total);
        if (nread < 0) {
            if (errno == EINTR) {
                continue;
            }
            break; /* hard I/O error */
        }
        if (nread == 0) {
            break; /* unexpected EOF: file shrank under us */
        }
        total += (size_t)nread;
    }
    close(fd);
    if (total != want) {
        free(image);
        free(data);
        return -EIO;
    }
    image->size = want;
    image->data = data;
    *fw = image;
    return 0;
}
void redox_release_firmware(const struct firmware *fw)
{
struct firmware *owned = (struct firmware *)fw;
if (!owned) {
return;
}
free((void *)owned->data);
free(owned);
}
/*
 * "Request" an IRQ by probing that the Redox IRQ scheme exposes the line:
 * returns 0 if /scheme/irq/<irq> can be opened, -ENOENT otherwise.
 *
 * NOTE(review): the handler is never stored and the fd is closed right
 * away, so no interrupt is ever delivered through this path — confirm
 * callers fall back to polling.
 */
int redox_request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev)
{
    char path[128];
    int fd;
    (void)handler;
    (void)flags;
    (void)name;
    (void)dev;
    snprintf(path, sizeof(path), "/scheme/irq/%u", irq);
    fd = open(path, O_RDWR);
    if (fd < 0) {
        return -ENOENT;
    }
    close(fd);
    return 0;
}
/* Counterpart of redox_request_irq(): nothing was registered there, so
 * there is nothing to tear down here. */
void redox_free_irq(unsigned int irq, void *dev_id)
{
    (void)dev_id;
    (void)irq;
}
/*
 * Sleep for at least `msecs` milliseconds, then credit the elapsed time
 * to the emulated jiffies counter.
 *
 * Fix vs. the original: nanosleep(2) can be cut short by a signal
 * (returns -1/EINTR with the remaining time in `rem`); restart with the
 * remainder so the kernel msleep() "at least msecs" contract holds.
 */
void msleep(unsigned int msecs)
{
    struct timespec req;
    struct timespec rem;
    req.tv_sec = msecs / 1000U;
    req.tv_nsec = (long)(msecs % 1000U) * 1000000L;
    while (nanosleep(&req, &rem) == -1 && errno == EINTR) {
        req = rem; /* resume with the unslept remainder */
    }
    redox_jiffies_advance(msecs_to_jiffies(msecs));
}
/*
 * Delay for approximately `usecs` microseconds, restarting nanosleep(2)
 * after signal interruption, then credit the time to jiffies.
 *
 * NOTE(review): kernel udelay() busy-waits; this sleeps, so the actual
 * delay can be much longer than requested — confirm no caller depends on
 * tight timing.
 */
void udelay(unsigned long usecs)
{
    struct timespec req;
    struct timespec rem;
    req.tv_sec = usecs / 1000000UL;
    req.tv_nsec = (long)(usecs % 1000000UL) * 1000L;
    while (nanosleep(&req, &rem) == -1 && errno == EINTR) {
        req = rem; /* resume with the unslept remainder */
    }
    redox_jiffies_advance(usecs_to_jiffies((unsigned int)usecs));
}
/* Millisecond delay.  NOTE(review): implemented as a sleep via msleep(),
 * not the busy-wait kernel mdelay() provides — confirm no caller needs a
 * delay that is safe in a non-sleeping context. */
void mdelay(unsigned long msecs)
{
    msleep((unsigned int)msecs);
}
/* Jiffies are modelled at 1000 Hz in this port, so milliseconds map 1:1
 * to ticks. */
unsigned long msecs_to_jiffies(unsigned int msecs)
{
    unsigned long ticks = msecs;
    return ticks;
}
/* Convert microseconds to 1000 Hz jiffies, rounding up so that even a
 * sub-millisecond delay accounts for at least one tick (DIV_ROUND_UP is a
 * project-provided macro). */
unsigned long usecs_to_jiffies(unsigned int usecs)
{
    return (unsigned long)DIV_ROUND_UP(usecs, 1000U);
}