Red Bear OS — a microkernel OS in Rust, derived from Redox OS (https://www.redox-os.org).
Additions over upstream Redox: an AMD GPU driver (amdgpu) via a LinuxKPI compat layer; ext4 filesystem support (the ext4d scheme daemon); ACPI fixes for AMD bare metal (x2APIC, DMAR, IVRS, MCFG); and custom branding (hostname, os-release, boot identity). The build system is full upstream Redox with an RBOS overlay in local/. Patches for kernel, base, and relibc are symlinked from local/patches/ and protected from make clean/distclean. Custom recipes live in local/recipes/ with symlinks into the recipes/ search path. Build: make all CONFIG_NAME=redbear-full. Sync: ./local/scripts/sync-upstream.sh
This commit is contained in:
@@ -0,0 +1,141 @@
|
||||
# AMD GPU driver port for Redox OS — Phase P2: Display Core (modesetting only)
# Scope: AMD DC modesetting, connector detection, EDID, CRTC programming.
# Full amdgpu (acceleration, compute, video decode) is Phase P5.

[source]
# Local overlay recipe. The extracted Linux 7.0-rc7 AMDGPU tree lives next to this
# recipe at ../amdgpu-source and is referenced by the custom build script below.
path = "source"

[build]
template = "custom"
dependencies = [
    "redox-driver-sys",
    "linux-kpi",
    "firmware-loader",
]
script = """
DYNAMIC_INIT

# Paths
AMD_ROOT="${COOKBOOK_SOURCE}/../amdgpu-source/gpu/drm/amd"
AMD_SRC="${AMD_ROOT}"
TTM_SRC="${COOKBOOK_SOURCE}/../amdgpu-source/gpu/drm/ttm"
INCLUDES="${COOKBOOK_SOURCE}/../amdgpu-source/include"
LINUX_KPI="${COOKBOOK_SYSROOT}/include/linux-kpi"
REDOX_GLUE="${COOKBOOK_SOURCE}"
TARGET_CC="${COOKBOOK_TARGET}-gcc"

# Compiler flags for AMD driver
export CFLAGS="-D__redox__ -D__KERNEL__ -DCONFIG_DRM_AMDGPU -DCONFIG_DRM_AMD_DC \
    -DCONFIG_DRM_AMD_DC_FP -DCONFIG_DRM_AMD_ACP \
    -I${LINUX_KPI} \
    -I${REDOX_GLUE} \
    -I${INCLUDES} \
    -I${INCLUDES}/drm \
    -I${AMD_SRC}/include \
    -I${AMD_SRC}/include/asic_reg \
    -I${AMD_SRC}/display \
    -I${AMD_SRC}/display/dc \
    -I${AMD_SRC}/display/dc/dml \
    -I${AMD_SRC}/display/dc/dcn20 \
    -I${AMD_SRC}/display/dc/dcn21 \
    -I${AMD_SRC}/display/dc/dcn30 \
    -I${AMD_SRC}/display/dc/dcn301 \
    -I${AMD_SRC}/display/dc/dcn31 \
    -I${AMD_SRC}/display/dc/dcn32 \
    -I${AMD_SRC}/display/dc/dcn35 \
    -I${AMD_SRC}/display/dc/dml2 \
    -I${AMD_SRC}/display/dmub \
    -I${AMD_SRC}/display/modules \
    -I${AMD_SRC}/display/modules/freesync \
    -I${AMD_SRC}/display/modules/color \
    -I${AMD_SRC}/display/modules/info_packet \
    -I${AMD_SRC}/display/modules/power \
    -I${AMD_SRC}/pm/swsmu \
    -I${AMD_SRC}/pm/swsmu/inc \
    -I${AMD_SRC}/pm/powerplay \
    -I${AMD_SRC}/pm/powerplay/inc \
    -I${AMD_SRC}/pm/powerplay/hwmgr \
    -fPIC -O2 -Wall -Wno-unused-function -Wno-unused-variable \
    -Wno-address-of-packed-member -Wno-initializer-overrides"

# Stage 1: Compile Redox glue code
"${TARGET_CC}" -c ${CFLAGS} "${REDOX_GLUE}/amdgpu_redox_main.c" -o amdgpu_redox_main.o
"${TARGET_CC}" -c ${CFLAGS} "${REDOX_GLUE}/redox_stubs.c" -o redox_stubs.o

# Stage 2: Compile AMD Display Core (DC) — display pipeline only
# Each file MUST compile. Any failure is a hard error.
#
# BUG FIX: the previous `find … | while read` form ran the loop body in a
# subshell, so the success/failed counters never propagated (the stage summary
# always printed 0) and `exit 1` only terminated the subshell. Iterate over a
# command substitution instead — the kernel source tree contains no paths with
# whitespace.
# NOTE(review): objects are flattened to their basename; confirm the filtered
# display tree has no duplicate .c basenames, or later objects silently
# overwrite earlier ones.
success=0
failed=0
for src in $(find "${AMD_SRC}/display/" -name '*.c' | grep -v '/dml2/' | grep -v '/dml/'); do
    obj=$(basename "${src%.c}.o")
    if "${TARGET_CC}" -c ${CFLAGS} "$src" -o "$obj" 2>"${obj}.log"; then
        success=$((success + 1))
    else
        failed=$((failed + 1))
        echo "ERROR: failed to compile $(basename $src)"
        cat "${obj}.log"
        exit 1
    fi
done
echo "Stage 2: AMD DC compiled ${success} files, ${failed} failed"

# Stage 3: Compile TTM memory manager (same subshell fix as Stage 2)
success=0
failed=0
for src in $(find "${TTM_SRC}/" -name '*.c'); do
    obj=$(basename "${src%.c}.o")
    if "${TARGET_CC}" -c ${CFLAGS} "$src" -o "$obj" 2>"${obj}.log"; then
        success=$((success + 1))
    else
        failed=$((failed + 1))
        echo "ERROR: failed to compile $(basename $src)"
        cat "${obj}.log"
        exit 1
    fi
done
echo "Stage 3: TTM compiled ${success} files, ${failed} failed"

# Stage 4: Compile minimal amdgpu core (enough for display init).
# Missing files are tolerated (tree layout varies between kernel versions);
# files that exist must compile.
CORE_SRCS="amdgpu_device.c amdgpu_drv.c amdgpu_i2c.c amdgpu_atombios.c \
    amdgpu_atombios_crtc.c amdgpu_bios.c amdgpu_mode.c amdgpu_display.c \
    amdgpu_fb.c amdgpu_gem.c amdgpu_object.c amdgpu_gmc.c amdgpu_mmhub.c \
    amdgpu_irq.c amdgpu_ring.c amdgpu_fence.c amdgpu_ttm.c amdgpu_bo_list.c"

success=0
failed=0
for src_name in $CORE_SRCS; do
    src="${AMD_SRC}/amdgpu/${src_name}"
    if [ -f "$src" ]; then
        obj="${src_name%.c}.o"
        if "${TARGET_CC}" -c ${CFLAGS} "$src" -o "$obj" 2>"${obj}.log"; then
            success=$((success + 1))
        else
            failed=$((failed + 1))
            echo "ERROR: failed to compile $src_name"
            cat "${obj}.log"
            exit 1
        fi
    fi
done
echo "Stage 4: amdgpu core compiled ${success} files, ${failed} failed"

# Stage 5: Link all non-empty objects into one shared library
OBJS=""
for obj in $(find . -name '*.o' -size +0c); do
    OBJS="$OBJS $obj"
done
if [ -z "$OBJS" ]; then
    echo "ERROR: no object files compiled successfully"
    exit 1
fi
"${TARGET_CC}" -shared -o libamdgpu_dc_redox.so $OBJS \
    -lredox_driver_sys -llinux_kpi -lm -lpthread

# Install driver library and public glue header into the stage sysroot
mkdir -p "${COOKBOOK_STAGE}/usr/lib/redox/drivers"
cp libamdgpu_dc_redox.so "${COOKBOOK_STAGE}/usr/lib/redox/drivers/"
mkdir -p "${COOKBOOK_STAGE}/usr/include/amdgpu-redox"
cp "${REDOX_GLUE}/redox_glue.h" "${COOKBOOK_STAGE}/usr/include/amdgpu-redox/"
"""
|
||||
@@ -0,0 +1,126 @@
|
||||
# Standalone Makefile for the AMDGPU DC port — mirrors the cookbook recipe so
# the driver can be built and syntax-checked outside the Redox build system.

CC = x86_64-unknown-redox-gcc
AR = x86_64-unknown-redox-ar

AMDGPU_SRC ?= ../amdgpu-source/gpu/drm/amd
TTM_SRC ?= ../amdgpu-source/gpu/drm/ttm
AMDGPU_INCLUDES ?= ../amdgpu-source/include
LINUX_KPI ?= ../../drivers/linux-kpi/src/c_headers

CFLAGS ?= -D__redox__ -D__KERNEL__ -DCONFIG_DRM_AMDGPU -DCONFIG_DRM_AMD_DC \
	-DCONFIG_DRM_AMD_DC_FP -DCONFIG_DRM_AMD_ACP \
	-I$(LINUX_KPI) \
	-I. \
	-I$(AMDGPU_INCLUDES) \
	-I$(AMDGPU_INCLUDES)/drm \
	-I$(AMDGPU_SRC)/include \
	-I$(AMDGPU_SRC)/include/asic_reg \
	-I$(AMDGPU_SRC)/display \
	-I$(AMDGPU_SRC)/display/dc \
	-I$(AMDGPU_SRC)/display/dc/dml \
	-I$(AMDGPU_SRC)/display/dc/dcn20 \
	-I$(AMDGPU_SRC)/display/dc/dcn21 \
	-I$(AMDGPU_SRC)/display/dc/dcn30 \
	-I$(AMDGPU_SRC)/display/dc/dcn301 \
	-I$(AMDGPU_SRC)/display/dc/dcn31 \
	-I$(AMDGPU_SRC)/display/dc/dcn32 \
	-I$(AMDGPU_SRC)/display/dc/dcn35 \
	-I$(AMDGPU_SRC)/display/dc/dml2 \
	-I$(AMDGPU_SRC)/display/dmub \
	-I$(AMDGPU_SRC)/display/modules \
	-I$(AMDGPU_SRC)/display/modules/freesync \
	-I$(AMDGPU_SRC)/display/modules/color \
	-I$(AMDGPU_SRC)/display/modules/info_packet \
	-I$(AMDGPU_SRC)/display/modules/power \
	-I$(AMDGPU_SRC)/pm/swsmu \
	-I$(AMDGPU_SRC)/pm/swsmu/inc \
	-I$(AMDGPU_SRC)/pm/powerplay \
	-I$(AMDGPU_SRC)/pm/powerplay/inc \
	-I$(AMDGPU_SRC)/pm/powerplay/hwmgr \
	-fPIC -O2 -Wall -Wno-unused-function -Wno-unused-variable \
	-Wno-address-of-packed-member -Wno-initializer-overrides

LDFLAGS ?= -shared
LDLIBS ?= -lredox_driver_sys -llinux_kpi -lm -lpthread

GLUE_OBJS := redox_stubs.o amdgpu_redox_main.o
CORE_SRCS := \
	$(AMDGPU_SRC)/amdgpu/amdgpu_device.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_drv.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_i2c.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_atombios.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_atombios_crtc.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_bios.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_mode.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_display.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_fb.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_gem.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_object.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_gmc.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_mmhub.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_irq.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_ring.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_fence.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_ttm.c \
	$(AMDGPU_SRC)/amdgpu/amdgpu_bo_list.c

# Object lists are informational — the link recipe below compiles in a shell
# loop and links whatever non-empty objects exist in the build directory.
# NOTE(review): $(notdir …) flattens every source to its basename; two sources
# with the same basename would clobber each other's .o — confirm the filtered
# tree has unique basenames.
CORE_OBJS := $(patsubst %.c,%.o,$(notdir $(CORE_SRCS)))
DISPLAY_SRCS := $(shell find $(AMDGPU_SRC)/display -name '*.c' ! -path '*/dml/*' ! -path '*/dml2/*')
DISPLAY_OBJS := $(patsubst %.c,%.o,$(notdir $(DISPLAY_SRCS)))
TTM_SRCS := $(shell find $(TTM_SRC) -name '*.c')
TTM_OBJS := $(patsubst %.c,%.o,$(notdir $(TTM_SRCS)))

ALL_OBJS := $(GLUE_OBJS) $(DISPLAY_OBJS) $(TTM_OBJS) $(CORE_OBJS)

# FIX: the original .PHONY line also declared `display`, `core` and `ttm`,
# but no such rules exist — `make display` just errored out. Only real
# targets are declared phony now.
.PHONY: all clean check

all: libamdgpu_dc_redox.so

# Compile everything in one recipe under `set -e`: any compile failure aborts
# the build immediately (failed is therefore 0 whenever the summary prints).
libamdgpu_dc_redox.so: $(GLUE_OBJS)
	@set -e; \
	success=0; failed=0; \
	for src in $(DISPLAY_SRCS); do \
		obj=$$(basename "$${src%.c}.o"); \
		if $(CC) -c $(CFLAGS) "$$src" -o "$$obj"; then \
			success=$$((success + 1)); \
		else \
			failed=$$((failed + 1)); \
			echo "ERROR: failed to compile $$src"; \
			exit 1; \
		fi; \
	done; \
	for src in $(TTM_SRCS); do \
		obj=$$(basename "$${src%.c}.o"); \
		if $(CC) -c $(CFLAGS) "$$src" -o "$$obj"; then \
			success=$$((success + 1)); \
		else \
			failed=$$((failed + 1)); \
			echo "ERROR: failed to compile $$src"; \
			exit 1; \
		fi; \
	done; \
	for src in $(CORE_SRCS); do \
		if [ -f "$$src" ]; then \
			obj=$$(basename "$${src%.c}.o"); \
			if $(CC) -c $(CFLAGS) "$$src" -o "$$obj"; then \
				success=$$((success + 1)); \
			else \
				failed=$$((failed + 1)); \
				echo "ERROR: failed to compile $$src"; \
				exit 1; \
			fi; \
		fi; \
	done; \
	echo "AMD DC: compiled $$success files successfully"; \
	$(CC) $(LDFLAGS) -o $@ $$(find . -maxdepth 1 -name '*.o' -size +0c) $(LDLIBS)

redox_stubs.o: redox_stubs.c redox_glue.h
	$(CC) -c $(CFLAGS) $< -o $@

amdgpu_redox_main.o: amdgpu_redox_main.c redox_glue.h
	$(CC) -c $(CFLAGS) $< -o $@

# Fast syntax-only pass over the glue sources
check: $(GLUE_OBJS)
	$(CC) -fsyntax-only $(CFLAGS) amdgpu_redox_main.c
	$(CC) -fsyntax-only $(CFLAGS) redox_stubs.c

clean:
	rm -f *.o libamdgpu_dc_redox.so
|
||||
@@ -0,0 +1,427 @@
|
||||
#include "redox_glue.h"

/*
 * amdgpu_redox_main.c — Redox-side entry points for the AMD display port.
 * Single-GPU assumption throughout: one device, one MMIO mapping, one set of
 * file-scope globals. redox-drm performs the PCI/BAR mapping and hands the
 * pointers to amdgpu_redox_init().
 */

/* Global state */
static struct drm_device g_drm_dev;          /* fake DRM device handed to Linux-side code */
static struct device g_device;               /* fake struct device backing g_drm_dev */
static struct pci_dev *g_pci_dev;            /* obtained from redox_pci_find_amd_gpu() */
static void __iomem *g_mmio_base;            /* BAR0 register aperture mapped by redox-drm */
static size_t g_mmio_size;                   /* aperture size in bytes */
static u64 g_fb_phys;                        /* scanout memory physical base */
static size_t g_fb_size;                     /* scanout memory size */
static int g_asic_family = -1;               /* -1 until amdgpu_dc_init() identifies the ASIC */

/* ASIC family definitions based on device IDs */
/* NOTE(review): these look like PCI device-ID bases, and amdgpu_dc_init()
 * compares them against a 32-bit read at MMIO offset 0 — confirm the GPU
 * actually mirrors its device ID at that register. */
#define ASIC_FAMILY_NAVI10 0x7310
#define ASIC_FAMILY_NAVI14 0x7340
#define ASIC_FAMILY_NAVI21 0x73A0
#define ASIC_FAMILY_NAVI22 0x73C0
#define ASIC_FAMILY_NAVI23 0x73E0
#define ASIC_FAMILY_NAVI24 0x7420
#define ASIC_FAMILY_NAVI31 0x7440
#define ASIC_FAMILY_NAVI32 0x7480
#define ASIC_FAMILY_NAVI33 0x74A0

/* HPD status register: one bit per connector (see g_connector_descs). */
#define AMDGPU_DC_HPD_STATUS_REG 0x4A00
#define AMDGPU_DC_MAX_CONNECTORS 4
#define AMDGPU_DC_BYTES_PER_PIXEL 4U
#define AMDGPU_DC_PIXEL_FORMAT_ARGB8888 3U

/* OTG (output timing generator) register offsets, relative to a per-CRTC base.
 * NOTE(review): this is a simplified register model, not real DCN asic_reg
 * offsets — confirm against the target ASIC headers before bare-metal use. */
#define AMDGPU_DC_OTG_CONTROL 0x00
#define AMDGPU_DC_OTG_VIEWPORT_SIZE 0x10
#define AMDGPU_DC_OTG_VSYNC_ADJUST 0x14
#define AMDGPU_DC_OTG_H_TOTAL 0x18
#define AMDGPU_DC_OTG_V_TOTAL 0x1C
#define AMDGPU_DC_OTG_VSTARTUP 0x20

/* HUBP (display DMA / surface fetch) register offsets, per-pipe base. */
#define AMDGPU_DC_HUBP_PRIMARY_ADDR_LOW 0x00
#define AMDGPU_DC_HUBP_PRIMARY_ADDR_HIGH 0x04
#define AMDGPU_DC_HUBP_SURFACE_PITCH 0x08
#define AMDGPU_DC_HUBP_SURFACE_CONFIG 0x0C
#define AMDGPU_DC_HUBP_VIEWPORT_START 0x10
#define AMDGPU_DC_HUBP_VIEWPORT_SIZE 0x14
#define AMDGPU_DC_HUBP_FLIP_CONTROL 0x18
#define AMDGPU_DC_HUBP_FLIP_ADDR_LOW 0x1C
#define AMDGPU_DC_HUBP_FLIP_ADDR_HIGH 0x20

/* FFI layout shared with the Rust side of redox-drm — field order is ABI.
 * Filled by amdgpu_dc_get_connector_info(). */
struct connector_info_ffi {
    int id;
    int connector_type;
    int connector_type_id;
    int connection;
    int mm_width;
    int mm_height;
    int encoder_id;
};

/* Static description of one physical connector; hpd_mask selects its bit in
 * AMDGPU_DC_HPD_STATUS_REG. */
struct amdgpu_redox_connector_desc {
    int id;
    u32 hpd_mask;
    int connector_type;
    int connector_type_id;
    int encoder_id;
    int mm_width;
    int mm_height;
};

/* Fixed connector table: two of type 10 and two of type 11.
 * NOTE(review): 10/11 presumably mirror DRM_MODE_CONNECTOR_DisplayPort (10)
 * and HDMIA (11); mm_width/mm_height are hard-coded placeholder panel
 * dimensions until EDID parsing lands — confirm with the Rust consumer. */
static const struct amdgpu_redox_connector_desc g_connector_descs[AMDGPU_DC_MAX_CONNECTORS] = {
    { .id = 1, .hpd_mask = 0x01, .connector_type = 10, .connector_type_id = 1, .encoder_id = 1, .mm_width = 600, .mm_height = 340 },
    { .id = 2, .hpd_mask = 0x02, .connector_type = 10, .connector_type_id = 2, .encoder_id = 2, .mm_width = 600, .mm_height = 340 },
    { .id = 3, .hpd_mask = 0x04, .connector_type = 11, .connector_type_id = 3, .encoder_id = 3, .mm_width = 600, .mm_height = 340 },
    { .id = 4, .hpd_mask = 0x08, .connector_type = 11, .connector_type_id = 4, .encoder_id = 4, .mm_width = 600, .mm_height = 340 },
};
|
||||
|
||||
/* Translate a (base, offset) register pair into a CPU pointer inside the
 * mapped MMIO aperture. No bounds checking here — callers go through
 * amdgpu_dc_validate_mmio_access() first. */
static inline void __iomem *amdgpu_dc_reg_ptr(u32 base, u32 offset)
{
    u8 __iomem *aperture = (u8 __iomem *)g_mmio_base;

    return aperture + base + offset;
}
|
||||
|
||||
/*
 * Check that a 32-bit register access at (base + offset) falls entirely
 * inside the mapped MMIO aperture.
 *
 * Returns 0 when the access is safe, -ENODEV before init, -EINVAL when the
 * access would run past the aperture. The sum is computed in u64 so two
 * large u32 operands cannot wrap.
 */
static int amdgpu_dc_validate_mmio_access(u32 base, u32 offset)
{
    u64 end_of_access;

    if (!g_mmio_base) {
        return -ENODEV;
    }

    end_of_access = (u64)base + (u64)offset + sizeof(u32);
    if (end_of_access > g_mmio_size) {
        pr_err("amdgpu_redox: MMIO access %#x+%#x outside aperture %zu\n",
               base, offset, g_mmio_size);
        return -EINVAL;
    }

    return 0;
}
|
||||
|
||||
/* Bounds-checked 32-bit register write; silently dropped when the access
 * would fall outside the aperture (the validator already logged it). */
static inline void amdgpu_dc_write_reg(u32 base, u32 offset, u32 value)
{
    if (amdgpu_dc_validate_mmio_access(base, offset) == 0) {
        writel(value, amdgpu_dc_reg_ptr(base, offset));
    }
}
|
||||
|
||||
/* Bounds-checked 32-bit register read; returns 0 on an out-of-range access
 * (callers cannot distinguish that from a register that reads 0). */
static inline u32 amdgpu_dc_read_reg(u32 base, u32 offset)
{
    return amdgpu_dc_validate_mmio_access(base, offset) == 0
               ? readl(amdgpu_dc_reg_ptr(base, offset))
               : 0;
}
|
||||
|
||||
static inline u32 amdgpu_dc_hpd_status(void)
|
||||
{
|
||||
if (amdgpu_dc_validate_mmio_access(0, AMDGPU_DC_HPD_STATUS_REG) != 0) {
|
||||
return 0;
|
||||
}
|
||||
return readl((u8 __iomem *)g_mmio_base + AMDGPU_DC_HPD_STATUS_REG);
|
||||
}
|
||||
|
||||
/* Initialize AMD Display Core */
|
||||
int amdgpu_dc_init(void *mmio_base, size_t mmio_size)
|
||||
{
|
||||
int ret = 0;
|
||||
u32 gpu_id = 0;
|
||||
const char *firmware_name = NULL;
|
||||
|
||||
printk("amdgpu_redox: initializing AMD Display Core\n");
|
||||
|
||||
if (!mmio_base || mmio_size < sizeof(u32)) {
|
||||
pr_err("amdgpu_redox: invalid MMIO for DC init\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
gpu_id = readl(mmio_base);
|
||||
printk("amdgpu_redox: GPU ID = %#010x\n", gpu_id);
|
||||
|
||||
switch (gpu_id) {
|
||||
case ASIC_FAMILY_NAVI10:
|
||||
g_asic_family = ASIC_FAMILY_NAVI10;
|
||||
firmware_name = "dmcub_dcn20.bin";
|
||||
break;
|
||||
case ASIC_FAMILY_NAVI14:
|
||||
g_asic_family = ASIC_FAMILY_NAVI14;
|
||||
firmware_name = "dmcub_dcn20.bin";
|
||||
break;
|
||||
case ASIC_FAMILY_NAVI21:
|
||||
g_asic_family = ASIC_FAMILY_NAVI21;
|
||||
firmware_name = "dmcub_dcn31.bin";
|
||||
break;
|
||||
case ASIC_FAMILY_NAVI22:
|
||||
g_asic_family = ASIC_FAMILY_NAVI22;
|
||||
firmware_name = "dmcub_dcn31.bin";
|
||||
break;
|
||||
case ASIC_FAMILY_NAVI23:
|
||||
g_asic_family = ASIC_FAMILY_NAVI23;
|
||||
firmware_name = "dmcub_dcn31.bin";
|
||||
break;
|
||||
case ASIC_FAMILY_NAVI24:
|
||||
g_asic_family = ASIC_FAMILY_NAVI24;
|
||||
firmware_name = "dmcub_dcn31.bin";
|
||||
break;
|
||||
case ASIC_FAMILY_NAVI31:
|
||||
g_asic_family = ASIC_FAMILY_NAVI31;
|
||||
firmware_name = "dmcub_dcn31.bin";
|
||||
break;
|
||||
case ASIC_FAMILY_NAVI32:
|
||||
g_asic_family = ASIC_FAMILY_NAVI32;
|
||||
firmware_name = "dmcub_dcn31.bin";
|
||||
break;
|
||||
case ASIC_FAMILY_NAVI33:
|
||||
g_asic_family = ASIC_FAMILY_NAVI33;
|
||||
firmware_name = "dmcub_dcn31.bin";
|
||||
break;
|
||||
default:
|
||||
pr_warn("amdgpu_redox: unknown ASIC %#010x, using DCN31 firmware\n", gpu_id);
|
||||
g_asic_family = gpu_id;
|
||||
firmware_name = "dmcub_dcn31.bin";
|
||||
break;
|
||||
}
|
||||
|
||||
printk("amdgpu_redox: ASIC family identified, loading firmware: %s\n", firmware_name);
|
||||
|
||||
{
|
||||
const struct firmware *fw = NULL;
|
||||
int fw_ret = request_firmware(&fw, firmware_name, NULL);
|
||||
if (fw_ret != 0 || !fw) {
|
||||
pr_warn("amdgpu_redox: firmware %s not available (err=%d), continuing without\n",
|
||||
firmware_name, fw_ret);
|
||||
} else {
|
||||
printk("amdgpu_redox: firmware %s loaded (%zu bytes)\n", firmware_name, fw->size);
|
||||
release_firmware(fw);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Initialize AMD GPU hardware for display */
|
||||
int amdgpu_redox_init(void *mmio_base, size_t mmio_size, uint64_t fb_phys, size_t fb_size)
|
||||
{
|
||||
int ret;
|
||||
printk("amdgpu_redox: initializing AMD GPU display\n");
|
||||
printk("amdgpu_redox: MMIO base=%p size=%zu\n", mmio_base, mmio_size);
|
||||
printk("amdgpu_redox: FB phys=%#llx size=%zu\n", (unsigned long long)fb_phys, fb_size);
|
||||
|
||||
if (!mmio_base || mmio_size == 0) {
|
||||
pr_err("amdgpu_redox: invalid MMIO mapping provided by redox-drm\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&g_drm_dev, 0, sizeof(g_drm_dev));
|
||||
memset(&g_device, 0, sizeof(g_device));
|
||||
|
||||
g_mmio_base = mmio_base;
|
||||
g_mmio_size = mmio_size;
|
||||
g_fb_phys = fb_phys;
|
||||
g_fb_size = fb_size;
|
||||
|
||||
g_pci_dev = redox_pci_find_amd_gpu();
|
||||
if (!g_pci_dev) {
|
||||
pr_err("amdgpu_redox: no AMD PCI device available from integration layer\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
g_pci_dev->mmio_base = g_mmio_base;
|
||||
g_pci_dev->resource_start[0] = (phys_addr_t)(uintptr_t)g_mmio_base;
|
||||
g_pci_dev->resource_len[0] = g_mmio_size;
|
||||
|
||||
g_device.pci_dev = g_pci_dev;
|
||||
g_drm_dev.dev = &g_device;
|
||||
|
||||
ret = amdgpu_dc_init(mmio_base, mmio_size);
|
||||
if (ret != 0) {
|
||||
pr_err("amdgpu_redox: failed to initialize DC\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Cleanup */
|
||||
void amdgpu_redox_cleanup(void)
|
||||
{
|
||||
printk("amdgpu_redox: cleanup\n");
|
||||
if (g_pci_dev) {
|
||||
redox_pci_dev_put(g_pci_dev);
|
||||
g_pci_dev = NULL;
|
||||
}
|
||||
|
||||
g_mmio_base = NULL;
|
||||
g_mmio_size = 0;
|
||||
g_fb_phys = 0;
|
||||
g_fb_size = 0;
|
||||
memset(&g_drm_dev, 0, sizeof(g_drm_dev));
|
||||
memset(&g_device, 0, sizeof(g_device));
|
||||
}
|
||||
|
||||
/* Get connector info — called by redox-drm */
|
||||
int amdgpu_dc_detect_connectors(void)
|
||||
{
|
||||
int num_connectors = 0;
|
||||
|
||||
if (!g_mmio_base) {
|
||||
pr_err("amdgpu_redox: detect_connectors called before init\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
#ifdef __redox__
|
||||
u32 hpd_status = amdgpu_dc_hpd_status();
|
||||
int i;
|
||||
|
||||
for (i = 0; i < AMDGPU_DC_MAX_CONNECTORS; ++i) {
|
||||
if (hpd_status & g_connector_descs[i].hpd_mask) {
|
||||
num_connectors++;
|
||||
}
|
||||
}
|
||||
|
||||
printk("amdgpu_redox: detected %d connector(s)\n", num_connectors);
|
||||
#else
|
||||
printk("amdgpu_redox: running on Linux, using AMD DC detection\n");
|
||||
#endif
|
||||
|
||||
return num_connectors;
|
||||
}
|
||||
|
||||
/* Get connector info by index */
|
||||
int amdgpu_dc_get_connector_info(int idx, void *info)
|
||||
{
|
||||
struct connector_info_ffi *ffi_info = (struct connector_info_ffi *)info;
|
||||
|
||||
if (!g_mmio_base) {
|
||||
pr_err("amdgpu_redox: get_connector_info called before init\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (idx < 0 || !ffi_info) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#ifdef __redox__
|
||||
{
|
||||
u32 hpd_status = amdgpu_dc_hpd_status();
|
||||
int active_index = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < AMDGPU_DC_MAX_CONNECTORS; ++i) {
|
||||
const struct amdgpu_redox_connector_desc *desc = &g_connector_descs[i];
|
||||
|
||||
if (!(hpd_status & desc->hpd_mask)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (active_index == idx) {
|
||||
ffi_info->id = desc->id;
|
||||
ffi_info->connector_type = desc->connector_type;
|
||||
ffi_info->connector_type_id = desc->connector_type_id;
|
||||
ffi_info->connection = 1;
|
||||
ffi_info->mm_width = desc->mm_width;
|
||||
ffi_info->mm_height = desc->mm_height;
|
||||
ffi_info->encoder_id = desc->encoder_id;
|
||||
return 0;
|
||||
}
|
||||
|
||||
active_index++;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* Set CRTC mode — called by redox-drm for modesetting.
 *
 * Programs one OTG (timing generator) + HUBP (surface fetch) pair for a
 * simple ARGB8888 scanout: disable the OTG, program the surface address and
 * timing, then re-enable. fb_addr is a GPU-virtual (GTT) address, not a CPU
 * physical address — see the block comment below.
 *
 * Returns 0 on success (including the Linux stub path), -ENODEV before
 * init, -EINVAL for a bad crtc_id / mode / framebuffer.
 *
 * NOTE(review): h_total/v_total use fixed blanking margins (160/45 — the
 * CVT-RB-like values for common modes); real modes need per-mode timing from
 * EDID. Confirm before relying on this for arbitrary resolutions.
 */
int amdgpu_dc_set_crtc(int crtc_id, uint64_t fb_addr, uint32_t width, uint32_t height)
{
    printk("amdgpu_redox: set_crtc(%d, fb=%#llx, %ux%u)\n",
           crtc_id,
           (unsigned long long)fb_addr,
           width,
           height);

    if (!g_mmio_base) {
        pr_err("amdgpu_redox: set_crtc called before amdgpu_redox_init\n");
        return -ENODEV;
    }

#ifdef __redox__
    const u32 bytes_per_pixel = AMDGPU_DC_BYTES_PER_PIXEL;
    u32 pitch;
    u32 viewport_size;
    /* Fixed horizontal/vertical blanking margins added to the active size. */
    const u32 h_total = width + 160U;
    const u32 v_total = height + 45U;
    const u32 v_sync_start = height + 3U;
    const u32 v_sync_end = v_sync_start + 5U;
    /* Packed register: sync start in the low 16 bits, sync end in the high. */
    const u32 v_sync_adjust = (v_sync_start & 0xFFFFU) | (v_sync_end << 16);
    /* Line at which the pipe starts fetching, one line before vsync start. */
    const u32 vstartup = v_sync_start > 1U ? (v_sync_start - 1U) : 0U;
    u64 required_bytes;

    /* Four OTG/HUBP pairs — matches AMDGPU_DC_MAX_CONNECTORS. */
    if (crtc_id < 0 || crtc_id > 3) {
        pr_err("amdgpu_redox: invalid crtc_id %d\n", crtc_id);
        return -EINVAL;
    }

    /* Width/height must fit the 16-bit fields of the packed registers. */
    if (width == 0 || height == 0 || width > 0xFFFFU || height > 0xFFFFU) {
        pr_err("amdgpu_redox: invalid mode %ux%u\n", width, height);
        return -EINVAL;
    }

    if (width > (UINT32_MAX / bytes_per_pixel)) {
        pr_err("amdgpu_redox: pitch overflow for width %u\n", width);
        return -EINVAL;
    }

    pitch = width * bytes_per_pixel;
    viewport_size = (width & 0xFFFFU) | (height << 16);
    required_bytes = (u64)pitch * (u64)height;

    /* The Rust-side allocates scanout buffers via GTT VA space (0..256MiB).
     * The display controller programs these GPU-virtual addresses directly;
     * the GTT hardware translates them to physical backing pages at runtime.
     * Validate only that the address + size fits in a u64 and that the
     * programmed registers can hold the values. */
    if (required_bytes == 0) {
        pr_err("amdgpu_redox: zero-sized framebuffer for crtc %d\n", crtc_id);
        return -EINVAL;
    }

    /* Per-CRTC register bases: OTGs at stride 0x800, HUBPs at 0x400. */
    u32 otg_base = 0x4800 + (crtc_id * 0x800);
    u32 hubp_base = 0x5800 + (crtc_id * 0x400);
    u32 otg_control;

    /* Validate the highest offset touched in each bank up front so the
     * write sequence below cannot be silently truncated partway through. */
    if (amdgpu_dc_validate_mmio_access(otg_base, AMDGPU_DC_OTG_VSTARTUP) != 0 ||
        amdgpu_dc_validate_mmio_access(hubp_base, AMDGPU_DC_HUBP_FLIP_ADDR_HIGH) != 0) {
        return -EINVAL;
    }

    /* Disable the timing generator before reprogramming (bit 0 = enable). */
    otg_control = amdgpu_dc_read_reg(otg_base, AMDGPU_DC_OTG_CONTROL);
    otg_control &= ~0x01U;
    amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_CONTROL, otg_control);
    mb(); /* ensure the disable lands before surface/timing writes */

    /* Surface programming: address split across low/high 32-bit halves. */
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_PRIMARY_ADDR_LOW, (u32)(fb_addr & 0xFFFFFFFFULL));
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_PRIMARY_ADDR_HIGH, (u32)((fb_addr >> 32) & 0xFFFFFFFFULL));
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_SURFACE_PITCH, pitch);
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_SURFACE_CONFIG, AMDGPU_DC_PIXEL_FORMAT_ARGB8888);
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_VIEWPORT_START, 0);
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_VIEWPORT_SIZE, viewport_size);
    /* Flip address mirrors the primary address; flip control 0 = immediate. */
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_FLIP_ADDR_LOW, (u32)(fb_addr & 0xFFFFFFFFULL));
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_FLIP_ADDR_HIGH, (u32)((fb_addr >> 32) & 0xFFFFFFFFULL));
    amdgpu_dc_write_reg(hubp_base, AMDGPU_DC_HUBP_FLIP_CONTROL, 0);

    /* Timing programming. */
    amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_VIEWPORT_SIZE, viewport_size);
    amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_VSYNC_ADJUST, v_sync_adjust);
    amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_H_TOTAL, h_total);
    amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_V_TOTAL, v_total);
    amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_VSTARTUP, vstartup);
    mb(); /* all surface/timing state must be visible before re-enable */

    /* Re-enable the timing generator. */
    otg_control |= 0x01;
    amdgpu_dc_write_reg(otg_base, AMDGPU_DC_OTG_CONTROL, otg_control);

    printk("amdgpu_redox: CRTC %d enabled at %ux%u, fb=%#llx\n",
           crtc_id, width, height, (unsigned long long)fb_addr);
#else
    printk("amdgpu_redox: running on Linux, using AMD DC modesetting\n");
#endif

    return 0;
}
|
||||
@@ -0,0 +1,548 @@
|
||||
#ifndef _REDOX_GLUE_H
#define _REDOX_GLUE_H

/*
 * Redox-specific Linux compatibility surface for the AMDGPU display port.
 * The real build enables this via -D__redox__, but the declarations stay
 * visible unconditionally so editor/LSP diagnostics can parse the sources.
 */

/* ---- Standard types ---- */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Linux sparse-style address-space annotations — no-ops here, kept so the
 * ported sources keep their original signatures. */
#ifndef __iomem
#define __iomem
#endif

#ifndef __user
#define __user
#endif

#ifndef __force
#define __force
#endif

#ifndef __must_check
#define __must_check
#endif

/* Kernel fixed-width integer aliases mapped onto <stdint.h>. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;

typedef unsigned long ulong;
typedef unsigned long long ullong;
typedef unsigned int uint;
/* NOTE(review): phys_addr_t as size_t matches pointer width on the 64-bit
 * targets this port builds for; it would truncate >4 GiB physical addresses
 * on a 32-bit build — confirm only 64-bit targets are supported. */
typedef size_t phys_addr_t;
typedef u64 dma_addr_t;
/* Endian-annotated types collapse to plain integers (no sparse checking). */
typedef u32 __be32;
typedef u16 __be16;
typedef u32 __le32;
typedef u16 __le16;
typedef unsigned int gfp_t;
|
||||
|
||||
/* ---- Kernel replacements ---- */
/* NOTE(review): these GFP values are sequential tags (3 == 1|2), not OR-able
 * bitmask flags like Linux GFP bits — the Redox-side allocator presumably
 * ignores them entirely; confirm no ported code ORs GFP flags together. */
#define GFP_KERNEL 0U
#define GFP_ATOMIC 1U
#define GFP_DMA32 2U
#define GFP_NOWAIT 3U
#define GFP_KERNEL_ACCOUNT 0U

/* Allocators implemented in the Redox stubs (redox_stubs.c / Rust side). */
extern void *kmalloc(size_t size, unsigned int flags);
extern void *kzalloc(size_t size, unsigned int flags);
extern void kfree(const void *ptr);
extern void *vmalloc(unsigned long size);
extern void vfree(const void *addr);
extern void *krealloc(const void *ptr, size_t new_size, unsigned int flags);

/* printk → stderr */
/* All kernel log levels are flattened to tagged stderr lines; dev_* ignore
 * the device argument. */
#define printk(fmt, ...) fprintf(stderr, "[amdgpu] " fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...) fprintf(stderr, "[amdgpu ERR] " fmt, ##__VA_ARGS__)
#define pr_warn(fmt, ...) fprintf(stderr, "[amdgpu WARN] " fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...) fprintf(stderr, "[amdgpu INFO] " fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) fprintf(stderr, "[amdgpu DBG] " fmt, ##__VA_ARGS__)
#define dev_err(dev, fmt, ...) fprintf(stderr, "[amdgpu ERR] " fmt, ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) fprintf(stderr, "[amdgpu WARN] " fmt, ##__VA_ARGS__)
#define dev_info(dev, fmt, ...) fprintf(stderr, "[amdgpu INFO] " fmt, ##__VA_ARGS__)
#define dev_dbg(dev, fmt, ...) fprintf(stderr, "[amdgpu DBG] " fmt, ##__VA_ARGS__)

/* ---- Module system replacement ---- */
/* The driver is a plain shared library — all module plumbing disappears. */
#define module_init(fn) /* noop */
#define module_exit(fn) /* noop */
#define module_param(name, type, perm) /* noop */
#define MODULE_PARM_DESC(name, desc) /* noop */
#define MODULE_LICENSE(license) /* noop */
#define MODULE_AUTHOR(author) /* noop */
#define MODULE_DESCRIPTION(desc) /* noop */
#define MODULE_DEVICE_TABLE(type, table) /* noop */
#define EXPORT_SYMBOL(sym) /* noop */
#define EXPORT_SYMBOL_GPL(sym) /* noop */
#define MODULE_FIRMWARE(fw) /* noop */
#define THIS_MODULE NULL

/* ---- Atomic operations ---- */
/* Counters wrapped in structs, mutated through GCC __sync builtins. */
typedef struct {
    volatile int counter;
} atomic_t;

typedef struct {
    volatile long counter;
} atomic_long_t;

typedef struct {
    volatile u64 counter;
} atomic64_t;

/* read/set are plain (non-atomic) accesses, like Linux's relaxed versions.
 * The __sync forms return the NEW value, matching Linux *_return semantics;
 * cmpxchg returns the OLD value, also matching Linux. */
#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) ((v)->counter = (i))
#define atomic_inc(v) __sync_add_and_fetch(&(v)->counter, 1)
#define atomic_dec(v) __sync_sub_and_fetch(&(v)->counter, 1)
#define atomic_add(i, v) __sync_add_and_fetch(&(v)->counter, (i))
#define atomic_sub(i, v) __sync_sub_and_fetch(&(v)->counter, (i))
#define atomic_inc_return(v) __sync_add_and_fetch(&(v)->counter, 1)
#define atomic_dec_return(v) __sync_sub_and_fetch(&(v)->counter, 1)
#define atomic_cmpxchg(v, oldv, newv) __sync_val_compare_and_swap(&(v)->counter, (oldv), (newv))
|
||||
|
||||
/* ---- Locking ---- */
/* Linux mutex API mapped directly onto pthreads. */
typedef pthread_mutex_t mutex_t;
#define DEFINE_MUTEX(name) pthread_mutex_t name = PTHREAD_MUTEX_INITIALIZER
#define mutex_init(m) pthread_mutex_init((m), NULL)
#define mutex_lock(m) pthread_mutex_lock((m))
#define mutex_unlock(m) pthread_mutex_unlock((m))
#define mutex_destroy(m) pthread_mutex_destroy((m))

/*
 * BUG FIX: the previous definition was `pthread_mutex_trylock((m)) != 0`,
 * which *acquires* the mutex as a side effect whenever it is free and never
 * releases it — the probe itself left the mutex locked, so the next
 * mutex_lock() would deadlock. Probe with trylock, then release immediately
 * if we got it. (Inherently racy, like Linux's mutex_is_locked — callers
 * may only use it as a hint/assertion.)
 */
static inline int mutex_is_locked(pthread_mutex_t *m)
{
    if (pthread_mutex_trylock(m) != 0)
        return 1;
    pthread_mutex_unlock(m);
    return 0;
}

/* Userspace "spinlock": a test-and-set busy loop. IRQ variants are plain
 * lock/unlock — there are no interrupts to mask in a userspace driver. */
typedef struct {
    volatile int lock;
} spinlock_t;

#define spin_lock_init(l) ((l)->lock = 0)
/* BUG FIX: wrapped in do/while(0) so the macro is a single statement —
 * the bare `while (...) {}` form broke `if (x) spin_lock(l); else ...`. */
#define spin_lock(l) do { while (__sync_lock_test_and_set(&(l)->lock, 1)) {} } while (0)
#define spin_unlock(l) __sync_lock_release(&(l)->lock)
#define spin_lock_irqsave(l, flags) do { (flags) = 0; spin_lock((l)); } while (0)
#define spin_unlock_irqrestore(l, flags) do { (void)(flags); spin_unlock((l)); } while (0)
#define spin_lock_irq(l) spin_lock((l))
#define spin_unlock_irq(l) spin_unlock((l))
|
||||
|
||||
/* ---- Power management stubs ---- */
/* Runtime PM is not modelled on Redox: every call "succeeds" (returns 0) so
 * ported get/put pairs balance trivially. The GPU therefore never runtime-
 * suspends — acceptable for a modesetting-only phase. */
#define pm_runtime_get_sync(dev) 0
#define pm_runtime_put_autosuspend(dev) 0
#define pm_runtime_allow(dev) 0
#define pm_runtime_forbid(dev) 0
#define pm_runtime_set_active(dev) 0
#define pm_runtime_enable(dev) 0
#define pm_runtime_disable(dev) 0
#define pm_runtime_idle(dev) 0
#define pm_runtime_put_noidle(dev) 0
#define pm_runtime_get_noresume(dev) 0
#define pm_suspend_ignore_children(dev, enable) /* noop */
|
||||
|
||||
/* ---- I/O memory — maps to redox-driver-sys MmioRegion ---- */
/* Implemented in redox_stubs.c; mappings go through /scheme/memory/physical. */
extern void __iomem *redox_ioremap(phys_addr_t offset, size_t size);
extern void redox_iounmap(void __iomem *addr);
extern void redox_iowrite32(u32 val, void __iomem *addr);
extern u32 redox_ioread32(const void __iomem *addr);
extern void redox_iowrite16(u16 val, void __iomem *addr);
extern u16 redox_ioread16(const void __iomem *addr);
extern void redox_iowrite8(u8 val, void __iomem *addr);
extern u8 redox_ioread8(const void __iomem *addr);
extern void redox_mmio_write32(void *base, u32 offset, u32 val);
extern u32 redox_mmio_read32(void *base, u32 offset);

/* All ioremap flavours map identically; caching attributes (wc/np) are not
 * differentiated here — TODO confirm this is acceptable for framebuffers. */
#define ioremap(offset, size) redox_ioremap((offset), (size))
#define ioremap_wc(offset, size) redox_ioremap((offset), (size))
#define ioremap_np(offset, size) redox_ioremap((offset), (size))
#define iounmap(addr) redox_iounmap((addr))
#define iowrite32(val, addr) redox_iowrite32((val), (addr))
#define ioread32(addr) redox_ioread32((addr))
#define iowrite16(val, addr) redox_iowrite16((val), (addr))
#define ioread16(addr) redox_ioread16((addr))
#define iowrite8(val, addr) redox_iowrite8((val), (addr))
#define ioread8(addr) redox_ioread8((addr))

/* Raw volatile accessors.  NOTE(review): unlike Linux, these carry no
 * implicit memory barrier — callers needing ordering must add mb()/wmb(). */
#define writel(val, addr) (*(volatile u32 *)(addr) = (val))
#define readl(addr) (*(volatile const u32 *)(addr))
#define writew(val, addr) (*(volatile u16 *)(addr) = (val))
#define readw(addr) (*(volatile const u16 *)(addr))
#define writeb(val, addr) (*(volatile u8 *)(addr) = (val))
#define readb(addr) (*(volatile const u8 *)(addr))
#define writeq(val, addr) (*(volatile u64 *)(addr) = (val))
#define readq(addr) (*(volatile const u64 *)(addr))

/* ---- Memory barriers ---- */
/* All barrier flavours collapse to a full __sync_synchronize() fence,
 * which over-satisfies every Linux variant. */
#define mb() __sync_synchronize()
#define rmb() __sync_synchronize()
#define wmb() __sync_synchronize()
#define smp_mb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()
#define smp_wmb() __sync_synchronize()
/* Compiler-only barrier: prevents reordering across it, emits no fence. */
#define barrier() __asm__ __volatile__("" : : : "memory")
||||
/* ---- DMA mapping — maps to redox-driver-sys DmaBuffer ---- */
extern void *redox_dma_alloc_coherent(size_t size, dma_addr_t *dma_handle);
extern void redox_dma_free_coherent(size_t size, void *vaddr, dma_addr_t dma_handle);

/* `dev` and gfp flags are ignored; allocation is plain page-aligned heap. */
#define dma_alloc_coherent(dev, size, dma_handle, flags) redox_dma_alloc_coherent((size), (dma_handle))
#define dma_free_coherent(dev, size, vaddr, dma_handle) redox_dma_free_coherent((size), (vaddr), (dma_handle))
/* NOTE(review): dma_map_page returns handle 0 while dma_mapping_error always
 * reports success, so callers cannot detect that page mapping is unimplemented
 * — confirm no P2 path relies on dma_map_page. */
#define dma_map_page(dev, page, offset, size, dir) ((dma_addr_t)0)
#define dma_unmap_page(dev, addr, size, dir) /* noop */
/* Identity mapping: the "bus address" is just the virtual address. */
#define dma_map_single(dev, ptr, size, dir) ((dma_addr_t)(uintptr_t)(ptr))
#define dma_unmap_single(dev, addr, size, dir) /* noop */
#define dma_mapping_error(dev, addr) 0
||||
/* ---- PCI — maps to redox-driver-sys PCI ---- */

/* Flattened stand-in for Linux's struct pci_dev; filled in by the stub
 * discovery code in redox_stubs.c. */
struct pci_dev {
    u16 vendor;                   /* PCI vendor id (0x1002 for AMD) */
    u16 device;                   /* PCI device id */
    u8 revision;
    u8 irq;                       /* legacy IRQ line */
    phys_addr_t resource_start[6]; /* BAR base addresses */
    u64 resource_len[6];           /* BAR sizes */
    u32 resource_flags[6];         /* IORESOURCE_* per BAR */
    void *driver_data;
    void __iomem *mmio_base;       /* cached mapping of the register BAR */
    int is_amdgpu;                 /* set when this is the AMD GPU function */
};

extern struct pci_dev *redox_pci_find_amd_gpu(void);
extern void redox_pci_dev_put(struct pci_dev *pdev);
extern int redox_pci_enable_device(struct pci_dev *pdev);
extern void redox_pci_set_master(struct pci_dev *pdev);
extern int redox_pci_request_regions(struct pci_dev *pdev, const char *name);
extern void redox_pci_release_regions(struct pci_dev *pdev);

/* NOTE(review): vendor/device/from are discarded — every lookup returns the
 * single AMD GPU, so this compat layer supports exactly one device. */
#define pci_get_device(vendor, device, from) redox_pci_find_amd_gpu()
#define pci_dev_put(pdev) redox_pci_dev_put((pdev))
#define pci_enable_device(pdev) redox_pci_enable_device((pdev))
#define pci_set_master(pdev) redox_pci_set_master((pdev))
#define pci_request_regions(pdev, name) redox_pci_request_regions((pdev), (name))
#define pci_release_regions(pdev) redox_pci_release_regions((pdev))
#define pci_resource_start(pdev, bar) ((pdev)->resource_start[(bar)])
#define pci_resource_len(pdev, bar) ((pdev)->resource_len[(bar)])
#define pci_resource_flags(pdev, bar) ((pdev)->resource_flags[(bar)])
/* Inclusive end address, Linux convention (start + len - 1). */
#define pci_resource_end(pdev, bar) ((pdev)->resource_start[(bar)] + (pdev)->resource_len[(bar)] - 1)

/* Resource-type bits, numerically identical to Linux ioport.h. */
#define IORESOURCE_MEM 0x00000200U
#define IORESOURCE_IO 0x00000100U
#define IORESOURCE_MEM_64 0x00040000U
#define IORESOURCE_PREFETCH 0x00001000U
||||
/* ---- Firmware loading — maps to scheme:firmware ---- */

/* Mirrors include/linux/firmware.h: an immutable blob + its length. */
struct firmware {
    size_t size;
    const u8 *data;
};

extern int redox_request_firmware(const struct firmware **fw, const char *name, void *dev);
extern void redox_release_firmware(const struct firmware *fw);

#define request_firmware(fw, name, dev) redox_request_firmware((fw), (name), (dev))
#define release_firmware(fw) redox_release_firmware((fw))

/* ---- Device model ---- */

/* Minimal struct device: just drvdata plus a backlink to the PCI function. */
struct device {
    void *driver_data;
    struct pci_dev *pci_dev;
};

#define dev_get_drvdata(dev) ((dev)->driver_data)
#define dev_set_drvdata(dev, data) ((dev)->driver_data = (data))

/* ---- Interrupts ---- */

/* Linux-shaped handler; return value (IRQ_HANDLED/IRQ_NONE) is an int here. */
typedef int (*irq_handler_t)(int irq, void *dev_id);
extern int redox_request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev);
extern void redox_free_irq(unsigned int irq, void *dev_id);

/* Flag values copied from Linux interrupt.h for source compatibility. */
#define IRQF_SHARED 0x00000080UL
#define IRQF_TRIGGER_FALLING 0x00000002UL
||||
/* ---- Workqueue ---- */
/* NOTE(review): there is no worker thread — schedule_work() runs the
 * callback SYNCHRONOUSLY in the caller's context, and delayed work runs
 * immediately with the delay discarded.  Callers that schedule work from
 * within a lock the callback also takes will self-deadlock; confirm DC's
 * usage before Phase P5. */

struct work_struct {
    void (*func)(struct work_struct *work);
};

struct delayed_work {
    struct work_struct work;
    unsigned long delay; /* recorded but never honored */
};

#define INIT_WORK(w, fn) ((w)->func = (fn))
#define INIT_DELAYED_WORK(w, fn) INIT_WORK(&(w)->work, (fn))
#define schedule_work(w) do { if ((w)->func) { (w)->func((w)); } } while (0)
#define schedule_delayed_work(w, delayv) do { (void)(delayv); if ((w)->work.func) { (w)->work.func(&(w)->work); } } while (0)
/* Cancellation/flush are no-ops: work already ran synchronously. */
#define cancel_work_sync(w) /* noop */
#define cancel_delayed_work_sync(w) /* noop */
#define flush_workqueue(wq) /* noop */
#define flush_scheduled_work() /* noop */
||||
/* ---- Completion ---- */

/* Linux completion mapped onto a pthread mutex + condvar.  `done` is a
 * latch set by complete(); reinit_completion() clears it without touching
 * the pthread objects. */
struct completion {
    volatile int done;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
};

#define init_completion(c) do { \
(c)->done = 0; \
pthread_mutex_init(&(c)->mutex, NULL); \
pthread_cond_init(&(c)->cond, NULL); \
} while (0)

#define reinit_completion(c) do { (c)->done = 0; } while (0)

#define complete(c) do { \
pthread_mutex_lock(&(c)->mutex); \
(c)->done = 1; \
pthread_cond_broadcast(&(c)->cond); \
pthread_mutex_unlock(&(c)->mutex); \
} while (0)

#define wait_for_completion(c) do { \
pthread_mutex_lock(&(c)->mutex); \
while (!(c)->done) { \
pthread_cond_wait(&(c)->cond, &(c)->mutex); \
} \
pthread_mutex_unlock(&(c)->mutex); \
} while (0)

/* NOTE(review): never actually times out — it blocks until completion and
 * then reports "1 jiffy remaining".  Code that uses the timeout to detect
 * hung hardware will hang here instead; confirm acceptable for P2. */
#define wait_for_completion_timeout(c, timeout) ({ (void)(timeout); wait_for_completion((c)); 1UL; })
||||
/* ---- Error helpers ---- */
#ifndef EOPNOTSUPP
#define EOPNOTSUPP 95
#endif

/* Linux ERR_PTR convention: the top 4095 addresses encode negative errnos. */
#define IS_ERR(ptr) ((unsigned long)(uintptr_t)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
#define ERR_PTR(err) ((void *)(intptr_t)(err))
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))

/* ---- Min/Max ---- */
/* NOTE(review): unlike Linux's versions these evaluate their arguments
 * twice — avoid side effects in the arguments. */
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define clamp(val, lo, hi) min(max((val), (lo)), (hi))
#define clamp_t(type, val, lo, hi) ((type)clamp((val), (lo), (hi)))
#define clamp_val(val, lo, hi) clamp((val), (lo), (hi))
/* GNU typeof makes this type-generic; evaluates a/b as lvalues once each. */
#define swap(a, b) do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/* ---- DIV_ROUND_UP, alignment ---- */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_UP_ULL(n, d) DIV_ROUND_UP((n), (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + ((d) / 2)) / (d))
/* Bit-mask rounding: `a` must be a power of two. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define PAGE_SHIFT 12
#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) ALIGN((x), PAGE_SIZE)

/* ---- msleep, udelay — implemented in redox_stubs.c ---- */
extern void msleep(unsigned int msecs);
extern void udelay(unsigned long usecs);
extern void mdelay(unsigned long msecs);
/* Emulated tick counter; advanced by the sleep shims (1 jiffy == 1 ms). */
extern unsigned long jiffies;
extern unsigned long msecs_to_jiffies(unsigned int msecs);
extern unsigned long usecs_to_jiffies(unsigned int usecs);
||||
/* ---- Kconfig macros ---- */

/*
 * Kernel-style IS_ENABLED(): evaluates to 1 when the CONFIG_* macro is
 * defined to 1 and to 0 when it is undefined or defined to 0.  The previous
 * hard-coded `0` contradicted both the CONFIG_* values below and the -D
 * flags passed by the recipe build script, silently disabling every
 * IS_ENABLED()-guarded code path.  Implementation follows the standard
 * Kbuild token-paste trick (see linux/kconfig.h).
 */
#define __RB_ARG_PLACEHOLDER_1 0,
#define __rb_take_second_arg(__ignored, val, ...) val
#define __rb_is_defined(x) ___rb_is_defined(x)
#define ___rb_is_defined(val) ____rb_is_defined(__RB_ARG_PLACEHOLDER_##val)
#define ____rb_is_defined(arg1_or_junk) __rb_take_second_arg(arg1_or_junk 1, 0)

#define IS_ENABLED(option) __rb_is_defined(option)
/* No loadable-module support on Redox: reachable iff enabled. */
#define IS_REACHABLE(option) IS_ENABLED(option)

/* Defaults; the recipe may override these via -D flags. */
#ifndef CONFIG_DRM_AMDGPU
#define CONFIG_DRM_AMDGPU 1
#endif
#ifndef CONFIG_DRM_AMD_DC
#define CONFIG_DRM_AMD_DC 1
#endif
#ifndef CONFIG_DRM_AMD_DC_FP
#define CONFIG_DRM_AMD_DC_FP 1
#endif
#ifndef CONFIG_DRM_AMD_ACP
#define CONFIG_DRM_AMD_ACP 0
#endif
|
||||
/* Remaining Kconfig defaults for the P2 (modesetting-only) build: display
 * core + DCN + DSC on; everything optional (HDCP, ACPI, hwmon, PM,
 * backlight, debugfs, legacy SI/CIK asics) off.  Each is overridable with
 * a -D flag from the recipe. */
#ifndef CONFIG_DRM_AMD_SECURE_DISPLAY
#define CONFIG_DRM_AMD_SECURE_DISPLAY 0
#endif
#ifndef CONFIG_DRM_AMDGPU_SI
#define CONFIG_DRM_AMDGPU_SI 0
#endif
#ifndef CONFIG_DRM_AMDGPU_CIK
#define CONFIG_DRM_AMDGPU_CIK 0
#endif
#ifndef CONFIG_DEBUG_FS
#define CONFIG_DEBUG_FS 0
#endif
#ifndef CONFIG_FAULT_INJECTION
#define CONFIG_FAULT_INJECTION 0
#endif
#ifndef CONFIG_ACPI
#define CONFIG_ACPI 0
#endif
#ifndef CONFIG_HWMON
#define CONFIG_HWMON 0
#endif
#ifndef CONFIG_PM
#define CONFIG_PM 0
#endif
#ifndef CONFIG_SLEEP
#define CONFIG_SLEEP 0
#endif
#ifndef CONFIG_BACKLIGHT_CLASS_DEVICE
#define CONFIG_BACKLIGHT_CLASS_DEVICE 0
#endif
#ifndef CONFIG_BACKLIGHT_LCD_SUPPORT
#define CONFIG_BACKLIGHT_LCD_SUPPORT 0
#endif
#ifndef CONFIG_DRM_AMD_DC_HDCP
#define CONFIG_DRM_AMD_DC_HDCP 0
#endif
#ifndef CONFIG_DRM_AMD_DC_DSC
#define CONFIG_DRM_AMD_DC_DSC 1
#endif
#ifndef CONFIG_DRM_AMD_DC_DCN
#define CONFIG_DRM_AMD_DC_DCN 1
#endif
#ifndef CONFIG_DRM_AMD_DC_DML2
#define CONFIG_DRM_AMD_DC_DML2 0
#endif
#ifndef CONFIG_DRM_AMD_DC_SMU
#define CONFIG_DRM_AMD_DC_SMU 0
#endif
||||
|
||||
/* ---- Linked list ---- */

/* Circular doubly-linked list with Linux list.h semantics: an empty list
 * is a head pointing at itself. */
struct list_head {
    struct list_head *next;
    struct list_head *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list) {
    list->prev = list;
    list->next = list;
}

/* Splice `entry` between two known-consecutive nodes. */
static inline void __rb_list_insert(struct list_head *entry,
                                    struct list_head *before,
                                    struct list_head *after) {
    after->prev = entry;
    entry->next = after;
    entry->prev = before;
    before->next = entry;
}

/* Insert right after the head (stack order). */
static inline void list_add(struct list_head *new_entry, struct list_head *head) {
    __rb_list_insert(new_entry, head, head->next);
}

/* Insert right before the head (queue order). */
static inline void list_add_tail(struct list_head *new_entry, struct list_head *head) {
    __rb_list_insert(new_entry, head->prev, head);
}

/* Unlink `entry` and poison its links to catch use-after-delete. */
static inline void list_del(struct list_head *entry) {
    entry->next->prev = entry->prev;
    entry->prev->next = entry->next;
    entry->next = (struct list_head *)(uintptr_t)0xDEADBEEF;
    entry->prev = (struct list_head *)(uintptr_t)0xDEADBEEF;
}

static inline int list_empty(const struct list_head *head) {
    return head->next == head;
}

/* Container lookup and iteration helpers, as in Linux list.h. */
#define list_entry(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each(pos, head) for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
/* _safe variant tolerates deleting `pos` during iteration. */
#define list_for_each_safe(pos, n, head) for ((pos) = (head)->next, (n) = (pos)->next; (pos) != (head); (pos) = (n), (n) = (pos)->next)
#define list_for_each_entry(pos, head, member) \
    for ((pos) = list_entry((head)->next, typeof(*(pos)), member); \
         &(pos)->member != (head); \
         (pos) = list_entry((pos)->member.next, typeof(*(pos)), member))
|
||||
|
||||
/* ---- IDR ---- */
/*
 * Degenerate IDR: a monotonically increasing counter with no reverse map.
 * idr_find() therefore always returns NULL and idr_for_each_entry()
 * iterates zero times — sufficient for allocation-only users in the DC
 * path.  Ids are never recycled, so idr_remove() is a no-op.
 */
struct idr {
    int next_id; /* next id to hand out; starts at 1 like Linux */
};

#define DEFINE_IDR(name) struct idr name = { .next_id = 1 }

/* Allocate the next id, honoring the caller's requested lower bound.
 * (The previous version ignored `start` and could return ids below the
 * requested [start, end) range.)  `end` is not enforced since ids are
 * never recycled and callers here use generous ranges. */
static inline int idr_alloc(struct idr *idr, void *ptr, int start, int end, int flags) {
    (void)ptr;
    (void)end;
    (void)flags;
    if (idr->next_id < start) {
        idr->next_id = start;
    }
    return idr->next_id++;
}

/* No id -> pointer map is maintained; lookups always miss. */
static inline void *idr_find(struct idr *idr, int id) {
    (void)idr;
    (void)id;
    return NULL;
}

static inline void idr_remove(struct idr *idr, int id) {
    (void)idr;
    (void)id;
}

static inline void idr_destroy(struct idr *idr) {
    (void)idr;
}

#define idr_for_each_entry(idr, entry, id) for ((id) = 0; ((entry) = idr_find((idr), (id))) != NULL; (id)++)
|
||||
|
||||
/* ---- Misc ---- */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define BITS_PER_LONG (sizeof(long) * 8)
#define BIT(n) (1UL << (n))
/* Contiguous bit masks [h:l], matching linux/bits.h semantics. */
#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - 1 - (h))) & (~0UL << (l)))
#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & (~0ULL << (l)))
#define container_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* WARN_ON logs to stderr and yields the condition, like the kernel macro. */
#define WARN_ON(condition) ({ int __ret = !!(condition); if (__ret) fprintf(stderr, "WARN_ON: %s at %s:%d\n", #condition, __FILE__, __LINE__); __ret; })
/* Not actually once-only in this shim; it warns every time. */
#define WARN_ON_ONCE(condition) WARN_ON(condition)
#define BUG_ON(condition) do { if (condition) { fprintf(stderr, "BUG: %s at %s:%d\n", #condition, __FILE__, __LINE__); abort(); } } while (0)
/* Negative array size forces a compile-time error when condition holds. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
|
||||
|
||||
/* ---- Enum constants ---- */
/* DPMS power states, numerically identical to drm_mode.h. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3

/* Connector hotplug-polling capability bits. */
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)

/* ---- Minimal DRM structures ---- */
/* Only the fields the DC glue actually touches are modelled. */
struct drm_device {
    void *dev_private; /* amdgpu_device */
    struct device *dev;
};

struct drm_file {
    int filp; /* client fd within the drm scheme */
};

struct drm_mode_object {
    int id;
    int type;
};

/* ---- DRM logging helpers ---- */
/* Debug channels are compiled out; err/info/warn go to stderr unfiltered. */
#define drm_dbg_core(dev, fmt, ...) /* noop */
#define drm_dbg_kms(dev, fmt, ...) /* noop */
#define drm_err(dev, fmt, ...) fprintf(stderr, "[drm ERR] " fmt, ##__VA_ARGS__)
#define drm_info(dev, fmt, ...) fprintf(stderr, "[drm INFO] " fmt, ##__VA_ARGS__)
#define drm_warn(dev, fmt, ...) fprintf(stderr, "[drm WARN] " fmt, ##__VA_ARGS__)
#define drm_dbg(dev, fmt, ...) /* noop */
||||
|
||||
#endif /* _REDOX_GLUE_H */
|
||||
@@ -0,0 +1,380 @@
|
||||
#include "redox_glue.h"

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
|
||||
|
||||
/* Emulated jiffies counter (1 jiffy == 1 ms); advanced by the sleep shims. */
unsigned long jiffies;

/* One live ioremap mapping, kept in the g_regions singly-linked list so
 * redox_iounmap() can recover the size and backing fd. */
struct redox_mapped_region {
    void *addr;  /* mapped virtual address — the lookup key */
    size_t size; /* mapping length in bytes */
    int fd;      /* backing scheme fd, or -1 for the anonymous fallback */
    struct redox_mapped_region *next;
};

/* Protects g_regions; mappings may be created/torn down from any thread. */
static pthread_mutex_t g_region_lock = PTHREAD_MUTEX_INITIALIZER;
static struct redox_mapped_region *g_regions;

/* Atomically bump the emulated tick counter. */
static void redox_jiffies_advance(unsigned long delta)
{
    __sync_add_and_fetch(&jiffies, delta);
}
|
||||
|
||||
/*
 * Kernel allocator shims.  GFP flags carry no meaning in a userspace
 * daemon, so every variant funnels straight into the libc heap.
 */
void *kmalloc(size_t size, unsigned int flags)
{
    (void)flags; /* gfp_t ignored */
    return malloc(size);
}

/* Zeroed allocation, like kmalloc(..., __GFP_ZERO). */
void *kzalloc(size_t size, unsigned int flags)
{
    (void)flags;
    return calloc(1, size);
}

void kfree(const void *ptr)
{
    /* Cast away const: kernel API takes const, libc free does not. */
    free((void *)ptr);
}

/* vmalloc need not be physically contiguous, so plain malloc suffices. */
void *vmalloc(unsigned long size)
{
    return malloc((size_t)size);
}

void vfree(const void *addr)
{
    free((void *)addr);
}

void *krealloc(const void *ptr, size_t new_size, unsigned int flags)
{
    (void)flags;
    return realloc((void *)ptr, new_size);
}
|
||||
|
||||
static void redox_track_region(void *addr, size_t size, int fd)
|
||||
{
|
||||
struct redox_mapped_region *region = malloc(sizeof(*region));
|
||||
if (!region) {
|
||||
if (fd >= 0) {
|
||||
close(fd);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
region->addr = addr;
|
||||
region->size = size;
|
||||
region->fd = fd;
|
||||
|
||||
pthread_mutex_lock(&g_region_lock);
|
||||
region->next = g_regions;
|
||||
g_regions = region;
|
||||
pthread_mutex_unlock(&g_region_lock);
|
||||
}
|
||||
|
||||
static struct redox_mapped_region *redox_untrack_region(const void *addr)
|
||||
{
|
||||
struct redox_mapped_region *prev = NULL;
|
||||
struct redox_mapped_region *cur;
|
||||
|
||||
pthread_mutex_lock(&g_region_lock);
|
||||
cur = g_regions;
|
||||
while (cur) {
|
||||
if (cur->addr == addr) {
|
||||
if (prev) {
|
||||
prev->next = cur->next;
|
||||
} else {
|
||||
g_regions = cur->next;
|
||||
}
|
||||
pthread_mutex_unlock(&g_region_lock);
|
||||
return cur;
|
||||
}
|
||||
prev = cur;
|
||||
cur = cur->next;
|
||||
}
|
||||
pthread_mutex_unlock(&g_region_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Map a physical MMIO range through /scheme/memory/physical.  If the scheme
 * is unavailable (e.g. off-target testing), hand back zeroed anonymous
 * memory instead so register accesses don't fault — reads then return 0 and
 * writes are absorbed.
 */
void __iomem *redox_ioremap(phys_addr_t offset, size_t size)
{
    void *mapping;
    int fd = open("/scheme/memory/physical", O_RDWR);

    if (fd >= 0) {
        mapping = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)offset);
        if (mapping != MAP_FAILED) {
            redox_track_region(mapping, size, fd);
            return mapping;
        }
        close(fd);
    }

    /* Fallback: fake MMIO backed by anonymous memory. */
    mapping = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mapping == MAP_FAILED) {
        pr_err("ioremap fallback failed for %#llx (%zu bytes): %s\n",
               (unsigned long long)offset, size, strerror(errno));
        return NULL;
    }

    memset(mapping, 0, size);
    redox_track_region(mapping, size, -1); /* -1: no backing fd */
    return mapping;
}
|
||||
|
||||
/* Tear down a mapping created by redox_ioremap().  Addresses that were
 * never tracked (or NULL) are silently ignored. */
void redox_iounmap(void __iomem *addr)
{
    struct redox_mapped_region *region;

    if (addr == NULL) {
        return;
    }

    region = redox_untrack_region(addr);
    if (region == NULL) {
        return; /* not one of ours */
    }

    munmap(region->addr, region->size);
    if (region->fd >= 0) {
        close(region->fd); /* release the scheme descriptor */
    }
    free(region);
}
|
||||
|
||||
void redox_iowrite32(u32 val, void __iomem *addr)
|
||||
{
|
||||
*(volatile u32 *)addr = val;
|
||||
}
|
||||
|
||||
u32 redox_ioread32(const void __iomem *addr)
|
||||
{
|
||||
return *(volatile const u32 *)addr;
|
||||
}
|
||||
|
||||
void redox_iowrite16(u16 val, void __iomem *addr)
|
||||
{
|
||||
*(volatile u16 *)addr = val;
|
||||
}
|
||||
|
||||
u16 redox_ioread16(const void __iomem *addr)
|
||||
{
|
||||
return *(volatile const u16 *)addr;
|
||||
}
|
||||
|
||||
void redox_iowrite8(u8 val, void __iomem *addr)
|
||||
{
|
||||
*(volatile u8 *)addr = val;
|
||||
}
|
||||
|
||||
u8 redox_ioread8(const void __iomem *addr)
|
||||
{
|
||||
return *(volatile const u8 *)addr;
|
||||
}
|
||||
|
||||
void redox_mmio_write32(void *base, u32 offset, u32 val)
|
||||
{
|
||||
if (!base) {
|
||||
return;
|
||||
}
|
||||
*(volatile u32 *)((u8 *)base + offset) = val;
|
||||
}
|
||||
|
||||
u32 redox_mmio_read32(void *base, u32 offset)
|
||||
{
|
||||
if (!base) {
|
||||
return 0;
|
||||
}
|
||||
return *(volatile u32 *)((u8 *)base + offset);
|
||||
}
|
||||
|
||||
/*
 * "Coherent" DMA memory: page-aligned, zeroed heap memory.
 * NOTE(review): the returned handle is the *virtual* address — only valid
 * while no IOMMU/physical translation is in play; confirm before enabling
 * real GPU DMA in Phase P5.
 */
void *redox_dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
{
    size_t aligned = PAGE_ALIGN(size);
    void *buf = NULL;

    if (posix_memalign(&buf, PAGE_SIZE, aligned) != 0) {
        return NULL;
    }

    memset(buf, 0, aligned);
    if (dma_handle != NULL) {
        *dma_handle = (dma_addr_t)(uintptr_t)buf;
    }
    return buf;
}

void redox_dma_free_coherent(size_t size, void *vaddr, dma_addr_t dma_handle)
{
    (void)size;
    (void)dma_handle; /* identity-mapped; vaddr is the allocation */
    free(vaddr);
}
|
||||
|
||||
struct pci_dev *redox_pci_find_amd_gpu(void)
|
||||
{
|
||||
static struct pci_dev dev = {
|
||||
.vendor = 0x1002,
|
||||
.device = 0,
|
||||
.revision = 0,
|
||||
.irq = 0,
|
||||
.resource_start = {0},
|
||||
.resource_len = {0},
|
||||
.resource_flags = {IORESOURCE_MEM, 0, 0, 0, 0, 0},
|
||||
.driver_data = NULL,
|
||||
.mmio_base = NULL,
|
||||
.is_amdgpu = 1,
|
||||
};
|
||||
|
||||
return &dev;
|
||||
}
|
||||
|
||||
void redox_pci_dev_put(struct pci_dev *pdev)
|
||||
{
|
||||
(void)pdev;
|
||||
}
|
||||
|
||||
int redox_pci_enable_device(struct pci_dev *pdev)
|
||||
{
|
||||
return pdev ? 0 : -ENODEV;
|
||||
}
|
||||
|
||||
void redox_pci_set_master(struct pci_dev *pdev)
|
||||
{
|
||||
(void)pdev;
|
||||
}
|
||||
|
||||
int redox_pci_request_regions(struct pci_dev *pdev, const char *name)
|
||||
{
|
||||
(void)name;
|
||||
return pdev ? 0 : -ENODEV;
|
||||
}
|
||||
|
||||
void redox_pci_release_regions(struct pci_dev *pdev)
|
||||
{
|
||||
(void)pdev;
|
||||
}
|
||||
|
||||
int redox_request_firmware(const struct firmware **fw, const char *name, void *dev)
|
||||
{
|
||||
char path[512];
|
||||
int fd;
|
||||
struct stat st;
|
||||
struct firmware *image;
|
||||
u8 *data;
|
||||
ssize_t nread;
|
||||
|
||||
(void)dev;
|
||||
if (!fw || !name) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
snprintf(path, sizeof(path), "/scheme/firmware/amdgpu/%s", name);
|
||||
fd = open(path, O_RDONLY);
|
||||
if (fd < 0) {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
if (fstat(fd, &st) != 0 || st.st_size < 0) {
|
||||
close(fd);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
image = calloc(1, sizeof(*image));
|
||||
data = malloc((size_t)st.st_size);
|
||||
if (!image || !data) {
|
||||
free(image);
|
||||
free(data);
|
||||
close(fd);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
nread = read(fd, data, (size_t)st.st_size);
|
||||
close(fd);
|
||||
if (nread != st.st_size) {
|
||||
free(image);
|
||||
free(data);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
image->size = (size_t)st.st_size;
|
||||
image->data = data;
|
||||
*fw = image;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void redox_release_firmware(const struct firmware *fw)
|
||||
{
|
||||
struct firmware *owned = (struct firmware *)fw;
|
||||
|
||||
if (!owned) {
|
||||
return;
|
||||
}
|
||||
|
||||
free((void *)owned->data);
|
||||
free(owned);
|
||||
}
|
||||
|
||||
/*
 * IRQ registration stub: verifies the IRQ scheme path is openable, then
 * closes it again.  NOTE(review): the handler is not retained or wired to
 * any event loop — interrupt delivery is not functional yet.
 */
int redox_request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev)
{
    char path[128];
    int probe_fd;

    (void)handler;
    (void)flags;
    (void)name;
    (void)dev;

    snprintf(path, sizeof(path), "/scheme/irq/%u", irq);
    probe_fd = open(path, O_RDWR);
    if (probe_fd < 0) {
        return -ENOENT;
    }

    close(probe_fd);
    return 0;
}

/* Matching no-op: nothing was retained at request time. */
void redox_free_irq(unsigned int irq, void *dev_id)
{
    (void)irq;
    (void)dev_id;
}
|
||||
|
||||
void msleep(unsigned int msecs)
|
||||
{
|
||||
struct timespec ts;
|
||||
|
||||
ts.tv_sec = msecs / 1000U;
|
||||
ts.tv_nsec = (long)(msecs % 1000U) * 1000000L;
|
||||
nanosleep(&ts, NULL);
|
||||
redox_jiffies_advance(msecs_to_jiffies(msecs));
|
||||
}
|
||||
|
||||
void udelay(unsigned long usecs)
|
||||
{
|
||||
struct timespec ts;
|
||||
|
||||
ts.tv_sec = usecs / 1000000UL;
|
||||
ts.tv_nsec = (long)(usecs % 1000000UL) * 1000L;
|
||||
nanosleep(&ts, NULL);
|
||||
redox_jiffies_advance(usecs_to_jiffies((unsigned int)usecs));
|
||||
}
|
||||
|
||||
void mdelay(unsigned long msecs)
|
||||
{
|
||||
msleep((unsigned int)msecs);
|
||||
}
|
||||
|
||||
unsigned long msecs_to_jiffies(unsigned int msecs)
|
||||
{
|
||||
return (unsigned long)msecs;
|
||||
}
|
||||
|
||||
unsigned long usecs_to_jiffies(unsigned int usecs)
|
||||
{
|
||||
return (unsigned long)DIV_ROUND_UP(usecs, 1000U);
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
[source]
|
||||
path = "source"
|
||||
|
||||
[build]
|
||||
template = "cargo"
|
||||
dependencies = [
|
||||
"redox-driver-sys",
|
||||
"linux-kpi",
|
||||
]
|
||||
@@ -0,0 +1,20 @@
|
||||
[package]
|
||||
name = "redox-drm"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "DRM scheme daemon for Redox OS — provides GPU modesetting and buffer management"
|
||||
|
||||
[dependencies]
|
||||
redox-driver-sys = { version = "0.1", path = "../../drivers/redox-driver-sys/source" }
|
||||
linux-kpi = { version = "0.1", path = "../../drivers/linux-kpi/source" }
|
||||
libredox = "0.1"
|
||||
redox_syscall = { version = "0.7", features = ["std"] }
|
||||
syscall04 = { package = "redox_syscall", version = "0.4" }
|
||||
redox_scheme = { package = "redox-scheme", version = "0.1" }
|
||||
log = "0.4"
|
||||
thiserror = "2"
|
||||
bitflags = "2"
|
||||
|
||||
[patch.crates-io]
|
||||
redox-driver-sys = { path = "../../drivers/redox-driver-sys/source" }
|
||||
linux-kpi = { path = "../../drivers/linux-kpi/source" }
|
||||
@@ -0,0 +1,60 @@
|
||||
use std::env;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
/// File name of the prebuilt AMD DC core shared library this crate links against.
const LIB_NAME: &str = "libamdgpu_dc_redox.so";
/// Environment variables whose values are treated as base directories to
/// search for [`LIB_NAME`], in priority order.
const ENV_HINTS: &[&str] = &[
    "AMDGPU_DC_LIB_DIR",
    "COOKBOOK_STAGE",
    "REDOX_SYSROOT",
    "SYSROOT",
    "TARGET_SYSROOT",
];
|
||||
|
||||
/// Append the conventional library locations under `base` to `candidates`:
/// the base itself, then the Redox driver dir, `lib`, and `usr/lib`.
fn push_candidate_dirs(candidates: &mut Vec<PathBuf>, base: &Path) {
    candidates.push(base.to_path_buf());
    let subdirs = ["usr/lib/redox/drivers", "lib", "usr/lib"];
    candidates.extend(subdirs.iter().map(|sub| base.join(sub)));
}
|
||||
|
||||
/// Ask Cargo to re-run this build script whenever `path` changes.
fn register_candidate_watch(path: &Path) {
    let directive = format!("cargo:rerun-if-changed={}", path.display());
    println!("{directive}");
}
|
||||
|
||||
fn find_amdgpu_dc_library(manifest_dir: &Path) -> Option<PathBuf> {
|
||||
let mut candidates = Vec::new();
|
||||
|
||||
for key in ENV_HINTS {
|
||||
println!("cargo:rerun-if-env-changed={key}");
|
||||
if let Some(value) = env::var_os(key) {
|
||||
push_candidate_dirs(&mut candidates, Path::new(&value));
|
||||
}
|
||||
}
|
||||
|
||||
push_candidate_dirs(&mut candidates, &manifest_dir.join("../amdgpu"));
|
||||
push_candidate_dirs(&mut candidates, &manifest_dir.join("../amdgpu/stage"));
|
||||
|
||||
for dir in candidates {
|
||||
register_candidate_watch(&dir.join(LIB_NAME));
|
||||
if dir.join(LIB_NAME).exists() {
|
||||
return Some(dir);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn main() {
|
||||
println!("cargo:rustc-check-cfg=cfg(no_amdgpu_c)");
|
||||
|
||||
let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("missing manifest dir"));
|
||||
|
||||
if let Some(dir) = find_amdgpu_dc_library(&manifest_dir) {
|
||||
println!("cargo:rustc-link-search=native={}", dir.display());
|
||||
println!("cargo:rustc-link-lib=amdgpu_dc_redox");
|
||||
println!("cargo:rustc-link-lib=pthread");
|
||||
println!("cargo:rustc-link-lib=m");
|
||||
} else {
|
||||
println!("cargo:rustc-cfg=no_amdgpu_c");
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,201 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use log::{debug, warn};
|
||||
|
||||
use crate::driver::{DriverError, Result};
|
||||
use crate::gem::GemHandle;
|
||||
|
||||
/// Metadata recorded for a GEM object exported as a DMA-BUF.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DmabufInfo {
    /// Physical address of the backing memory; 0 means "not yet known".
    pub phys_addr: usize,
    /// Buffer size in bytes; 0 means "not yet known".
    pub size: u64,
    /// GEM handle the export refers to.
    pub gem_handle: GemHandle,
}

/// Book-keeping for one exported GEM object, shared by all of its fds.
#[derive(Clone, Debug)]
struct DmabufEntry {
    #[allow(dead_code)]
    info: DmabufInfo,
    /// Scheme path advertised for this export (diagnostics only).
    #[allow(dead_code)]
    scheme_path: String,
    /// Number of outstanding synthetic fds referring to this entry.
    #[allow(dead_code)]
    refcount: usize,
}

/// Tracks synthetic DMA-BUF file descriptors.  Real fd passing between
/// schemes is not available yet, so monotonically increasing integer ids
/// stand in for fds.
pub struct DmabufManager {
    /// Next synthetic fd to hand out.
    #[allow(dead_code)]
    next_fd: i32,
    /// fd -> GEM handle for every live export.
    #[allow(dead_code)]
    exported: BTreeMap<i32, GemHandle>,
    /// GEM handle -> shared entry (refcounted across duplicated fds).
    #[allow(dead_code)]
    entries: BTreeMap<GemHandle, DmabufEntry>,
}
|
||||
|
||||
impl DmabufManager {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
next_fd: 10_000,
|
||||
exported: BTreeMap::new(),
|
||||
entries: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
    /// Export `handle` with no physical-address/size metadata.
    /// Convenience wrapper around [`Self::export_with_info`] with both
    /// fields set to 0 ("unknown").
    #[allow(dead_code)]
    pub fn export(&mut self, handle: GemHandle) -> Result<i32> {
        self.export_with_info(handle, 0, 0)
    }
|
||||
|
||||
    /// Export a GEM object as a synthetic DMA-BUF fd.
    ///
    /// Repeated exports of the same handle share one entry and bump its
    /// refcount; `phys_addr`/`size` of 0 mean "unknown" and are merged with
    /// any previously recorded values.
    ///
    /// # Errors
    /// Fails on a zero handle, fd-space exhaustion, refcount overflow, or
    /// metadata that conflicts with an earlier export of the same handle.
    #[allow(dead_code)]
    pub fn export_with_info(
        &mut self,
        handle: GemHandle,
        phys_addr: usize,
        size: u64,
    ) -> Result<i32> {
        if handle == 0 {
            return Err(DriverError::InvalidArgument(
                "DMA-BUF export requires a non-zero GEM handle",
            ));
        }

        // NOTE: the fd counter advances even if the merge below fails;
        // ids are plentiful so the gap is harmless.
        let fd = self.allocate_fd()?;
        let scheme_path = Self::scheme_path(handle);

        if let Some(entry) = self.entries.get_mut(&handle) {
            // Re-export: reconcile metadata, then take another reference.
            entry.info.phys_addr = Self::merge_phys_addr(entry.info.phys_addr, phys_addr)?;
            entry.info.size = Self::merge_size(entry.info.size, size)?;
            entry.refcount = entry.refcount.checked_add(1).ok_or_else(|| {
                DriverError::Buffer(format!(
                    "DMA-BUF refcount overflow for GEM handle {}",
                    handle
                ))
            })?;

            debug!(
                "redox-drm: dup() DMA-BUF export fd {} -> {} (GEM handle {}, refs={})",
                entry.scheme_path, fd, handle, entry.refcount
            );
        } else {
            // First export of this handle: create the shared entry.
            self.entries.insert(
                handle,
                DmabufEntry {
                    info: DmabufInfo {
                        phys_addr,
                        size,
                        gem_handle: handle,
                    },
                    scheme_path: scheme_path.clone(),
                    refcount: 1,
                },
            );

            warn!(
                "redox-drm: exported DMA-BUF {} as synthetic fd {} for GEM handle {} \
                 (phys={:#x}, size={})",
                scheme_path, fd, handle, phys_addr, size
            );
        }

        self.exported.insert(fd, handle);
        Ok(fd)
    }
|
||||
|
||||
pub fn import(&self, fd: i32) -> Result<GemHandle> {
|
||||
let info = self
|
||||
.lookup(fd)
|
||||
.ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;
|
||||
|
||||
debug!(
|
||||
"redox-drm: imported DMA-BUF fd {} -> GEM handle {} (phys={:#x}, size={})",
|
||||
fd, info.gem_handle, info.phys_addr, info.size
|
||||
);
|
||||
|
||||
Ok(info.gem_handle)
|
||||
}
|
||||
|
||||
    /// Drop one reference to a synthetic DMA-BUF fd.
    ///
    /// The fd mapping is always removed; the underlying entry survives
    /// until its refcount reaches zero.
    ///
    /// # Errors
    /// Unknown fd, missing book-keeping entry, or refcount underflow
    /// (which would indicate a double-close bug).
    pub fn close(&mut self, fd: i32) -> Result<()> {
        let handle = self
            .exported
            .remove(&fd)
            .ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;

        // Scope the mutable borrow of the entry; the block evaluates to
        // true when this was the last outstanding reference.
        let remove_entry = {
            let entry = self.entries.get_mut(&handle).ok_or_else(|| {
                DriverError::NotFound(format!(
                    "DMA-BUF bookkeeping missing for GEM handle {}",
                    handle
                ))
            })?;

            if entry.refcount == 0 {
                return Err(DriverError::Buffer(format!(
                    "DMA-BUF refcount underflow for GEM handle {}",
                    handle
                )));
            }

            entry.refcount -= 1;
            debug!(
                "redox-drm: closed DMA-BUF fd {} for {} (GEM handle {}, refs={})",
                fd, entry.scheme_path, handle, entry.refcount
            );
            entry.refcount == 0
        };

        if remove_entry {
            let _ = self.entries.remove(&handle);
            warn!(
                "redox-drm: released final DMA-BUF export for GEM handle {}",
                handle
            );
        }

        Ok(())
    }
|
||||
|
||||
/// Fetch the recorded buffer description for a synthetic dma-buf fd, if the
/// fd is live and its GEM bookkeeping entry still exists.
pub fn lookup(&self, fd: i32) -> Option<DmabufInfo> {
    self.exported
        .get(&fd)
        .and_then(|handle| self.entries.get(handle))
        .map(|entry| entry.info)
}
|
||||
|
||||
pub fn dup(&mut self, fd: i32) -> Result<i32> {
|
||||
let info = self
|
||||
.lookup(fd)
|
||||
.ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;
|
||||
self.export_with_info(info.gem_handle, info.phys_addr, info.size)
|
||||
}
|
||||
|
||||
fn allocate_fd(&mut self) -> Result<i32> {
|
||||
let fd = self.next_fd;
|
||||
self.next_fd = self.next_fd.checked_add(1).ok_or_else(|| {
|
||||
DriverError::Buffer("synthetic DMA-BUF fd space exhausted".to_string())
|
||||
})?;
|
||||
Ok(fd)
|
||||
}
|
||||
|
||||
fn scheme_path(handle: GemHandle) -> String {
|
||||
format!("drm:card0/dmabuf/{handle}")
|
||||
}
|
||||
|
||||
fn merge_phys_addr(current: usize, incoming: usize) -> Result<usize> {
|
||||
if current == 0 || incoming == 0 || current == incoming {
|
||||
return Ok(current.max(incoming));
|
||||
}
|
||||
|
||||
Err(DriverError::Buffer(format!(
|
||||
"conflicting DMA-BUF physical addresses: existing={:#x}, incoming={:#x}",
|
||||
current, incoming
|
||||
)))
|
||||
}
|
||||
|
||||
fn merge_size(current: u64, incoming: u64) -> Result<u64> {
|
||||
if current == 0 || incoming == 0 || current == incoming {
|
||||
return Ok(current.max(incoming));
|
||||
}
|
||||
|
||||
Err(DriverError::Buffer(format!(
|
||||
"conflicting DMA-BUF sizes: existing={}, incoming={}",
|
||||
current, incoming
|
||||
)))
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,67 @@
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::gem::GemHandle;
|
||||
use crate::kms::{ConnectorInfo, ModeInfo};
|
||||
|
||||
/// Driver-wide result alias over [`DriverError`].
pub type Result<T> = std::result::Result<T, DriverError>;
|
||||
|
||||
/// Unified error type for the redox-drm driver stack.
#[derive(Debug, Error)]
pub enum DriverError {
    /// Device bring-up failure, or use of a component before init.
    #[error("driver initialization failed: {0}")]
    Initialization(String),

    /// Caller passed an argument violating a precondition.
    #[error("invalid argument: {0}")]
    InvalidArgument(&'static str),

    /// A looked-up resource (fd, handle, connector, ...) does not exist.
    #[error("resource not found: {0}")]
    NotFound(String),

    /// Operation is valid but not implemented by this backend.
    #[allow(dead_code)]
    #[error("operation not supported: {0}")]
    Unsupported(&'static str),

    /// Register-level (memory-mapped I/O) access failed or timed out.
    #[error("MMIO failure: {0}")]
    Mmio(String),

    /// PCI enumeration/configuration failure.
    #[error("PCI failure: {0}")]
    Pci(String),

    /// Buffer / GEM / dma-buf bookkeeping failure.
    #[error("buffer failure: {0}")]
    Buffer(String),

    /// General I/O failure.
    #[error("I/O failure: {0}")]
    Io(String),
}
|
||||
|
||||
/// Backend interface implemented by each GPU driver (e.g. the AMD DC
/// backend). Object-safe; implementations must be shareable across threads.
pub trait GpuDriver: Send + Sync {
    /// Short driver identifier.
    fn driver_name(&self) -> &str;
    /// Human-readable driver description.
    fn driver_desc(&self) -> &str;
    /// Driver date string.
    #[allow(dead_code)]
    fn driver_date(&self) -> &str;

    /// Probe physical connectors; one entry per detected connector.
    fn detect_connectors(&self) -> Vec<ConnectorInfo>;
    /// Modes advertised by `connector_id`.
    fn get_modes(&self, connector_id: u32) -> Vec<ModeInfo>;
    /// Program a CRTC with a framebuffer, mode, and set of connectors.
    fn set_crtc(
        &self,
        crtc_id: u32,
        fb_handle: u32,
        connectors: &[u32],
        mode: &ModeInfo,
    ) -> Result<()>;
    /// Queue a page flip. Returns a u64 — presumably a flip sequence or
    /// event cookie; confirm against implementors.
    fn page_flip(&self, crtc_id: u32, fb_handle: u32, flags: u32) -> Result<u64>;
    /// Current vblank counter for `crtc_id`.
    #[allow(dead_code)]
    fn get_vblank(&self, crtc_id: u32) -> Result<u64>;

    /// Allocate a GEM buffer of `size` bytes.
    fn gem_create(&self, size: u64) -> Result<GemHandle>;
    /// Release a GEM buffer.
    fn gem_close(&self, handle: GemHandle) -> Result<()>;
    /// Map a GEM buffer; returns the mapping address.
    fn gem_mmap(&self, handle: GemHandle) -> Result<usize>;
    /// Size in bytes of a GEM buffer.
    fn gem_size(&self, handle: GemHandle) -> Result<u64>;
    /// Export a GEM buffer as a dma-buf file descriptor.
    #[allow(dead_code)]
    fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32>;
    /// Import a dma-buf file descriptor as a GEM handle.
    #[allow(dead_code)]
    fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle>;

    /// Raw EDID bytes for a connector (empty when unavailable).
    #[allow(dead_code)]
    fn get_edid(&self, connector_id: u32) -> Vec<u8>;
    /// Service a pending interrupt. The `Some((u32, u64))` payload is
    /// presumably (crtc id, sequence) — confirm against implementors.
    fn handle_irq(&self) -> Result<Option<(u32, u64)>>;
}
|
||||
@@ -0,0 +1,516 @@
|
||||
use log::{info, warn};
|
||||
use std::ptr;
|
||||
#[cfg(no_amdgpu_c)]
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::driver::{DriverError, Result};
|
||||
use crate::kms::connector::synthetic_edid;
|
||||
use crate::kms::{ConnectorInfo, ConnectorStatus, ConnectorType, ModeInfo};
|
||||
|
||||
/// C-ABI mirror of the connector record filled in by
/// `amdgpu_dc_get_connector_info` on the C side. Layout must stay in sync
/// with the C struct; do not reorder fields.
#[repr(C)]
pub struct ConnectorInfoFFI {
    /// Connector object id.
    pub id: i32,
    /// Raw connector type code (decoded by `map_connector_type`).
    pub connector_type: i32,
    /// Per-type instance index.
    pub connector_type_id: i32,
    /// Raw connection status (decoded by `map_connection_status`).
    pub connection: i32,
    /// Physical width — presumably millimetres, per DRM convention; confirm.
    pub mm_width: i32,
    /// Physical height — presumably millimetres; confirm.
    pub mm_height: i32,
    /// Id of the encoder currently driving this connector.
    pub encoder_id: i32,
}
|
||||
|
||||
#[cfg(not(no_amdgpu_c))]
unsafe extern "C" {
    /// Full hardware initialization: sets MMIO base, FB aperture, PCI device.
    /// Must be called before any other DC function — the C side depends on
    /// globals populated here (g_mmio_base, g_fb_phys, etc.).
    #[link_name = "amdgpu_redox_init"]
    fn ffi_amdgpu_redox_init(
        mmio_base: *const u8,
        mmio_size: usize,
        fb_phys: u64,
        fb_size: usize,
    ) -> i32;

    /// Probe displays; returns the connector count, negative on failure
    /// (as interpreted by `DisplayCore::detect_connectors`).
    #[link_name = "amdgpu_dc_detect_connectors"]
    fn ffi_amdgpu_dc_detect_connectors() -> i32;
    /// Fill `info` for connector `idx`; negative return indicates failure.
    #[link_name = "amdgpu_dc_get_connector_info"]
    fn ffi_amdgpu_dc_get_connector_info(idx: i32, info: *mut ConnectorInfoFFI) -> i32;
    /// Program a CRTC to scan out `width`x`height` from `fb_addr`.
    #[link_name = "amdgpu_dc_set_crtc"]
    fn ffi_amdgpu_dc_set_crtc(crtc_id: i32, fb_addr: u64, width: u32, height: u32) -> i32;

    /// Releases global state in the C layer.
    #[link_name = "amdgpu_redox_cleanup"]
    fn ffi_amdgpu_redox_cleanup();
}
|
||||
|
||||
// Fallback bookkeeping used only when the AMD C backend is compiled out
// (cfg `no_amdgpu_c`): the stubs below record the MMIO aperture here.
#[cfg(no_amdgpu_c)]
static FALLBACK_MMIO_BASE: AtomicUsize = AtomicUsize::new(0);
#[cfg(no_amdgpu_c)]
static FALLBACK_MMIO_SIZE: AtomicUsize = AtomicUsize::new(0);

/// errno-style ENOENT value returned (negated) by the stub connector lookup.
#[cfg(no_amdgpu_c)]
const FALLBACK_ENOENT: i32 = 2;
|
||||
|
||||
/// Stub DC init (C backend compiled out): records the MMIO aperture in the
/// fallback atomics and reports success (0).
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_init(mmio_base: *const u8, mmio_size: usize) -> i32 {
    FALLBACK_MMIO_BASE.store(mmio_base as usize, Ordering::Relaxed);
    FALLBACK_MMIO_SIZE.store(mmio_size, Ordering::Relaxed);
    0
}

/// Stub init with framebuffer: FB parameters are ignored; only the MMIO
/// aperture is recorded.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_init_with_fb(
    mmio_base: *const u8,
    mmio_size: usize,
    _fb_phys: u64,
    _fb_size: usize,
) -> i32 {
    FALLBACK_MMIO_BASE.store(mmio_base as usize, Ordering::Relaxed);
    FALLBACK_MMIO_SIZE.store(mmio_size, Ordering::Relaxed);
    0
}

/// Stub detection: warns and reports zero connectors.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_detect_connectors() -> i32 {
    warn!("redox-drm: compiled without AMD C backend (no_amdgpu_c); no real connector detection available");
    0
}

/// Stub connector lookup: always fails with -ENOENT.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_get_connector_info(_idx: i32, _info: *mut ConnectorInfoFFI) -> i32 {
    -FALLBACK_ENOENT
}

/// Stub modeset: accepted and ignored.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_set_crtc(_crtc_id: i32, _fb_addr: u64, _width: u32, _height: u32) -> i32 {
    0
}

/// Stub cleanup: clears the recorded aperture.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_cleanup() {
    FALLBACK_MMIO_BASE.store(0, Ordering::Relaxed);
    FALLBACK_MMIO_SIZE.store(0, Ordering::Relaxed);
}
|
||||
|
||||
/// Safe wrapper over the C backend's combined init, without a FB aperture.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_init(mmio_base: *const u8, mmio_size: usize) -> i32 {
    // SAFETY: forwards to the C entry point; callers (DisplayCore
    // constructors) supply the mapped MMIO aperture and its size.
    unsafe { ffi_amdgpu_redox_init(mmio_base, mmio_size, 0, 0) }
}

/// Safe wrapper over the C backend's combined init, with a FB aperture.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_init_with_fb(
    mmio_base: *const u8,
    mmio_size: usize,
    fb_phys: u64,
    fb_size: usize,
) -> i32 {
    // SAFETY: same contract as amdgpu_dc_init, plus the FB aperture.
    unsafe { ffi_amdgpu_redox_init(mmio_base, mmio_size, fb_phys, fb_size) }
}

/// Safe wrapper: connector probe; returns count or negative status.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_detect_connectors() -> i32 {
    // SAFETY: no arguments; the C side must have been initialized first
    // (enforced by DisplayCore's `initialized` flag at the call sites).
    unsafe { ffi_amdgpu_dc_detect_connectors() }
}

/// Safe wrapper: fill `info` for connector `idx`.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_get_connector_info(idx: i32, info: *mut ConnectorInfoFFI) -> i32 {
    // SAFETY: `info` points at a live, writable ConnectorInfoFFI at the
    // single call site in detect_connectors.
    unsafe { ffi_amdgpu_dc_get_connector_info(idx, info) }
}

/// Safe wrapper: program a CRTC via the C DC layer.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_set_crtc(crtc_id: i32, fb_addr: u64, width: u32, height: u32) -> i32 {
    // SAFETY: plain-value arguments forwarded to the C entry point.
    unsafe { ffi_amdgpu_dc_set_crtc(crtc_id, fb_addr, width, height) }
}

/// Safe wrapper: release C-side global state.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_cleanup() {
    // SAFETY: no arguments; called at most once per DisplayCore via Drop.
    unsafe { ffi_amdgpu_redox_cleanup() }
}
|
||||
|
||||
/// Rust-side handle to the initialized AMD Display Core. Keeps the MMIO
/// aperture bounds for direct register access plus the framebuffer aperture
/// handed to the C layer; C-side cleanup runs on Drop.
pub struct DisplayCore {
    // Set true by the constructors after a successful C-side init.
    initialized: bool,
    // Base of the mapped register aperture (stored as usize for pointer math).
    mmio_base: usize,
    // Aperture length in bytes; bounds-checks all register access.
    mmio_size: usize,
    // Physical framebuffer base passed to the C layer (0 = none supplied).
    fb_phys: u64,
    // Framebuffer aperture size in bytes (0 = none supplied).
    fb_size: usize,
}
|
||||
|
||||
impl DisplayCore {
|
||||
/// Initialize the Display Core without advertising a framebuffer aperture.
pub fn new(mmio_base: *const u8, mmio_size: usize) -> Result<Self> {
    Self::with_framebuffer(mmio_base, mmio_size, 0, 0)
}
|
||||
|
||||
pub fn with_framebuffer(
|
||||
mmio_base: *const u8,
|
||||
mmio_size: usize,
|
||||
fb_phys: u64,
|
||||
fb_size: usize,
|
||||
) -> Result<Self> {
|
||||
let rc = if fb_phys != 0 && fb_size != 0 {
|
||||
amdgpu_dc_init_with_fb(mmio_base, mmio_size, fb_phys, fb_size)
|
||||
} else {
|
||||
amdgpu_dc_init(mmio_base, mmio_size)
|
||||
};
|
||||
if rc < 0 {
|
||||
return Err(DriverError::Initialization(format!(
|
||||
"amdgpu display init failed with status {}",
|
||||
rc
|
||||
)));
|
||||
}
|
||||
|
||||
info!(
|
||||
"redox-drm: AMD DC initialized with {} bytes of MMIO, fb_phys={:#x}, fb_size={}",
|
||||
mmio_size, fb_phys, fb_size
|
||||
);
|
||||
Ok(Self {
|
||||
initialized: true,
|
||||
mmio_base: mmio_base as usize,
|
||||
mmio_size,
|
||||
fb_phys,
|
||||
fb_size,
|
||||
})
|
||||
}
|
||||
|
||||
/// Physical base of the framebuffer aperture handed to the C layer
/// (0 when none was supplied).
pub fn fb_phys(&self) -> u64 {
    self.fb_phys
}

/// Size in bytes of that framebuffer aperture (0 when none).
pub fn fb_size(&self) -> usize {
    self.fb_size
}
|
||||
|
||||
/// Ask the C DC layer how many connectors exist and convert each record to
/// the driver's `ConnectorInfo`, attaching a mode list per connector.
///
/// Connectors whose individual info fetch fails are logged and skipped
/// rather than failing the whole probe.
pub fn detect_connectors(&self) -> Result<Vec<ConnectorInfo>> {
    if !self.initialized {
        return Err(DriverError::Initialization(
            "display core not initialized".to_string(),
        ));
    }

    let count = amdgpu_dc_detect_connectors();
    if count < 0 {
        return Err(DriverError::Mmio(format!(
            "AMD DC connector detection failed with status {}",
            count
        )));
    }
    if count == 0 {
        warn!("redox-drm: AMD DC reported 0 connected displays");
        return Ok(Vec::new());
    }

    let mut connectors = Vec::new();
    for idx in 0..count {
        // Defaults chosen so a partially-filled record is harmless;
        // connection=2 decodes to "disconnected" in map_connection_status.
        let mut raw = ConnectorInfoFFI {
            id: 0,
            connector_type: 0,
            connector_type_id: 0,
            connection: 2,
            mm_width: 0,
            mm_height: 0,
            encoder_id: 0,
        };

        let rc = amdgpu_dc_get_connector_info(idx, &mut raw as *mut ConnectorInfoFFI);
        if rc < 0 {
            warn!(
                "redox-drm: failed to fetch connector {} from AMD DC (status {})",
                idx, rc
            );
            continue;
        }

        // `.max(0)` clamps negative values from the C side before the
        // unsigned casts.
        connectors.push(ConnectorInfo {
            id: raw.id.max(0) as u32,
            connector_type: map_connector_type(raw.connector_type),
            connector_type_id: raw.connector_type_id.max(0) as u32,
            connection: map_connection_status(raw.connection),
            mm_width: raw.mm_width.max(0) as u32,
            mm_height: raw.mm_height.max(0) as u32,
            encoder_id: raw.encoder_id.max(0) as u32,
            modes: self.modes_for_connector(idx as u32),
        });
    }

    Ok(connectors)
}
|
||||
|
||||
pub fn set_crtc(&self, crtc_id: u32, fb_addr: u64, width: u32, height: u32) -> Result<()> {
|
||||
if !self.initialized {
|
||||
return Err(DriverError::Initialization(
|
||||
"display core must be initialized before modesetting".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let rc = amdgpu_dc_set_crtc(crtc_id as i32, fb_addr, width, height);
|
||||
if rc < 0 {
|
||||
return Err(DriverError::Mmio(format!(
|
||||
"amdgpu_dc_set_crtc failed for CRTC {} with status {}",
|
||||
crtc_id, rc
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Point a CRTC's HUBP at a new framebuffer address (page flip).
///
/// NOTE(review): the register offsets and the 0x400 per-pipe stride below
/// are hard-coded — presumably DCN display-pipe spacing; confirm against
/// the register headers for the targeted DCN generation.
pub fn flip_surface(&self, crtc_id: u32, fb_addr: u64) -> Result<()> {
    if !self.initialized {
        return Err(DriverError::Initialization(
            "display core must be initialized before page flip".to_string(),
        ));
    }

    const HUBP_FLIP_ADDR_LOW: usize = 0x5800;
    const HUBP_FLIP_ADDR_HIGH: usize = 0x5804;

    let hubp_base = HUBP_FLIP_ADDR_LOW + (crtc_id as usize) * 0x400;
    let hubp_high = HUBP_FLIP_ADDR_HIGH + (crtc_id as usize) * 0x400;

    // High dword is written before the low dword — presumably so the
    // surface address is never half-updated when the flip latches; confirm.
    self.write_reg(hubp_high, (fb_addr >> 32) as u32)?;
    self.write_reg(hubp_base, fb_addr as u32)?;

    // Presumably the per-pipe flip-control register; writing 1 arms the
    // flip — TODO confirm against the DCN register spec.
    let flip_control = 0x5834 + (crtc_id as usize) * 0x400;
    self.write_reg(flip_control, 1)?;

    Ok(())
}
|
||||
|
||||
pub fn read_edid(&self, connector_index: u32) -> Vec<u8> {
|
||||
if !self.initialized {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
match self.read_edid_block(connector_index, 0x00) {
|
||||
Ok(edid) if edid.len() >= 128 => edid,
|
||||
Ok(_) | Err(_) => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn modes_for_connector(&self, connector_index: u32) -> Vec<ModeInfo> {
|
||||
let real_edid = self.read_edid(connector_index);
|
||||
let mut modes = ModeInfo::from_edid(&real_edid);
|
||||
if modes.is_empty() {
|
||||
modes = ModeInfo::from_edid(&synthetic_edid());
|
||||
}
|
||||
if modes.is_empty() {
|
||||
modes.push(ModeInfo::default_1080p());
|
||||
}
|
||||
modes
|
||||
}
|
||||
|
||||
/// Bit-bang one 128-byte EDID block over the DC hardware I2C (DDC) engine.
///
/// Sequence: acquire the engine via the arbitration register, select the
/// DDC line for the connector, program speed/setup, queue a
/// write(segment-addr, offset) + read(128) transaction, GO, poll SW_STATUS,
/// then drain the data FIFO.
///
/// NOTE(review): the MM_DC_I2C_* DWORD indices and bit masks below are
/// hard-coded — presumably for one specific DCE/DCN generation; confirm
/// against the corresponding register headers.
fn read_edid_block(&self, connector_index: u32, offset: u8) -> Result<Vec<u8>> {
    const MM_DC_I2C_CONTROL: usize = 0x1e98;
    const MM_DC_I2C_ARBITRATION: usize = 0x1e99;
    const MM_DC_I2C_SW_STATUS: usize = 0x1e9b;
    const MM_DC_I2C_DDC1_SPEED: usize = 0x1ea2;
    const MM_DC_I2C_DDC1_SETUP: usize = 0x1ea3;
    const MM_DC_I2C_TRANSACTION0: usize = 0x1eae;
    const MM_DC_I2C_TRANSACTION1: usize = 0x1eaf;
    const MM_DC_I2C_DATA: usize = 0x1eb2;

    const CONTROL_GO: u32 = 0x0000_0001;
    const CONTROL_SOFT_RESET: u32 = 0x0000_0002;
    const CONTROL_SW_STATUS_RESET: u32 = 0x0000_0008;
    const CONTROL_DDC_SELECT_MASK: u32 = 0x0000_0700;
    const CONTROL_DDC_SELECT_SHIFT: u32 = 8;
    const CONTROL_TRANSACTION_COUNT_MASK: u32 = 0x0030_0000;
    const CONTROL_TRANSACTION_COUNT_SHIFT: u32 = 20;

    const ARBITRATION_STATUS_MASK: u32 = 0x0000_000c;
    const ARBITRATION_STATUS_SHIFT: u32 = 2;
    const ARBITRATION_REQ: u32 = 0x0010_0000;
    const ARBITRATION_DONE: u32 = 0x0020_0000;

    const SW_STATUS_DONE: u32 = 0x0000_0004;
    const SW_STATUS_ABORTED: u32 = 0x0000_0010;
    const SW_STATUS_TIMEOUT: u32 = 0x0000_0020;
    const SW_STATUS_NACK: u32 = 0x0000_0100;

    const SETUP_ENABLE: u32 = 0x0000_0040;
    const SETUP_SEND_RESET_LENGTH: u32 = 0x0000_0004;
    const SETUP_TIME_LIMIT_SHIFT: u32 = 24;

    const SPEED_THRESHOLD: u32 = 0x0000_0002;
    const SPEED_PRESCALE_SHIFT: u32 = 16;
    const SPEED_START_STOP_TIMING: u32 = 0x0000_0200;

    const TX_RW: u32 = 0x0000_0001;
    const TX_STOP_ON_NACK: u32 = 0x0000_0100;
    const TX_START: u32 = 0x0000_1000;
    const TX_STOP: u32 = 0x0000_2000;
    const TX_COUNT_SHIFT: u32 = 16;

    const DATA_RW: u32 = 0x0000_0001;
    const DATA_VALUE_SHIFT: u32 = 8;
    const DATA_VALUE_MASK: u32 = 0x0000_ff00;
    const DATA_INDEX_SHIFT: u32 = 16;
    const DATA_INDEX_WRITE: u32 = 0x8000_0000;

    // Standard DDC slave address 0x50, in 8-bit write/read form.
    const EDID_WRITE_ADDR: u8 = 0xa0;
    const EDID_READ_ADDR: u8 = 0xa1;
    const EDID_BLOCK_SIZE: usize = 128;
    const I2C_STATUS_IDLE: u32 = 0;
    const I2C_STATUS_USED_BY_SW: u32 = 1;
    const I2C_WAIT_RETRIES: usize = 200;

    // Validate the highest register indices touched up front so the rest
    // of the sequence cannot fail mid-transaction on a bounds error.
    self.ensure_mmio_reg(MM_DC_I2C_DATA)?;
    self.ensure_mmio_reg(MM_DC_I2C_TRANSACTION1)?;

    // Acquire the engine unless software already holds it.
    let connector_select = connector_index & 0x7;
    let arbitration = self.read_reg(MM_DC_I2C_ARBITRATION)?;
    let status = (arbitration & ARBITRATION_STATUS_MASK) >> ARBITRATION_STATUS_SHIFT;
    if status == I2C_STATUS_IDLE {
        self.write_reg(MM_DC_I2C_ARBITRATION, arbitration | ARBITRATION_REQ)?;
    } else if status != I2C_STATUS_USED_BY_SW {
        return Err(DriverError::Mmio(format!(
            "AMD I2C engine unavailable for connector {} (status {})",
            connector_index, status
        )));
    }

    // Select the DDC line and clear stale SW status, preserving other bits.
    let control = self.read_reg(MM_DC_I2C_CONTROL)?;
    self.write_reg(
        MM_DC_I2C_CONTROL,
        (control
            & !(CONTROL_SOFT_RESET | CONTROL_DDC_SELECT_MASK | CONTROL_TRANSACTION_COUNT_MASK))
            | CONTROL_SW_STATUS_RESET
            | (connector_select << CONTROL_DDC_SELECT_SHIFT),
    )?;

    self.write_reg(
        MM_DC_I2C_DDC1_SETUP,
        SETUP_ENABLE | SETUP_SEND_RESET_LENGTH | (3 << SETUP_TIME_LIMIT_SHIFT),
    )?;
    self.write_reg(
        MM_DC_I2C_DDC1_SPEED,
        SPEED_THRESHOLD | SPEED_START_STOP_TIMING | (40 << SPEED_PRESCALE_SHIFT),
    )?;
    // Transaction 0: 1-byte address write; transaction 1: 128-byte read.
    self.write_reg(
        MM_DC_I2C_TRANSACTION0,
        TX_START | TX_STOP_ON_NACK | (1 << TX_COUNT_SHIFT),
    )?;
    self.write_reg(
        MM_DC_I2C_TRANSACTION1,
        TX_RW
            | TX_START
            | TX_STOP
            | TX_STOP_ON_NACK
            | ((EDID_BLOCK_SIZE as u32) << TX_COUNT_SHIFT),
    )?;

    // Data FIFO: slave write address (index reset), block offset, then the
    // read address.
    self.write_reg(
        MM_DC_I2C_DATA,
        ((EDID_WRITE_ADDR as u32) << DATA_VALUE_SHIFT) | DATA_INDEX_WRITE,
    )?;
    self.write_reg(MM_DC_I2C_DATA, (offset as u32) << DATA_VALUE_SHIFT)?;
    self.write_reg(MM_DC_I2C_DATA, (EDID_READ_ADDR as u32) << DATA_VALUE_SHIFT)?;

    // Kick the engine: 2 queued transactions encoded as count=1, plus GO.
    let control = self.read_reg(MM_DC_I2C_CONTROL)?;
    self.write_reg(
        MM_DC_I2C_CONTROL,
        (control & !CONTROL_TRANSACTION_COUNT_MASK)
            | (1 << CONTROL_TRANSACTION_COUNT_SHIFT)
            | CONTROL_GO,
    )?;

    // Poll up to ~200 ms for completion or a terminal error flag.
    let mut final_status = 0;
    for _ in 0..I2C_WAIT_RETRIES {
        final_status = self.read_reg(MM_DC_I2C_SW_STATUS)?;
        if (final_status
            & (SW_STATUS_DONE | SW_STATUS_ABORTED | SW_STATUS_TIMEOUT | SW_STATUS_NACK))
            != 0
        {
            break;
        }
        thread::sleep(Duration::from_millis(1));
    }

    // Always release the engine, even on failure paths below.
    self.write_reg(MM_DC_I2C_ARBITRATION, ARBITRATION_DONE)?;

    if (final_status & SW_STATUS_DONE) == 0 {
        return Err(DriverError::Mmio(format!(
            "AMD I2C EDID read did not complete for connector {} (status {:#x})",
            connector_index, final_status
        )));
    }
    if (final_status & (SW_STATUS_ABORTED | SW_STATUS_TIMEOUT | SW_STATUS_NACK)) != 0 {
        return Err(DriverError::Mmio(format!(
            "AMD I2C EDID read failed for connector {} (status {:#x})",
            connector_index, final_status
        )));
    }

    // Rewind the data FIFO read index — presumably index 2 is where the
    // received read-transaction bytes start; confirm against the register
    // spec.
    self.write_reg(
        MM_DC_I2C_DATA,
        DATA_RW | DATA_INDEX_WRITE | ((2_u32) << DATA_INDEX_SHIFT),
    )?;

    let mut edid = Vec::with_capacity(EDID_BLOCK_SIZE);
    for _ in 0..EDID_BLOCK_SIZE {
        let value = self.read_reg(MM_DC_I2C_DATA)?;
        edid.push(((value & DATA_VALUE_MASK) >> DATA_VALUE_SHIFT) as u8);
    }

    Ok(edid)
}
|
||||
|
||||
fn ensure_mmio_reg(&self, reg: usize) -> Result<()> {
|
||||
let offset = reg.checked_mul(4).ok_or_else(|| {
|
||||
DriverError::Mmio(format!("AMD register offset overflow for {reg:#x}"))
|
||||
})?;
|
||||
if offset + 4 > self.mmio_size {
|
||||
return Err(DriverError::Mmio(format!(
|
||||
"AMD register {reg:#x} outside MMIO aperture {:#x}",
|
||||
self.mmio_size
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Read a 32-bit register at DWORD index `reg` after bounds-checking.
fn read_reg(&self, reg: usize) -> Result<u32> {
    self.ensure_mmio_reg(reg)?;
    let offset = reg * 4;
    let ptr = (self.mmio_base + offset) as *const u32;
    // SAFETY: ensure_mmio_reg verified reg*4 + 4 <= mmio_size, and
    // mmio_base/mmio_size describe the aperture supplied at construction.
    let value = unsafe { ptr::read_volatile(ptr) };
    // Registers are little-endian on the bus; normalize to host order.
    Ok(u32::from_le(value))
}
|
||||
|
||||
/// Write a 32-bit register at DWORD index `reg` after bounds-checking.
fn write_reg(&self, reg: usize, value: u32) -> Result<()> {
    self.ensure_mmio_reg(reg)?;
    let offset = reg * 4;
    let ptr = (self.mmio_base + offset) as *mut u32;
    // SAFETY: ensure_mmio_reg verified reg*4 + 4 <= mmio_size, and
    // mmio_base/mmio_size describe the aperture supplied at construction.
    unsafe { ptr::write_volatile(ptr, value.to_le()) };
    Ok(())
}
|
||||
}
|
||||
|
||||
impl Drop for DisplayCore {
    /// Release the C layer's global state when the core goes away; skipped
    /// if initialization never completed.
    fn drop(&mut self) {
        if self.initialized {
            amdgpu_dc_cleanup();
        }
    }
}
|
||||
|
||||
/// Decode the raw C-side connector-type code into the KMS enum; unknown
/// codes collapse to `Unknown`. The numbering matches the DRM connector
/// type constants — presumably DRM_MODE_CONNECTOR_*; confirm with the C side.
fn map_connector_type(value: i32) -> ConnectorType {
    match value {
        1 => ConnectorType::VGA,
        2 => ConnectorType::DVII,
        3 => ConnectorType::DVID,
        4 => ConnectorType::DVIA,
        10 => ConnectorType::DisplayPort,
        11 => ConnectorType::HDMIA,
        14 => ConnectorType::EDP,
        15 => ConnectorType::Virtual,
        _ => ConnectorType::Unknown,
    }
}
|
||||
|
||||
/// Decode the raw C-side connection status into the KMS enum; anything
/// other than 1 (connected) / 2 (disconnected) becomes `Unknown`.
fn map_connection_status(value: i32) -> ConnectorStatus {
    match value {
        1 => ConnectorStatus::Connected,
        2 => ConnectorStatus::Disconnected,
        _ => ConnectorStatus::Unknown,
    }
}
|
||||
@@ -0,0 +1,318 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use log::{info, warn};
|
||||
use redox_driver_sys::dma::DmaBuffer;
|
||||
use redox_driver_sys::memory::MmioRegion;
|
||||
|
||||
use crate::driver::{DriverError, Result};
|
||||
|
||||
const GPU_PAGE_SIZE: u64 = 4096;
|
||||
const PAGE_TABLE_LEVELS: usize = 4;
|
||||
const PTE_COUNT: usize = 512;
|
||||
const PT_BYTES: usize = PTE_COUNT * 8;
|
||||
const PTE_INDEX_MASK: u64 = 0x1ff;
|
||||
const PAGE_OFFSET_MASK: u64 = GPU_PAGE_SIZE - 1;
|
||||
const AMD_PTE_VALID: u64 = 1 << 0;
|
||||
const AMD_PTE_SYSTEM: u64 = 1 << 1;
|
||||
const AMD_PTE_FLAG_MASK: u64 = 0x0fff;
|
||||
const AMD_PTE_ADDR_MASK: u64 = 0x000f_ffff_ffff_f000;
|
||||
const GTT_MIN_VA_SIZE: u64 = 256 * 1024 * 1024;
|
||||
const TLB_POLL_LIMIT: usize = 10_000;
|
||||
|
||||
// GC 11.0 (RDNA2) VM register offsets (DWORD index * 4 = byte offset)
|
||||
const MM_VM_CONTEXT0_CNTL: usize = 0x1688 * 4;
|
||||
const MM_VM_CONTEXT0_PT_BASE_LO32: usize = 0x16f3 * 4;
|
||||
const MM_VM_CONTEXT0_PT_BASE_HI32: usize = 0x16f4 * 4;
|
||||
const MM_VM_CONTEXT0_PT_START_LO32: usize = 0x1713 * 4;
|
||||
const MM_VM_CONTEXT0_PT_START_HI32: usize = 0x1714 * 4;
|
||||
const MM_VM_CONTEXT0_PT_END_LO32: usize = 0x1733 * 4;
|
||||
const MM_VM_CONTEXT0_PT_END_HI32: usize = 0x1734 * 4;
|
||||
const MMVM_INVALIDATE_ENG0_REQ: usize = 0x16ab * 4;
|
||||
const MMVM_INVALIDATE_ENG0_ACK: usize = 0x16bd * 4;
|
||||
|
||||
/// One 512-entry GPU page-table node backed by DMA-visible memory.
struct PageTable {
    // Backing store for the 512 u64 entries; its physical address is what
    // parent directory entries point at.
    dma: DmaBuffer,
    // Host-side shadow of child tables, keyed by directory-entry index, so
    // non-leaf levels can be owned and walked from the CPU.
    children: BTreeMap<usize, Box<PageTable>>,
}
|
||||
|
||||
impl PageTable {
|
||||
/// Allocate a 4096-aligned DMA buffer big enough for 512 8-byte entries.
/// A non-contiguous buffer is only warned about — the GPU walker would
/// misread it, so this is best-effort.
fn allocate() -> Result<Self> {
    let dma = DmaBuffer::allocate(PT_BYTES, 4096)
        .map_err(|e| DriverError::Buffer(format!("GTT page table alloc failed: {e}")))?;
    if !dma.is_physically_contiguous() {
        warn!("redox-drm: GTT page table not guaranteed physically contiguous");
    }
    Ok(Self {
        dma,
        children: BTreeMap::new(),
    })
}
|
||||
|
||||
/// Physical address of this table's backing memory (what PDEs point at).
fn phys(&self) -> u64 {
    self.dma.physical_address() as u64
}

/// Read-only view of the 512 entries.
fn entries(&self) -> &[u64] {
    // SAFETY: the buffer is PT_BYTES = PTE_COUNT * 8 bytes and allocated
    // with 4096 alignment, so PTE_COUNT u64 values fit and are aligned;
    // the slice borrows &self so the buffer outlives it.
    unsafe { std::slice::from_raw_parts(self.dma.as_ptr() as *const u64, PTE_COUNT) }
}

/// Mutable view of the 512 entries.
fn entries_mut(&mut self) -> &mut [u64] {
    // SAFETY: same sizing/alignment argument as entries(); &mut self
    // guarantees exclusive access for the lifetime of the slice.
    unsafe { std::slice::from_raw_parts_mut(self.dma.as_mut_ptr() as *mut u64, PTE_COUNT) }
}
|
||||
|
||||
/// Recursively install the leaf PTE for `gpu_addr`, allocating intermediate
/// tables on demand. `level` 0 is the root; level PAGE_TABLE_LEVELS-1 holds
/// leaf PTEs.
fn map_page(&mut self, level: usize, gpu_addr: u64, phys_addr: u64, flags: u64) -> Result<()> {
    let idx = pt_index(gpu_addr, level)?;
    if level == PAGE_TABLE_LEVELS - 1 {
        self.entries_mut()[idx] = encode_pte(phys_addr, flags);
        return Ok(());
    }
    let child = match self.children.get_mut(&idx) {
        Some(c) => c,
        None => {
            // First touch of this slot: allocate the child table and point
            // the directory entry at it (valid, system-memory-backed).
            let c = Box::new(PageTable::allocate()?);
            let c_phys = c.phys();
            self.entries_mut()[idx] =
                (c_phys & AMD_PTE_ADDR_MASK) | AMD_PTE_VALID | AMD_PTE_SYSTEM;
            self.children.entry(idx).or_insert(c)
        }
    };
    child.map_page(level + 1, gpu_addr, phys_addr, flags)
}
|
||||
|
||||
/// Clear the leaf PTE for `gpu_addr`. Missing intermediate tables are
/// treated as already-unmapped (not an error); empty child tables are kept,
/// not freed.
fn unmap_page(&mut self, level: usize, gpu_addr: u64) -> Result<()> {
    let idx = pt_index(gpu_addr, level)?;
    if level == PAGE_TABLE_LEVELS - 1 {
        self.entries_mut()[idx] = 0;
        return Ok(());
    }
    if let Some(child) = self.children.get_mut(&idx) {
        child.unmap_page(level + 1, gpu_addr)?;
    }
    Ok(())
}
|
||||
|
||||
/// Walk the tree and return the physical address mapped at `gpu_addr`, or
/// `None` when any level's entry is missing or not VALID.
fn translate(&self, level: usize, gpu_addr: u64) -> Option<u64> {
    let idx = pt_index(gpu_addr, level).ok()?;
    let entry = self.entries()[idx];
    if entry & AMD_PTE_VALID == 0 {
        return None;
    }
    if level == PAGE_TABLE_LEVELS - 1 {
        // Leaf: splice the page offset back onto the frame address.
        return Some((entry & AMD_PTE_ADDR_MASK) | (gpu_addr & PAGE_OFFSET_MASK));
    }
    self.children.get(&idx)?.translate(level + 1, gpu_addr)
}
|
||||
}
|
||||
|
||||
/// Software model of the AMD GTT (GPU translation table): a 4-level page
/// table plus a simple first-fit virtual-address allocator.
pub struct GttManager {
    // True once initialize() has built the root table and VA range.
    initialized: bool,
    // Root (level-0) page table; None until the first initialize().
    root: Option<PageTable>,
    // Bounds of the managed GPU virtual range (va_end is inclusive, per new()).
    va_start: u64,
    va_end: u64,
    // Base offset reserved for the framebuffer (set to 0 by initialize()).
    fb_offset: u64,
    // Bump pointer for fresh allocations past all freed chunks.
    next_alloc: u64,
    // Freed (start, size) chunks, reused first-fit; never coalesced.
    free_list: Vec<(u64, u64)>,
}
|
||||
|
||||
impl Default for GttManager {
    /// Same as `new()`: an uninitialized manager.
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl GttManager {
|
||||
/// Construct an empty, uninitialized manager. `va_end` is an *inclusive*
/// last-byte address; `initialize()` must be called before any mapping.
pub fn new() -> Self {
    Self {
        initialized: false,
        root: None,
        va_start: 0,
        va_end: GTT_MIN_VA_SIZE - 1,
        fb_offset: 0,
        next_alloc: 0,
        free_list: Vec::new(),
    }
}
|
||||
|
||||
pub fn initialize(&mut self) -> Result<()> {
|
||||
if self.root.is_none() {
|
||||
self.root = Some(PageTable::allocate()?);
|
||||
}
|
||||
self.fb_offset = 0;
|
||||
self.va_start = self.fb_offset;
|
||||
self.va_end = self
|
||||
.va_start
|
||||
.checked_add(GTT_MIN_VA_SIZE)
|
||||
.ok_or_else(|| DriverError::Initialization("GTT VA range overflow".into()))?;
|
||||
self.next_alloc = self.va_start;
|
||||
self.initialized = true;
|
||||
info!(
|
||||
"redox-drm: AMD GTT initialized va={:#x}..{:#x} root_pt={:#x}",
|
||||
self.va_start,
|
||||
self.va_end,
|
||||
self.root.as_ref().map(|r| r.phys()).unwrap_or(0)
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Whether `initialize()` has completed successfully.
pub fn is_initialized(&self) -> bool {
    self.initialized
}
|
||||
|
||||
pub fn alloc_gpu_range(&mut self, size: u64) -> Result<u64> {
|
||||
self.ensure_init()?;
|
||||
let aligned_size = (size + GPU_PAGE_SIZE - 1) & !(GPU_PAGE_SIZE - 1);
|
||||
if let Some(idx) = self.free_list.iter().position(|&(_, s)| s >= aligned_size) {
|
||||
let (start, free_size) = self.free_list.remove(idx);
|
||||
let remainder = free_size - aligned_size;
|
||||
if remainder > 0 {
|
||||
self.free_list.push((start + aligned_size, remainder));
|
||||
}
|
||||
return Ok(start);
|
||||
}
|
||||
let gpu_addr = self.next_alloc;
|
||||
let new_next = gpu_addr
|
||||
.checked_add(aligned_size)
|
||||
.ok_or_else(|| DriverError::Buffer("GTT VA allocation overflow".into()))?;
|
||||
if new_next > self.va_end {
|
||||
return Err(DriverError::Buffer(format!(
|
||||
"GTT VA space exhausted: need {:#x}..{:#x}, have ..{:#x}",
|
||||
gpu_addr, new_next, self.va_end
|
||||
)));
|
||||
}
|
||||
self.next_alloc = new_next;
|
||||
Ok(gpu_addr)
|
||||
}
|
||||
|
||||
/// Clear the leaf PTEs covering `gpu_start..gpu_start+size` (page-rounded).
/// Does not return the VA range to the allocator — see `release_range`.
pub fn unmap_range(&mut self, gpu_start: u64, size: u64) -> Result<()> {
    self.ensure_init()?;
    let aligned_size = (size + GPU_PAGE_SIZE - 1) & !(GPU_PAGE_SIZE - 1);
    let num_pages = (aligned_size / GPU_PAGE_SIZE) as usize;
    for i in 0..num_pages {
        let gpu_addr = gpu_start + (i as u64) * GPU_PAGE_SIZE;
        self.root
            .as_mut()
            .ok_or_else(|| DriverError::Initialization("GTT root missing".into()))?
            .unmap_page(0, gpu_addr)?;
    }
    Ok(())
}
|
||||
|
||||
/// Return a VA chunk to the free list (page-rounded). No validation or
/// coalescing is performed; PTEs must be cleared separately via
/// `unmap_range`.
pub fn release_range(&mut self, gpu_start: u64, size: u64) {
    let aligned_size = (size + GPU_PAGE_SIZE - 1) & !(GPU_PAGE_SIZE - 1);
    self.free_list.push((gpu_start, aligned_size));
}
|
||||
|
||||
/// Install a single page mapping gpu_addr -> phys_addr with the given PTE
/// flag bits. Both addresses must be page-aligned and the GPU VA must lie
/// inside the managed (inclusive) aperture.
pub fn map_page(&mut self, gpu_addr: u64, phys_addr: u64, flags: u64) -> Result<()> {
    self.ensure_init()?;
    if gpu_addr & PAGE_OFFSET_MASK != 0 {
        return Err(DriverError::InvalidArgument("gpu_addr not page-aligned"));
    }
    if phys_addr & PAGE_OFFSET_MASK != 0 {
        return Err(DriverError::InvalidArgument("phys_addr not page-aligned"));
    }
    if gpu_addr < self.va_start || gpu_addr > self.va_end {
        return Err(DriverError::InvalidArgument(
            "gpu_addr outside GTT aperture",
        ));
    }
    self.root
        .as_mut()
        .ok_or_else(|| DriverError::Initialization("GTT root missing".into()))?
        .map_page(0, gpu_addr, phys_addr, flags)
}
|
||||
|
||||
/// Clear the single leaf PTE for `gpu_addr` (no alignment/range checks —
/// out-of-tree addresses simply clear nothing).
pub fn unmap_page(&mut self, gpu_addr: u64) -> Result<()> {
    self.ensure_init()?;
    self.root
        .as_mut()
        .ok_or_else(|| DriverError::Initialization("GTT root missing".into()))?
        .unmap_page(0, gpu_addr)
}
|
||||
|
||||
/// Map a contiguous physical range into a contiguous GPU VA range,
/// page by page (size rounded up to whole pages). Fails partway through on
/// the first page that violates `map_page`'s checks.
pub fn map_range(
    &mut self,
    gpu_start: u64,
    phys_start: u64,
    size: u64,
    flags: u64,
) -> Result<()> {
    self.ensure_init()?;
    let aligned_size = (size + GPU_PAGE_SIZE - 1) & !(GPU_PAGE_SIZE - 1);
    let num_pages = (aligned_size / GPU_PAGE_SIZE) as usize;
    for i in 0..num_pages {
        let gpu_addr = gpu_start + (i as u64) * GPU_PAGE_SIZE;
        let phys_addr = phys_start + (i as u64) * GPU_PAGE_SIZE;
        self.map_page(gpu_addr, phys_addr, flags)?;
    }
    Ok(())
}
|
||||
|
||||
/// Kick VM invalidation engine 0 and busy-poll its ack bit.
///
/// NOTE(review): bit 0 presumably requests/acks invalidation for VMID 0
/// and bits 19..=23 select the flush scope — confirm against the
/// MMVM_INVALIDATE_ENG register spec for this GC generation. The poll is a
/// bounded spin with no sleep.
pub fn flush_tlb(&self, mmio: &MmioRegion) -> Result<()> {
    if !self.initialized {
        return Err(DriverError::Initialization("GTT not initialized".into()));
    }
    let req =
        (1u32 << 0) | (1u32 << 19) | (1u32 << 20) | (1u32 << 21) | (1u32 << 22) | (1u32 << 23);
    mmio.write32(MMVM_INVALIDATE_ENG0_REQ, req);
    for _ in 0..TLB_POLL_LIMIT {
        let ack = mmio.read32(MMVM_INVALIDATE_ENG0_ACK);
        if ack & (1u32 << 0) != 0 {
            return Ok(());
        }
    }
    Err(DriverError::Mmio("GTT TLB flush timeout".into()))
}
|
||||
|
||||
/// Software walk: return the physical address currently mapped at
/// `gpu_addr`, or `None` when uninitialized, out of range, or unmapped.
pub fn translate(&self, gpu_addr: u64) -> Option<u64> {
    if !self.initialized || gpu_addr < self.va_start || gpu_addr > self.va_end {
        return None;
    }
    self.root.as_ref()?.translate(0, gpu_addr)
}
|
||||
|
||||
/// Program VM context 0 with the root page-table address and the managed
/// VA range (in 4 KiB page numbers), enable it, then flush the TLB.
pub fn program_vm_context(&self, mmio: &MmioRegion) -> Result<()> {
    let root_phys = self
        .root
        .as_ref()
        .map(|r| r.phys())
        .ok_or_else(|| DriverError::Initialization("GTT root missing".into()))?;

    mmio.write32(MM_VM_CONTEXT0_PT_BASE_LO32, root_phys as u32);
    mmio.write32(MM_VM_CONTEXT0_PT_BASE_HI32, (root_phys >> 32) as u32);

    // START/END registers take page numbers, not byte addresses.
    let va_start_pages = self.va_start >> 12;
    let va_end_pages = self.va_end >> 12;
    mmio.write32(MM_VM_CONTEXT0_PT_START_LO32, va_start_pages as u32);
    mmio.write32(MM_VM_CONTEXT0_PT_START_HI32, (va_start_pages >> 32) as u32);
    mmio.write32(MM_VM_CONTEXT0_PT_END_LO32, va_end_pages as u32);
    mmio.write32(MM_VM_CONTEXT0_PT_END_HI32, (va_end_pages >> 32) as u32);

    // Enable VM context 0: depth=0 (4-level), block_size=0 (4KB pages)
    mmio.write32(MM_VM_CONTEXT0_CNTL, 1);

    self.flush_tlb(mmio)
}
|
||||
|
||||
/// Guard used by the mutating entry points: error out unless
/// `initialize()` has run.
fn ensure_init(&self) -> Result<()> {
    if !self.initialized {
        return Err(DriverError::Initialization(
            "GTT manager not initialized".into(),
        ));
    }
    Ok(())
}
|
||||
}
|
||||
|
||||
fn pt_index(gpu_addr: u64, level: usize) -> Result<usize> {
|
||||
if level >= PAGE_TABLE_LEVELS {
|
||||
return Err(DriverError::Initialization(format!(
|
||||
"invalid PT level {level}"
|
||||
)));
|
||||
}
|
||||
let shift = 12 + ((PAGE_TABLE_LEVELS - 1 - level) * 9);
|
||||
Ok(((gpu_addr >> shift) & PTE_INDEX_MASK) as usize)
|
||||
}
|
||||
|
||||
fn encode_pte(phys_addr: u64, flags: u64) -> u64 {
|
||||
(phys_addr & AMD_PTE_ADDR_MASK) | (flags & AMD_PTE_FLAG_MASK) | AMD_PTE_VALID | AMD_PTE_SYSTEM
|
||||
}
|
||||
@@ -0,0 +1,612 @@
|
||||
pub mod display;
|
||||
pub mod gtt;
|
||||
pub mod ring;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
use std::sync::Mutex;
|
||||
|
||||
use log::{debug, info, warn};
|
||||
use redox_driver_sys::irq::IrqHandle;
|
||||
use redox_driver_sys::memory::MmioRegion;
|
||||
use redox_driver_sys::pci::{PciBarInfo, PciDevice, PciDeviceInfo};
|
||||
|
||||
use crate::driver::{DriverError, GpuDriver, Result};
|
||||
use crate::gem::{GemHandle, GemManager};
|
||||
use crate::kms::connector::{synthetic_edid, Connector};
|
||||
use crate::kms::crtc::Crtc;
|
||||
use crate::kms::encoder::Encoder;
|
||||
use crate::kms::{ConnectorInfo, ModeInfo};
|
||||
|
||||
use self::display::DisplayCore;
|
||||
use self::gtt::GttManager;
|
||||
use self::ring::RingManager;
|
||||
|
||||
const AMD_IH_RB_CNTL: usize = 0x0080;
|
||||
const AMD_IH_RB_RPTR: usize = 0x0083;
|
||||
const AMD_IH_RB_WPTR: usize = 0x0084;
|
||||
const AMD_IH_CNTL: usize = 0x00c0;
|
||||
const AMD_IH_STATUS: usize = 0x00c2;
|
||||
|
||||
const AMD_DCN_DISP_INTERRUPT_STATUS: [usize; 6] = [0x012a, 0x012b, 0x012c, 0x012d, 0x012e, 0x012f];
|
||||
const AMD_DCN_HPD_INT_STATUS: [usize; 6] = [0x1f14, 0x1f1c, 0x1f24, 0x1f2c, 0x1f34, 0x1f3c];
|
||||
const AMD_DCN_HPD_CONTROL: [usize; 6] = [0x1f16, 0x1f1e, 0x1f26, 0x1f2e, 0x1f36, 0x1f3e];
|
||||
|
||||
const AMD_DISP_INTERRUPT_VBLANK_MASK: u32 = 0x0000_0008;
|
||||
const AMD_DISP_INTERRUPT_HPD_MASK: u32 = 0x0002_0000;
|
||||
const AMD_HPD_INT_STATUS_MASK: u32 = 0x0000_0001;
|
||||
const AMD_HPD_RX_INT_STATUS_MASK: u32 = 0x0000_0100;
|
||||
const AMD_HPD_INT_ACK_MASK: u32 = 0x0000_0001;
|
||||
const AMD_HPD_RX_INT_ACK_MASK: u32 = 0x0000_0100;
|
||||
const AMD_IH_STATUS_INTERRUPT_PENDING_MASK: u32 = 0x0000_0001;
|
||||
const AMD_IH_STATUS_RING_OVERFLOW_MASK: u32 = 0x0000_0002;
|
||||
|
||||
/// Decoded source of a serviced GPU interrupt.
#[derive(Clone, Debug)]
pub enum IrqEvent {
    /// Vertical blank on a CRTC; `count` is the driver's monotonic total.
    Vblank { crtc_id: u32, count: u64 },
    /// Hot-plug/unplug detected on a connector.
    Hotplug { connector_id: u32 },
    /// The interrupt fired but no known status bit was set.
    Unknown,
}
/// Top-level driver state for a single AMD GPU device.
pub struct AmdDriver {
    info: PciDeviceInfo,                // PCI identity/location of the device
    mmio: MmioRegion,                   // mapped register BAR0
    irq_handle: Option<IrqHandle>,      // None when PCI reports no IRQ line
    display: DisplayCore,               // DC modesetting core
    gem: Mutex<GemManager>,             // buffer-object bookkeeping
    connectors: Mutex<Vec<Connector>>,  // rebuilt on hotplug
    crtcs: Mutex<Vec<Crtc>>,
    encoders: Mutex<Vec<Encoder>>,
    gtt: Mutex<GttManager>,             // GPU VA space / page tables
    ring: Mutex<RingManager>,           // SDMA ring for flip submissions
    vblank_count: AtomicU64,            // monotonic vblank counter
    hotplug_pending: AtomicBool,        // set while a hotplug is being serviced
    firmware: HashMap<String, Vec<u8>>, // preloaded firmware blobs keyed by path
}
impl AmdDriver {
    /// Bring up the driver for one PCI device: map BAR0 (registers), locate
    /// BAR2 (VRAM aperture), request the IRQ, initialize the display core,
    /// GTT and SDMA ring, and record the detected connector topology.
    ///
    /// `firmware` is a cache of blobs keyed by their Linux firmware path.
    pub fn new(info: PciDeviceInfo, firmware: HashMap<String, Vec<u8>>) -> Result<Self> {
        let bar0 = find_memory_bar0(&info)?;
        let bar2 = info.find_memory_bar(2).copied();
        let mut device = PciDevice::open_location(&info.location)
            .map_err(|e| DriverError::Pci(format!("failed to re-open PCI device: {e}")))?;
        device
            .enable_device()
            .map_err(|e| DriverError::Pci(format!("enable_device failed: {e}")))?;
        let mmio = device
            .map_bar(bar0.index, bar0.addr, bar0.size as usize)
            .map_err(|e| DriverError::Mmio(format!("map_bar failed: {e}")))?;

        // Sanity read of register 0; logged for bring-up diagnostics only.
        let pci_id = mmio.read32(0);
        debug!(
            "redox-drm: mapped AMD MMIO BAR0 addr={:#x} size={:#x} idreg={:#x}",
            bar0.addr, bar0.size, pci_id
        );

        // Modesetting requires a framebuffer aperture; BAR2 absence is fatal.
        let (fb_phys, fb_size) = match &bar2 {
            Some(bar) => {
                debug!(
                    "redox-drm: AMD VRAM BAR2 addr={:#x} size={:#x}",
                    bar.addr, bar.size
                );
                (bar.addr, bar.size as usize)
            }
            None => {
                return Err(DriverError::Pci(format!(
                    "AMD device {} has no VRAM BAR2 — cannot initialize display without framebuffer aperture",
                    info.location
                )));
            }
        };

        // A missing IRQ is survivable (no vblank/hotplug events), so warn only.
        let irq_handle = match info.irq {
            Some(irq) => Some(
                IrqHandle::request(irq)
                    .map_err(|e| DriverError::Io(format!("failed to request IRQ {irq}: {e}")))?,
            ),
            None => {
                warn!(
                    "redox-drm: AMD device {} has no IRQ assigned",
                    info.location
                );
                None
            }
        };

        let display = DisplayCore::with_framebuffer(mmio.as_ptr(), mmio.size(), fb_phys, fb_size)?;
        let (connectors, encoders) = detect_display_topology(&display)?;

        // Publish the MMIO window for the ring module before any RingManager
        // is created, so initialize() below can program the SDMA registers.
        RingManager::bind_mmio(&mmio);

        let mut gtt = GttManager::new();
        gtt.initialize()?;
        gtt.program_vm_context(&mmio)?;

        let mut ring = RingManager::new();
        ring.initialize()?;

        let fw_count = firmware.len();
        // DMCUB is only probed, not loaded here; absence degrades gracefully.
        let dmcub_available = firmware.contains_key("amdgpu/dmcub_dcn31.bin")
            || firmware.contains_key("amdgpu/dcn_3_1_dmcub");
        if !dmcub_available {
            warn!("redox-drm: DMCUB firmware not found in cache — display core may fail to initialize");
        }

        info!(
            "redox-drm: AMD driver ready for {} with {} connector(s), {} firmware blob(s) loaded",
            info.location,
            connectors.len(),
            fw_count
        );

        Ok(Self {
            info,
            mmio,
            irq_handle,
            display,
            gem: Mutex::new(GemManager::new()),
            connectors: Mutex::new(connectors),
            // NOTE(review): a single CRTC id 1 is assumed here regardless of
            // how many connectors were detected — confirm for multi-head.
            crtcs: Mutex::new(vec![Crtc::new(1)]),
            encoders: Mutex::new(encoders),
            gtt: Mutex::new(gtt),
            ring: Mutex::new(ring),
            vblank_count: AtomicU64::new(0),
            hotplug_pending: AtomicBool::new(false),
            firmware,
        })
    }

    /// Service one interrupt: decode hotplug first, then vblank, then give
    /// up as `Unknown`. The IH read pointer is acknowledged on every path so
    /// the interrupt line is deasserted even when the source is undecoded.
    pub fn process_irq(&self) -> Result<IrqEvent> {
        let ih_status = self.read_mmio_reg(AMD_IH_STATUS);
        let ih_cntl = self.read_mmio_reg(AMD_IH_CNTL);
        let ih_rptr = self.read_mmio_reg(AMD_IH_RB_RPTR);
        let ih_wptr = self.read_mmio_reg(AMD_IH_RB_WPTR);
        // rptr != wptr means unconsumed IH ring entries are pending.
        let ring_pending = ih_rptr != ih_wptr;

        if ih_status & AMD_IH_STATUS_RING_OVERFLOW_MASK != 0 {
            warn!(
                "redox-drm: AMD IH overflow status={:#010x} cntl={:#010x}",
                ih_status, ih_cntl
            );
        }

        if let Some(connector_id) = self.detect_hotplug_interrupt() {
            // Flag the rescan window so concurrent observers can tell a
            // topology refresh is in flight.
            self.hotplug_pending.store(true, Ordering::SeqCst);
            self.refresh_connectors()?;
            self.hotplug_pending.store(false, Ordering::SeqCst);
            self.acknowledge_ih(ih_wptr);

            debug!(
                "redox-drm: hotplug interrupt on connector {} status={:#010x} cntl={:#010x} rptr={:#010x} wptr={:#010x}",
                connector_id, ih_status, ih_cntl, ih_rptr, ih_wptr
            );

            return Ok(IrqEvent::Hotplug { connector_id });
        }

        if ring_pending || (ih_status & AMD_IH_STATUS_INTERRUPT_PENDING_MASK != 0) {
            if let Some(crtc_id) = self.detect_vblank_interrupt() {
                // fetch_add returns the previous value; +1 yields the new total.
                let count = self.vblank_count.fetch_add(1, Ordering::SeqCst) + 1;
                self.acknowledge_ih(ih_wptr);

                debug!(
                    "redox-drm: vblank interrupt on CRTC {} count={} status={:#010x} cntl={:#010x} rptr={:#010x} wptr={:#010x}",
                    crtc_id, count, ih_status, ih_cntl, ih_rptr, ih_wptr
                );

                return Ok(IrqEvent::Vblank { crtc_id, count });
            }
        }

        self.acknowledge_ih(ih_wptr);
        Ok(IrqEvent::Unknown)
    }

    /// Read a register by dword index (converted to a byte offset).
    fn read_mmio_reg(&self, register_index: usize) -> u32 {
        self.mmio.read32(register_index.saturating_mul(4))
    }

    /// Write a register by dword index (converted to a byte offset).
    fn write_mmio_reg(&self, register_index: usize, value: u32) {
        self.mmio.write32(register_index.saturating_mul(4), value);
    }

    /// Scan the per-display interrupt status banks for a vblank bit and map
    /// the bank index to a CRTC id (index + 1). Prefers CRTCs that currently
    /// have a mode programmed; falls back to CRTC 1 if the lock is poisoned.
    fn detect_vblank_interrupt(&self) -> Option<u32> {
        let active_crtc_ids = self
            .crtcs
            .lock()
            .map(|crtcs| {
                crtcs
                    .iter()
                    .filter(|crtc| crtc.mode.is_some())
                    .map(|crtc| crtc.id)
                    .collect::<Vec<_>>()
            })
            .unwrap_or_else(|_| vec![1]);

        for (index, register) in AMD_DCN_DISP_INTERRUPT_STATUS.iter().copied().enumerate() {
            let status = self.read_mmio_reg(register);
            if status & AMD_DISP_INTERRUPT_VBLANK_MASK == 0 {
                continue;
            }

            // NOTE(review): the vblank status bit is not explicitly acked
            // here — confirm acknowledge_ih() is sufficient to clear it.
            let crtc_id = index as u32 + 1;
            if active_crtc_ids.is_empty() || active_crtc_ids.contains(&crtc_id) {
                return Some(crtc_id);
            }
        }

        None
    }

    /// Look for a pending hotplug: first the dedicated HPD status bank, then
    /// the display-interrupt bank's HPD summary bit. Acks whatever was found
    /// and returns the 1-based connector index.
    fn detect_hotplug_interrupt(&self) -> Option<u32> {
        for (index, register) in AMD_DCN_HPD_INT_STATUS.iter().copied().enumerate() {
            let status = self.read_mmio_reg(register);
            if status & (AMD_HPD_INT_STATUS_MASK | AMD_HPD_RX_INT_STATUS_MASK) != 0 {
                self.acknowledge_hotplug(index, status);
                return Some(index as u32 + 1);
            }
        }

        // Fallback: the summary bit in the display interrupt bank, with the
        // detailed status fetched from the matching HPD register.
        for (index, register) in AMD_DCN_DISP_INTERRUPT_STATUS.iter().copied().enumerate() {
            let status = self.read_mmio_reg(register);
            if status & AMD_DISP_INTERRUPT_HPD_MASK != 0 {
                let hpd_status = self.read_mmio_reg(AMD_DCN_HPD_INT_STATUS[index]);
                self.acknowledge_hotplug(index, hpd_status);
                return Some(index as u32 + 1);
            }
        }

        None
    }

    /// Write the HPD acknowledge bits (connect and/or RX) that match the
    /// status bits observed, preserving the rest of the control register.
    fn acknowledge_hotplug(&self, hpd_index: usize, hpd_status: u32) {
        let control_register = AMD_DCN_HPD_CONTROL[hpd_index];
        let control = self.read_mmio_reg(control_register);
        let ack = control
            | if hpd_status & AMD_HPD_INT_STATUS_MASK != 0 {
                AMD_HPD_INT_ACK_MASK
            } else {
                0
            }
            | if hpd_status & AMD_HPD_RX_INT_STATUS_MASK != 0 {
                AMD_HPD_RX_INT_ACK_MASK
            } else {
                0
            };
        self.write_mmio_reg(control_register, ack);
    }

    /// Consume the IH ring by advancing the read pointer to the write
    /// pointer, then rewrite CNTL/RB_CNTL with their current values.
    fn acknowledge_ih(&self, ih_wptr: u32) {
        self.write_mmio_reg(AMD_IH_RB_RPTR, ih_wptr);

        // NOTE(review): these read-back/write-back cycles look intended to
        // clear write-1-to-clear bits (e.g. overflow) — confirm per ASIC.
        let ih_cntl = self.read_mmio_reg(AMD_IH_CNTL);
        self.write_mmio_reg(AMD_IH_CNTL, ih_cntl);

        let ih_rb_cntl = self.read_mmio_reg(AMD_IH_RB_CNTL);
        self.write_mmio_reg(AMD_IH_RB_CNTL, ih_rb_cntl);
    }

    /// Re-detect the display topology and atomically (per lock) replace the
    /// cached connector and encoder lists. Used after a hotplug event.
    fn refresh_connectors(&self) -> Result<()> {
        let (connectors, encoders) = detect_display_topology(&self.display)?;

        // Scoped so the connector lock is released before taking the
        // encoder lock (avoids holding both at once).
        {
            let mut connector_state = self
                .connectors
                .lock()
                .map_err(|_| DriverError::Initialization("connector state poisoned".to_string()))?;
            *connector_state = connectors;
        }

        {
            let mut encoder_state = self
                .encoders
                .lock()
                .map_err(|_| DriverError::Initialization("encoder state poisoned".to_string()))?;
            *encoder_state = encoders;
        }

        Ok(())
    }

    /// Return the GPU VA for a GEM object, mapping it into the GTT on first
    /// use. Locks are taken in short, non-overlapping scopes (gem → gtt →
    /// gem) so the GEM and GTT locks are never held simultaneously; a
    /// concurrent mapping can therefore race, but the fast path re-checks.
    fn ensure_gem_gpu_mapping(&self, fb_handle: GemHandle) -> Result<u64> {
        // Fast path: already mapped.
        {
            let gem = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
            if let Some(addr) = gem.object(fb_handle)?.gpu_addr {
                return Ok(addr);
            }
        }

        let (phys_addr, fb_size) = {
            let gem = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
            let obj = gem.object(fb_handle)?;
            (obj.phys_addr as u64, obj.size)
        };

        let gpu_addr = {
            let mut gtt = self
                .gtt
                .lock()
                .map_err(|_| DriverError::Initialization("GTT manager poisoned".to_string()))?;
            let addr = gtt.alloc_gpu_range(fb_size)?;
            if let Err(e) = gtt.map_range(addr, phys_addr, fb_size, 0) {
                // Best-effort rollback of a (possibly partial) mapping; the
                // VA range is only recycled when the unmap succeeded.
                if gtt.unmap_range(addr, fb_size).is_ok() {
                    gtt.release_range(addr, fb_size);
                }
                return Err(e);
            }
            if let Err(e) = gtt.flush_tlb(&self.mmio) {
                // Rollback path: only release the VA range if both the unmap
                // and the subsequent TLB flush succeeded, so a range with
                // possibly-stale translations is never reused.
                if gtt.unmap_range(addr, fb_size).is_ok() {
                    if gtt.flush_tlb(&self.mmio).is_ok() {
                        gtt.release_range(addr, fb_size);
                    }
                }
                return Err(e);
            }
            addr
        };

        // Publish the mapping on the GEM object; undo the GTT work if that fails.
        if let Err(e) = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?
            .set_gpu_addr(fb_handle, gpu_addr)
        {
            let mut gtt = self
                .gtt
                .lock()
                .map_err(|_| DriverError::Initialization("GTT manager poisoned".to_string()))?;
            // NOTE(review): this branch flushes before unmapping, unlike the
            // rollback above — confirm the intended invalidation ordering.
            if gtt.flush_tlb(&self.mmio).is_ok() && gtt.unmap_range(gpu_addr, fb_size).is_ok() {
                gtt.release_range(gpu_addr, fb_size);
            } else {
                let _ = gtt.unmap_range(gpu_addr, fb_size);
            }
            return Err(e);
        }

        Ok(gpu_addr)
    }
}
impl GpuDriver for AmdDriver {
|
||||
fn driver_name(&self) -> &str {
|
||||
"amdgpu-redox"
|
||||
}
|
||||
|
||||
fn driver_desc(&self) -> &str {
|
||||
"AMD GPU DRM/KMS backend for Redox"
|
||||
}
|
||||
|
||||
fn driver_date(&self) -> &str {
|
||||
"2026-04-11"
|
||||
}
|
||||
|
||||
fn detect_connectors(&self) -> Vec<ConnectorInfo> {
|
||||
match self.connectors.lock() {
|
||||
Ok(connectors) => connectors
|
||||
.iter()
|
||||
.map(|connector| connector.info.clone())
|
||||
.collect(),
|
||||
Err(poisoned) => {
|
||||
warn!("redox-drm: connector state poisoned; using inner state");
|
||||
poisoned
|
||||
.into_inner()
|
||||
.iter()
|
||||
.map(|connector| connector.info.clone())
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_modes(&self, connector_id: u32) -> Vec<ModeInfo> {
|
||||
self.detect_connectors()
|
||||
.into_iter()
|
||||
.find(|connector| connector.id == connector_id)
|
||||
.map(|connector| connector.modes)
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn set_crtc(
|
||||
&self,
|
||||
crtc_id: u32,
|
||||
fb_handle: u32,
|
||||
connectors: &[u32],
|
||||
mode: &ModeInfo,
|
||||
) -> Result<()> {
|
||||
let fb_addr = self.ensure_gem_gpu_mapping(fb_handle)?;
|
||||
|
||||
self.display
|
||||
.set_crtc(crtc_id, fb_addr, mode.hdisplay as u32, mode.vdisplay as u32)?;
|
||||
|
||||
let mut crtcs = self
|
||||
.crtcs
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("CRTC state poisoned".to_string()))?;
|
||||
let crtc = crtcs
|
||||
.iter_mut()
|
||||
.find(|candidate| candidate.id == crtc_id)
|
||||
.ok_or_else(|| DriverError::NotFound(format!("unknown CRTC {crtc_id}")))?;
|
||||
crtc.program(fb_handle, connectors, mode)
|
||||
}
|
||||
|
||||
fn page_flip(&self, crtc_id: u32, fb_handle: u32, _flags: u32) -> Result<u64> {
|
||||
{
|
||||
let crtcs = self
|
||||
.crtcs
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("CRTC state poisoned".to_string()))?;
|
||||
if !crtcs.iter().any(|crtc| crtc.id == crtc_id) {
|
||||
return Err(DriverError::NotFound(format!("unknown CRTC {crtc_id}")));
|
||||
}
|
||||
}
|
||||
|
||||
let fb_addr = self.ensure_gem_gpu_mapping(fb_handle)?;
|
||||
|
||||
self.display.flip_surface(crtc_id, fb_addr)?;
|
||||
|
||||
let mut ring = self
|
||||
.ring
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("ring manager poisoned".to_string()))?;
|
||||
ring.page_flip()
|
||||
}
|
||||
|
||||
fn get_vblank(&self, crtc_id: u32) -> Result<u64> {
|
||||
let crtcs = self
|
||||
.crtcs
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("CRTC state poisoned".to_string()))?;
|
||||
if !crtcs.iter().any(|crtc| crtc.id == crtc_id) {
|
||||
return Err(DriverError::NotFound(format!("unknown CRTC {crtc_id}")));
|
||||
}
|
||||
|
||||
Ok(self.vblank_count.load(Ordering::SeqCst))
|
||||
}
|
||||
|
||||
fn gem_create(&self, size: u64) -> Result<GemHandle> {
|
||||
let mut gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
|
||||
gem.create(size)
|
||||
}
|
||||
|
||||
fn gem_close(&self, handle: GemHandle) -> Result<()> {
|
||||
let gpu_info = {
|
||||
let gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
|
||||
let obj = gem.object(handle)?;
|
||||
(obj.gpu_addr, obj.size)
|
||||
};
|
||||
|
||||
if let (Some(gpu_addr), fb_size) = gpu_info {
|
||||
let mut gtt = self
|
||||
.gtt
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("GTT manager poisoned".to_string()))?;
|
||||
gtt.flush_tlb(&self.mmio)?;
|
||||
gtt.unmap_range(gpu_addr, fb_size)?;
|
||||
gtt.release_range(gpu_addr, fb_size);
|
||||
}
|
||||
|
||||
self.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?
|
||||
.close(handle)
|
||||
}
|
||||
|
||||
fn gem_mmap(&self, handle: GemHandle) -> Result<usize> {
|
||||
let gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
|
||||
gem.mmap(handle)
|
||||
}
|
||||
|
||||
fn gem_size(&self, handle: GemHandle) -> Result<u64> {
|
||||
let gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
|
||||
Ok(gem.object(handle)?.size)
|
||||
}
|
||||
|
||||
fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32> {
|
||||
let mut gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
|
||||
gem.export_dmafd(handle)
|
||||
}
|
||||
|
||||
fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle> {
|
||||
let gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
|
||||
gem.import_dmafd(fd)
|
||||
}
|
||||
|
||||
fn get_edid(&self, connector_id: u32) -> Vec<u8> {
|
||||
match self.connectors.lock() {
|
||||
Ok(connectors) => connectors
|
||||
.iter()
|
||||
.find(|connector| connector.info.id == connector_id)
|
||||
.map(|connector| connector.edid.clone())
|
||||
.unwrap_or_default(),
|
||||
Err(poisoned) => poisoned
|
||||
.into_inner()
|
||||
.iter()
|
||||
.find(|connector| connector.info.id == connector_id)
|
||||
.map(|connector| connector.edid.clone())
|
||||
.unwrap_or_default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_irq(&self) -> Result<Option<(u32, u64)>> {
|
||||
match self.process_irq()? {
|
||||
IrqEvent::Vblank { crtc_id, count } => {
|
||||
debug!(
|
||||
"redox-drm: handled AMD vblank IRQ for {} CRTC {} count={} irq={:?}",
|
||||
self.info.location,
|
||||
crtc_id,
|
||||
count,
|
||||
self.irq_handle.as_ref().map(IrqHandle::irq)
|
||||
);
|
||||
Ok(Some((crtc_id, count)))
|
||||
}
|
||||
IrqEvent::Hotplug { connector_id } => {
|
||||
info!(
|
||||
"redox-drm: handled AMD hotplug IRQ for {} connector {} irq={:?}",
|
||||
self.info.location,
|
||||
connector_id,
|
||||
self.irq_handle.as_ref().map(IrqHandle::irq)
|
||||
);
|
||||
Ok(None)
|
||||
}
|
||||
IrqEvent::Unknown => {
|
||||
debug!(
|
||||
"redox-drm: handled AMD IRQ for {} with no decoded source irq={:?}",
|
||||
self.info.location,
|
||||
self.irq_handle.as_ref().map(IrqHandle::irq)
|
||||
);
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn detect_display_topology(display: &DisplayCore) -> Result<(Vec<Connector>, Vec<Encoder>)> {
|
||||
let detected = display.detect_connectors()?;
|
||||
let mut connectors = Vec::new();
|
||||
let mut encoders = Vec::new();
|
||||
|
||||
for (idx, connector) in detected.into_iter().enumerate() {
|
||||
let encoder_id = connector.encoder_id;
|
||||
encoders.push(Encoder::new(encoder_id, 1));
|
||||
let edid = display.read_edid(idx as u32);
|
||||
connectors.push(Connector {
|
||||
info: connector,
|
||||
edid: if edid.is_empty() {
|
||||
synthetic_edid()
|
||||
} else {
|
||||
edid
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
Ok((connectors, encoders))
|
||||
}
|
||||
|
||||
fn find_memory_bar0(info: &PciDeviceInfo) -> Result<PciBarInfo> {
|
||||
info.find_memory_bar(0)
|
||||
.copied()
|
||||
.ok_or_else(|| DriverError::Pci(format!("device {} has no MMIO BAR0", info.location)))
|
||||
}
|
||||
@@ -0,0 +1,404 @@
|
||||
use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
|
||||
|
||||
use log::{info, warn};
|
||||
use redox_driver_sys::dma::DmaBuffer;
|
||||
use redox_driver_sys::memory::MmioRegion;
|
||||
|
||||
use crate::driver::{DriverError, Result};
|
||||
|
||||
const RING_BUFFER_BYTES: usize = 4096;
|
||||
const RING_BUFFER_DWORDS: usize = RING_BUFFER_BYTES / 4;
|
||||
const RING_ALIGNMENT_BYTES: usize = 4096;
|
||||
const FENCE_BUFFER_BYTES: usize = 16;
|
||||
const WPTR_STRIDE_DWORDS: usize = 1;
|
||||
|
||||
const SDMA_OP_NOP: u32 = 0;
|
||||
const SDMA_OP_FENCE: u32 = 5;
|
||||
const SDMA_OP_TRAP: u32 = 6;
|
||||
|
||||
const SDMA0_GFX_RB_CNTL: usize = 0x0080 * 4;
|
||||
const SDMA0_GFX_RB_BASE: usize = 0x0081 * 4;
|
||||
const SDMA0_GFX_RB_BASE_HI: usize = 0x0082 * 4;
|
||||
const SDMA0_GFX_RB_RPTR: usize = 0x0083 * 4;
|
||||
const SDMA0_GFX_RB_RPTR_HI: usize = 0x0084 * 4;
|
||||
const SDMA0_GFX_RB_WPTR: usize = 0x0085 * 4;
|
||||
const SDMA0_GFX_RB_WPTR_HI: usize = 0x0086 * 4;
|
||||
const SDMA0_GFX_RB_WPTR_POLL_CNTL: usize = 0x0087 * 4;
|
||||
const SDMA0_GFX_RB_RPTR_ADDR_HI: usize = 0x0088 * 4;
|
||||
const SDMA0_GFX_RB_RPTR_ADDR_LO: usize = 0x0089 * 4;
|
||||
const SDMA0_GFX_IB_CNTL: usize = 0x008a * 4;
|
||||
const SDMA0_GFX_RB_WPTR_POLL_ADDR_HI: usize = 0x00b2 * 4;
|
||||
const SDMA0_GFX_RB_WPTR_POLL_ADDR_LO: usize = 0x00b3 * 4;
|
||||
const SDMA0_GFX_MINOR_PTR_UPDATE: usize = 0x00b5 * 4;
|
||||
|
||||
const SDMA_RB_CNTL_RB_ENABLE: u32 = 1 << 0;
|
||||
const SDMA_RB_CNTL_RB_SIZE_SHIFT: u32 = 1;
|
||||
const SDMA_RB_CNTL_RB_SIZE_MASK: u32 = 0x1f << SDMA_RB_CNTL_RB_SIZE_SHIFT;
|
||||
const SDMA_RB_CNTL_RPTR_WRITEBACK_ENABLE: u32 = 1 << 12;
|
||||
const SDMA_IB_CNTL_IB_ENABLE: u32 = 1 << 0;
|
||||
|
||||
const FENCE_OFFSET_BYTES: usize = 0;
|
||||
const WPTR_POLL_OFFSET_BYTES: usize = 8;
|
||||
|
||||
static MMIO_BASE: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
|
||||
static MMIO_SIZE: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
/// Raw MMIO window captured as plain integers: base address plus byte size.
///
/// Both fields are `usize`, so the type is a plain value and `Send`/`Sync`
/// are auto-implemented by the compiler — the previous manual
/// `unsafe impl Send/Sync` blocks were redundant unsafe and are removed.
/// All register access goes through the bounds-checked volatile helpers in
/// the `impl` below.
#[derive(Clone, Copy, Debug)]
struct MmioBinding {
    base: usize,
    size: usize,
}
||||
impl MmioBinding {
    /// Snapshot the globally published MMIO binding, if `RingManager::
    /// bind_mmio` has stored one. Returns `None` until a base is published.
    fn try_load() -> Option<Self> {
        let base = MMIO_BASE.load(Ordering::Acquire);
        let size = MMIO_SIZE.load(Ordering::Acquire);
        if base.is_null() {
            return None;
        }
        // NOTE(review): base and size are two independent atomics; a reader
        // could see a non-null base with size still 0 — that only makes the
        // bounds checks stricter, but confirm this window is acceptable.
        Some(Self {
            base: base as usize,
            size,
        })
    }

    /// Bounds-checked volatile 32-bit register read at byte `offset`.
    fn read32(&self, offset: usize) -> Result<u32> {
        // checked_add guards both arithmetic overflow and out-of-window access.
        if offset.checked_add(4).is_none_or(|end| end > self.size) {
            return Err(DriverError::Mmio(format!(
                "AMD ring MMIO read out of bounds: offset={offset:#x} size={:#x}",
                self.size
            )));
        }

        // SAFETY: offset+4 <= size was verified above, so the access stays
        // inside the mapped window; volatile prevents elision/reordering.
        let ptr = (self.base + offset) as *const u32;
        Ok(unsafe { core::ptr::read_volatile(ptr) })
    }

    /// Bounds-checked volatile 32-bit register write at byte `offset`.
    fn write32(&self, offset: usize, value: u32) -> Result<()> {
        if offset.checked_add(4).is_none_or(|end| end > self.size) {
            return Err(DriverError::Mmio(format!(
                "AMD ring MMIO write out of bounds: offset={offset:#x} size={:#x}",
                self.size
            )));
        }

        // SAFETY: bounds verified above; volatile write is required for MMIO.
        let ptr = (self.base + offset) as *mut u32;
        unsafe { core::ptr::write_volatile(ptr, value) };
        Ok(())
    }
}
||||
|
||||
/// Software state for the SDMA GFX ring used to serialize page-flip work.
///
/// Ring pointers are tracked in dwords; fence seqnos are written back into
/// `fence_buffer`.
/// NOTE(review): `#[derive(Default)]` yields `ring_size_dwords == 0`, which
/// disagrees with `new()` (RING_BUFFER_DWORDS) and would panic in
/// `ring_size_order()` — confirm no caller constructs this via `Default`.
#[derive(Default)]
pub struct RingManager {
    initialized: bool,               // set once initialize() succeeds
    ring_buffer: Option<DmaBuffer>,  // command ring backing store
    fence_buffer: Option<DmaBuffer>, // fence writeback + wptr shadow
    mmio: Option<MmioBinding>,       // SDMA register window, if bound
    ring_size_dwords: u32,           // ring capacity in dwords (power of two)
    read_ptr: u64,                   // consumer position, in dwords
    write_ptr: u64,                  // producer position, in dwords
    next_seqno: u64,                 // next fence sequence number to issue
    last_signaled_seqno: u64,        // highest seqno known complete
}
||||
impl RingManager {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
initialized: false,
|
||||
ring_buffer: None,
|
||||
fence_buffer: None,
|
||||
mmio: None,
|
||||
ring_size_dwords: RING_BUFFER_DWORDS as u32,
|
||||
read_ptr: 0,
|
||||
write_ptr: 0,
|
||||
next_seqno: 1,
|
||||
last_signaled_seqno: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn initialize(&mut self) -> Result<()> {
|
||||
let mut ring_buffer = DmaBuffer::allocate(RING_BUFFER_BYTES, RING_ALIGNMENT_BYTES)
|
||||
.map_err(|e| DriverError::Buffer(format!("ring buffer allocation failed: {e}")))?;
|
||||
let mut fence_buffer =
|
||||
DmaBuffer::allocate(FENCE_BUFFER_BYTES, core::mem::align_of::<u64>())
|
||||
.map_err(|e| DriverError::Buffer(format!("fence buffer allocation failed: {e}")))?;
|
||||
|
||||
Self::zero_dma(&mut ring_buffer);
|
||||
Self::zero_dma(&mut fence_buffer);
|
||||
|
||||
self.mmio = MmioBinding::try_load();
|
||||
self.program_ring(&ring_buffer, &fence_buffer)?;
|
||||
|
||||
self.ring_buffer = Some(ring_buffer);
|
||||
self.fence_buffer = Some(fence_buffer);
|
||||
self.read_ptr = 0;
|
||||
self.write_ptr = 0;
|
||||
self.next_seqno = 1;
|
||||
self.last_signaled_seqno = 0;
|
||||
self.initialized = true;
|
||||
|
||||
info!(
|
||||
"redox-drm: AMD ring manager initialized with {} DW ring buffer{}",
|
||||
self.ring_size_dwords,
|
||||
if self.mmio.is_some() {
|
||||
" and SDMA MMIO programming"
|
||||
} else {
|
||||
" (MMIO binding unavailable; submissions stay software-tracked)"
|
||||
}
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn page_flip(&mut self) -> Result<u64> {
|
||||
self.ensure_initialized()?;
|
||||
|
||||
let seqno = self.next_seqno;
|
||||
self.next_seqno = self.next_seqno.saturating_add(1);
|
||||
|
||||
let mut packet = Vec::with_capacity(16);
|
||||
self.emit_flip(&mut packet, seqno);
|
||||
self.emit_fence(&mut packet, seqno)?;
|
||||
|
||||
self.submit(&packet, seqno)
|
||||
}
|
||||
|
||||
pub(crate) fn bind_mmio(mmio: &MmioRegion) {
|
||||
MMIO_BASE.store(mmio.as_ptr() as *mut u8, Ordering::Release);
|
||||
MMIO_SIZE.store(mmio.size(), Ordering::Release);
|
||||
}
|
||||
|
||||
fn ensure_initialized(&self) -> Result<()> {
|
||||
if self.initialized {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(DriverError::Initialization(
|
||||
"ring manager must be initialized before page flips".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn program_ring(&self, ring_buffer: &DmaBuffer, fence_buffer: &DmaBuffer) -> Result<()> {
|
||||
let Some(mmio) = self.mmio else {
|
||||
warn!(
|
||||
"redox-drm: AMD ring manager has no MMIO binding; skipping SDMA register programming"
|
||||
);
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let ring_addr = ring_buffer.physical_address() as u64;
|
||||
let fence_addr = fence_buffer.physical_address() as u64 + FENCE_OFFSET_BYTES as u64;
|
||||
let wptr_poll_addr = fence_buffer.physical_address() as u64 + WPTR_POLL_OFFSET_BYTES as u64;
|
||||
|
||||
let mut rb_cntl = mmio.read32(SDMA0_GFX_RB_CNTL)?;
|
||||
rb_cntl &= !(SDMA_RB_CNTL_RB_ENABLE | SDMA_RB_CNTL_RB_SIZE_MASK);
|
||||
rb_cntl |=
|
||||
(self.ring_size_order() << SDMA_RB_CNTL_RB_SIZE_SHIFT) & SDMA_RB_CNTL_RB_SIZE_MASK;
|
||||
mmio.write32(SDMA0_GFX_RB_CNTL, rb_cntl)?;
|
||||
|
||||
mmio.write32(SDMA0_GFX_RB_RPTR, 0)?;
|
||||
mmio.write32(SDMA0_GFX_RB_RPTR_HI, 0)?;
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR, 0)?;
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR_HI, 0)?;
|
||||
|
||||
mmio.write32(SDMA0_GFX_RB_RPTR_ADDR_HI, upper_32(fence_addr))?;
|
||||
mmio.write32(SDMA0_GFX_RB_RPTR_ADDR_LO, lower_32(fence_addr) & !0x3)?;
|
||||
|
||||
rb_cntl |= SDMA_RB_CNTL_RPTR_WRITEBACK_ENABLE;
|
||||
mmio.write32(SDMA0_GFX_RB_CNTL, rb_cntl)?;
|
||||
|
||||
mmio.write32(SDMA0_GFX_RB_BASE, lower_32(ring_addr >> 8))?;
|
||||
mmio.write32(SDMA0_GFX_RB_BASE_HI, lower_32(ring_addr >> 40))?;
|
||||
|
||||
mmio.write32(SDMA0_GFX_MINOR_PTR_UPDATE, 1)?;
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR, 0)?;
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR_HI, 0)?;
|
||||
mmio.write32(SDMA0_GFX_MINOR_PTR_UPDATE, 0)?;
|
||||
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR_POLL_ADDR_LO, lower_32(wptr_poll_addr))?;
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR_POLL_ADDR_HI, upper_32(wptr_poll_addr))?;
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR_POLL_CNTL, 0)?;
|
||||
|
||||
rb_cntl |= SDMA_RB_CNTL_RB_ENABLE;
|
||||
mmio.write32(SDMA0_GFX_RB_CNTL, rb_cntl)?;
|
||||
|
||||
let mut ib_cntl = mmio.read32(SDMA0_GFX_IB_CNTL)?;
|
||||
ib_cntl |= SDMA_IB_CNTL_IB_ENABLE;
|
||||
mmio.write32(SDMA0_GFX_IB_CNTL, ib_cntl)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn submit(&mut self, commands: &[u32], seqno: u64) -> Result<u64> {
|
||||
self.refresh_read_ptr();
|
||||
self.ensure_space(commands.len())?;
|
||||
|
||||
for &command in commands {
|
||||
self.write_ring_dword(command)?;
|
||||
}
|
||||
|
||||
fence(Ordering::Release);
|
||||
self.publish_wptr()?;
|
||||
|
||||
if self.mmio.is_none() {
|
||||
self.write_completed_seqno(seqno)?;
|
||||
}
|
||||
|
||||
Ok(seqno)
|
||||
}
|
||||
|
||||
fn refresh_read_ptr(&mut self) {
|
||||
if let Some(mmio) = self.mmio {
|
||||
let low = mmio.read32(SDMA0_GFX_RB_RPTR).unwrap_or(0) as u64;
|
||||
let high = mmio.read32(SDMA0_GFX_RB_RPTR_HI).unwrap_or(0) as u64;
|
||||
self.read_ptr = ((high << 32) | low) >> 2;
|
||||
} else {
|
||||
self.read_ptr = self.write_ptr;
|
||||
}
|
||||
}
|
||||
|
||||
fn ensure_space(&self, required_dwords: usize) -> Result<()> {
|
||||
if required_dwords >= self.ring_capacity() {
|
||||
return Err(DriverError::Buffer(format!(
|
||||
"ring submission too large: {} DW exceeds capacity {} DW",
|
||||
required_dwords,
|
||||
self.ring_capacity() - 1
|
||||
)));
|
||||
}
|
||||
|
||||
let used = self.used_dwords();
|
||||
let free = self.ring_capacity().saturating_sub(used).saturating_sub(1);
|
||||
if required_dwords <= free {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(DriverError::Buffer(format!(
|
||||
"ring buffer full: required {} DW, free {} DW",
|
||||
required_dwords, free
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
fn used_dwords(&self) -> usize {
|
||||
let size = self.ring_capacity() as u64;
|
||||
((self.write_ptr + size).wrapping_sub(self.read_ptr) % size) as usize
|
||||
}
|
||||
|
||||
fn write_ring_dword(&mut self, value: u32) -> Result<()> {
|
||||
let capacity = self.ring_capacity();
|
||||
let ring_buffer = self
|
||||
.ring_buffer
|
||||
.as_mut()
|
||||
.ok_or_else(|| DriverError::Initialization("ring buffer missing".to_string()))?;
|
||||
|
||||
let index = (self.write_ptr as usize) % capacity;
|
||||
let ptr = unsafe {
|
||||
ring_buffer
|
||||
.as_mut_ptr()
|
||||
.add(index * core::mem::size_of::<u32>()) as *mut u32
|
||||
};
|
||||
unsafe { core::ptr::write_volatile(ptr, value) };
|
||||
|
||||
self.write_ptr = (self.write_ptr + WPTR_STRIDE_DWORDS as u64) % capacity as u64;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn publish_wptr(&mut self) -> Result<()> {
|
||||
self.write_wptr_shadow(self.write_ptr)?;
|
||||
|
||||
let Some(mmio) = self.mmio else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
mmio.write32(SDMA0_GFX_MINOR_PTR_UPDATE, 1)?;
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR, lower_32(self.write_ptr << 2))?;
|
||||
mmio.write32(SDMA0_GFX_RB_WPTR_HI, upper_32(self.write_ptr << 2))?;
|
||||
mmio.write32(SDMA0_GFX_MINOR_PTR_UPDATE, 0)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn emit_nop(&self, packet: &mut Vec<u32>, count: u32) {
|
||||
for _ in 0..count {
|
||||
packet.push(SDMA_OP_NOP);
|
||||
}
|
||||
}
|
||||
|
||||
fn emit_flip(&self, packet: &mut Vec<u32>, seqno: u64) {
|
||||
self.emit_nop(packet, 2);
|
||||
packet.push(0x5049_4c46);
|
||||
packet.push(lower_32(seqno));
|
||||
packet.push(upper_32(seqno));
|
||||
}
|
||||
|
||||
/// Append fence packets that make the engine write `seqno` to the fence
/// buffer, followed by a trap to raise an interrupt.
///
/// The 64-bit seqno is written as two 32-bit FENCE packets: the low half
/// at `fence_addr`, the high half at `fence_addr + 4`.
fn emit_fence(&self, packet: &mut Vec<u32>, seqno: u64) -> Result<()> {
    let fence_addr = self.fence_address()?;

    // Low dword of the sequence number.
    packet.push(SDMA_OP_FENCE);
    packet.push(lower_32(fence_addr));
    packet.push(upper_32(fence_addr));
    packet.push(lower_32(seqno));

    // High dword of the sequence number, 4 bytes further.
    packet.push(SDMA_OP_FENCE);
    packet.push(lower_32(fence_addr + 4));
    packet.push(upper_32(fence_addr + 4));
    packet.push(upper_32(seqno));

    // Trap packet: request an interrupt once the fence writes retire.
    packet.push(SDMA_OP_TRAP);
    packet.push(0);

    Ok(())
}
|
||||
|
||||
/// Bus/physical address the engine writes fence values to.
///
/// # Errors
/// Returns `DriverError::Initialization` when the fence buffer was never
/// allocated.
fn fence_address(&self) -> Result<u64> {
    let fence_buffer = self
        .fence_buffer
        .as_ref()
        .ok_or_else(|| DriverError::Initialization("fence buffer missing".to_string()))?;
    Ok(fence_buffer.physical_address() as u64 + FENCE_OFFSET_BYTES as u64)
}
|
||||
|
||||
/// CPU-side fence signal: store `seqno` at the fence location and record
/// it as the last signaled value.
///
/// Volatile store because the location is DMA memory also read/written by
/// the engine.
fn write_completed_seqno(&mut self, seqno: u64) -> Result<()> {
    let fence_buffer = self
        .fence_buffer
        .as_mut()
        .ok_or_else(|| DriverError::Initialization("fence buffer missing".to_string()))?;
    // SAFETY: FENCE_OFFSET_BYTES is assumed to lie within the fence buffer
    // with room for a u64 — TODO confirm against the allocation size.
    let ptr = unsafe { fence_buffer.as_mut_ptr().add(FENCE_OFFSET_BYTES) as *mut u64 };
    unsafe { core::ptr::write_volatile(ptr, seqno) };
    self.last_signaled_seqno = seqno;
    Ok(())
}
|
||||
|
||||
/// Update the write-pointer shadow (poll) location in the fence buffer.
///
/// The engine polls this address for the wptr in byte units, hence the
/// `<< 2` conversion from dwords.
fn write_wptr_shadow(&mut self, wptr_dwords: u64) -> Result<()> {
    let fence_buffer = self
        .fence_buffer
        .as_mut()
        .ok_or_else(|| DriverError::Initialization("fence buffer missing".to_string()))?;
    // SAFETY: WPTR_POLL_OFFSET_BYTES is assumed in-bounds for a u64 store —
    // TODO confirm against the fence buffer size.
    let ptr = unsafe { fence_buffer.as_mut_ptr().add(WPTR_POLL_OFFSET_BYTES) as *mut u64 };
    unsafe { core::ptr::write_volatile(ptr, wptr_dwords << 2) };
    Ok(())
}
|
||||
|
||||
/// log2 of the ring size in dwords (hardware RB_CNTL wants the order).
///
/// NOTE(review): `ilog2` panics when `ring_size_dwords` is 0 — relies on
/// the ring always being created with a nonzero, power-of-two size.
fn ring_size_order(&self) -> u32 {
    self.ring_size_dwords.ilog2()
}
|
||||
|
||||
/// Ring capacity in dwords as a `usize` for indexing.
fn ring_capacity(&self) -> usize {
    self.ring_size_dwords as usize
}
|
||||
|
||||
/// Zero-fill a DMA buffer before handing it to the engine.
fn zero_dma(buffer: &mut DmaBuffer) {
    // SAFETY: the pointer and length come from the same buffer, so the
    // write stays within its allocation.
    unsafe { core::ptr::write_bytes(buffer.as_mut_ptr(), 0, buffer.len()) };
}
|
||||
}
|
||||
|
||||
/// Low 32 bits of a 64-bit value.
fn lower_32(value: u64) -> u32 {
    (value & u64::from(u32::MAX)) as u32
}
|
||||
|
||||
/// High 32 bits of a 64-bit value.
fn upper_32(value: u64) -> u32 {
    ((value >> 32) & u64::from(u32::MAX)) as u32
}
|
||||
@@ -0,0 +1,392 @@
|
||||
use std::sync::Mutex;
|
||||
|
||||
use log::{debug, info};
|
||||
use redox_driver_sys::memory::MmioRegion;
|
||||
|
||||
use crate::driver::{DriverError, Result};
|
||||
use crate::kms::connector::synthetic_edid;
|
||||
use crate::kms::{ConnectorInfo, ConnectorStatus, ConnectorType, ModeInfo};
|
||||
|
||||
// Pipe/port counts this skeleton models.
const PIPE_COUNT: usize = 3;
const PORT_COUNT: usize = 5;

// MMIO register offsets. NOTE(review): these follow i915-style display
// register layout — confirm against the Intel PRM for the targeted
// generations before relying on them on hardware.
const PP_STATUS: usize = 0xC7200; // panel power status; used as an eDP presence heuristic
const PIPECONF_BASE: usize = 0x70008; // pipe configuration (enable bit)
const DSPCNTR_BASE: usize = 0x70180; // primary plane control
const DSPSURF_BASE: usize = 0x7019C; // primary plane surface address (page flip)
const DDI_BUF_CTL_BASE: usize = 0x64000; // DDI buffer control per port

// Per-pipe transcoder timing registers.
const HTOTAL_BASE: usize = 0x60000;
const HBLANK_BASE: usize = 0x60004;
const HSYNC_BASE: usize = 0x60008;
const VTOTAL_BASE: usize = 0x6000C;
const VBLANK_BASE: usize = 0x60010;
const VSYNC_BASE: usize = 0x60014;
const PIPE_SRC_BASE: usize = 0x6001C; // pipe source size
const PLANE_SIZE_BASE: usize = 0x70190; // plane size

// Register strides between pipe/port instances.
const PIPE_STRIDE: usize = 0x1000;
const PORT_STRIDE: usize = 0x100;

// Enable bits (bit 31 in each control register).
const PIPECONF_ENABLE: u32 = 1 << 31;
const DSPCNTR_ENABLE: u32 = 1 << 31;
const DDI_BUF_CTL_ENABLE: u32 = 1 << 31;
|
||||
|
||||
/// Snapshot of one display pipe's state.
#[derive(Clone, Copy, Debug)]
pub struct DisplayPipe {
    // Hardware pipe index (0-based).
    pub index: u8,
    // True when PIPECONF reports the pipe enabled.
    pub enabled: bool,
    // DDI port driven by this pipe, when one could be inferred.
    pub port: Option<u8>,
}
|
||||
|
||||
/// Intel display (modesetting) backend over the BAR2 MMIO aperture.
pub struct IntelDisplay {
    // Display register aperture.
    mmio: MmioRegion,
    // Cached pipe topology; refreshed on demand and merged with hardware probes.
    pipes: Mutex<Vec<DisplayPipe>>,
}
|
||||
|
||||
impl IntelDisplay {
|
||||
pub fn new(mmio: MmioRegion) -> Result<Self> {
|
||||
let pipes = Self::detect_pipes(&mmio)?;
|
||||
info!(
|
||||
"redox-drm: Intel display initialized with {} pipe(s)",
|
||||
pipes.len()
|
||||
);
|
||||
Ok(Self {
|
||||
mmio,
|
||||
pipes: Mutex::new(pipes),
|
||||
})
|
||||
}
|
||||
|
||||
/// Current pipe topology (re-probes hardware and merges with the cache).
pub fn pipes(&self) -> Result<Vec<DisplayPipe>> {
    self.refresh_pipes()
}
|
||||
|
||||
/// Resolve a DRM CRTC id (1-based) to its display pipe.
///
/// # Errors
/// `InvalidArgument` for id 0; `NotFound` when the index exceeds the
/// probed pipe count.
pub fn pipe_for_crtc(&self, crtc_id: u32) -> Result<DisplayPipe> {
    // CRTC ids are 1-based; pipe indices are 0-based.
    let index = crtc_id
        .checked_sub(1)
        .ok_or(DriverError::InvalidArgument("invalid Intel CRTC id"))?
        as usize;
    self.refresh_pipes()?
        .get(index)
        .copied()
        .ok_or_else(|| DriverError::NotFound(format!("unknown Intel pipe for CRTC {crtc_id}")))
}
|
||||
|
||||
/// Probe hardware state and build the pipe list.
///
/// Port assignment is heuristic: the nth enabled DDI port is paired with
/// the nth pipe, pipe 0 is assumed to drive eDP when PP_STATUS is nonzero,
/// an enabled pipe with no match falls back to its own index, and as a
/// last resort pipe 0 is forced onto port 0 so a connector always exists.
pub fn detect_pipes(mmio: &MmioRegion) -> Result<Vec<DisplayPipe>> {
    let mut pipes = Vec::with_capacity(PIPE_COUNT);
    // Best-effort reads: a failed PP_STATUS read just disables the eDP heuristic.
    let pp_status = read32(mmio, PP_STATUS).unwrap_or(0);
    let connected_ports = connected_ports(mmio);

    for index in 0..PIPE_COUNT {
        let conf = read32(mmio, pipe_offset(PIPECONF_BASE, index))?;
        let enabled = conf & PIPECONF_ENABLE != 0;
        // Pair pipes with enabled DDI ports positionally.
        let mut port = connected_ports.get(index).copied();

        // eDP heuristic: panel power active implies pipe 0 drives port 0.
        if port.is_none() && index == 0 && pp_status != 0 {
            port = Some(0);
        }
        // An enabled pipe with no known port defaults to its own index.
        if port.is_none() && enabled {
            port = Some(index as u8);
        }

        pipes.push(DisplayPipe {
            index: index as u8,
            enabled,
            port,
        });
    }

    // Guarantee at least one pipe exposes a port so the KMS layer always
    // has a connector to report.
    if pipes.iter().all(|pipe| pipe.port.is_none()) {
        if let Some(pipe) = pipes.first_mut() {
            pipe.port = Some(0);
        }
    }

    Ok(pipes)
}
|
||||
|
||||
/// Enumerate connectors, one per DDI port.
///
/// A port counts as connected when its DDI buffer is enabled, an enabled
/// pipe claims it, or (port 0 only) the panel power heuristic fires.
/// Physical size is a fixed placeholder (600x340 mm ≈ 27" 16:9).
pub fn detect_connectors(&self) -> Result<Vec<ConnectorInfo>> {
    let pp_status = self.read32(PP_STATUS).unwrap_or(0);
    let pipes = self.refresh_pipes()?;
    let mut connectors = Vec::with_capacity(PORT_COUNT);

    for port in 0..PORT_COUNT as u8 {
        let status = self.read32(ddi_offset(port)).unwrap_or(0);
        let connected = status & DDI_BUF_CTL_ENABLE != 0
            || pipes
                .iter()
                .any(|pipe| pipe.port == Some(port) && pipe.enabled)
            || (port == 0 && pp_status != 0);
        let connector_type = connector_type_for_port(port, pp_status);
        let modes = self.modes_for_port(port, connector_type);

        connectors.push(ConnectorInfo {
            // DRM object ids are 1-based.
            id: port as u32 + 1,
            connector_type,
            connector_type_id: port as u32 + 1,
            connection: if connected {
                ConnectorStatus::Connected
            } else {
                ConnectorStatus::Disconnected
            },
            mm_width: 600,
            mm_height: 340,
            encoder_id: port as u32 + 1,
            modes,
        });
    }

    Ok(connectors)
}
|
||||
|
||||
/// Modes for a connector, mapped back to its DDI port.
///
/// The 1-based `connector_type_id` is converted to a 0-based port and
/// clamped into range so an out-of-range id degrades to the last port
/// rather than panicking.
pub fn modes_for_connector(&self, connector: &ConnectorInfo) -> Vec<ModeInfo> {
    let port = connector
        .connector_type_id
        .saturating_sub(1)
        .min((PORT_COUNT - 1) as u32) as u8;
    self.modes_for_port(port, connector.connector_type)
}
|
||||
|
||||
/// EDID for a port. No GMBUS/DDC transport exists yet, so this always
/// returns the synthetic fallback EDID.
pub fn read_edid(&self, port: u8) -> Vec<u8> {
    debug!("redox-drm: Intel HDMI/DVI EDID fallback on port {}", port);
    synthetic_edid()
}
|
||||
|
||||
/// DPCD bytes for a port; empty when the DDI buffer is disabled.
///
/// No AUX channel is implemented yet: the returned bytes are canned.
/// Presumably 0x12 = DPCD rev 1.2, 0x0A = 2.7 Gbps max link rate, and
/// 0x84 = 4 lanes + enhanced framing — TODO confirm against the DP spec.
pub fn read_dpcd(&self, port: u8) -> Vec<u8> {
    let status = self.read32(ddi_offset(port)).unwrap_or(0);
    if status & DDI_BUF_CTL_ENABLE == 0 {
        return Vec::new();
    }

    debug!("redox-drm: Intel AUX/DPCD skeleton read on port {}", port);
    vec![0x12, 0x0A, 0x84, 0x01]
}
|
||||
|
||||
/// Program a mode onto a pipe: timings, then plane, pipe, and DDI enables.
///
/// Timing registers pack (end-1, start-1) pairs via `pack_pair`. HBLANK
/// and VBLANK are approximated with the total/active pair rather than
/// dedicated blank values — adequate for this modesetting skeleton.
/// Enable order (plane → pipe → DDI) matters on hardware; the cached pipe
/// state is updated last.
pub fn set_mode(&self, pipe: &DisplayPipe, mode: &ModeInfo) -> Result<()> {
    let index = usize::from(pipe.index);
    self.write32(
        pipe_offset(HTOTAL_BASE, index),
        pack_pair(mode.htotal, mode.hdisplay),
    )?;
    // Blank interval approximated as active..total.
    self.write32(
        pipe_offset(HBLANK_BASE, index),
        pack_pair(mode.htotal, mode.hdisplay),
    )?;
    self.write32(
        pipe_offset(HSYNC_BASE, index),
        pack_pair(mode.hsync_end, mode.hsync_start),
    )?;
    self.write32(
        pipe_offset(VTOTAL_BASE, index),
        pack_pair(mode.vtotal, mode.vdisplay),
    )?;
    self.write32(
        pipe_offset(VBLANK_BASE, index),
        pack_pair(mode.vtotal, mode.vdisplay),
    )?;
    self.write32(
        pipe_offset(VSYNC_BASE, index),
        pack_pair(mode.vsync_end, mode.vsync_start),
    )?;
    // Source/plane size pack (height-1, width-1).
    self.write32(
        pipe_offset(PIPE_SRC_BASE, index),
        pack_pair(mode.vdisplay, mode.hdisplay),
    )?;
    self.write32(
        pipe_offset(PLANE_SIZE_BASE, index),
        pack_pair(mode.vdisplay, mode.hdisplay),
    )?;

    // Read-modify-write enables: plane first...
    let mut dspcntr = self.read32(pipe_offset(DSPCNTR_BASE, index))?;
    dspcntr |= DSPCNTR_ENABLE;
    self.write32(pipe_offset(DSPCNTR_BASE, index), dspcntr)?;

    // ...then the pipe...
    let mut pipeconf = self.read32(pipe_offset(PIPECONF_BASE, index))?;
    pipeconf |= PIPECONF_ENABLE;
    self.write32(pipe_offset(PIPECONF_BASE, index), pipeconf)?;

    // ...and finally the DDI port, if one is bound.
    if let Some(port) = pipe.port {
        let mut ddi = self.read32(ddi_offset(port))?;
        ddi |= DDI_BUF_CTL_ENABLE;
        self.write32(ddi_offset(port), ddi)?;
    }

    self.update_pipe(pipe.index, true, pipe.port)?;

    Ok(())
}
|
||||
|
||||
/// Flip the primary plane of `pipe` to a new surface address.
///
/// DSPSURF is a 32-bit register in this skeleton, so GGTT offsets above
/// 4 GiB are rejected rather than silently truncated.
pub fn page_flip(&self, pipe: &DisplayPipe, fb_addr: u64) -> Result<()> {
    if fb_addr > u64::from(u32::MAX) {
        return Err(DriverError::Buffer(format!(
            "Intel DSPSURF supports 32-bit GGTT offsets in this skeleton, got {fb_addr:#x}"
        )));
    }
    let index = usize::from(pipe.index);
    self.write32(pipe_offset(DSPSURF_BASE, index), fb_addr as u32)
}
|
||||
|
||||
/// Re-probe the pipes and merge with the cached state.
///
/// Merging is sticky: a previously learned port is kept when the probe
/// finds none, and a pipe once seen enabled stays marked enabled (the
/// probe cannot un-set `enabled` through this path; `update_pipe` can).
fn refresh_pipes(&self) -> Result<Vec<DisplayPipe>> {
    let detected = Self::detect_pipes(&self.mmio)?;
    let mut cached = self
        .pipes
        .lock()
        .map_err(|_| DriverError::Initialization("Intel display pipe state poisoned".into()))?;

    let previous = cached.clone();
    let mut refreshed = Vec::with_capacity(detected.len());

    for mut pipe in detected {
        if let Some(existing) = previous
            .iter()
            .find(|existing| existing.index == pipe.index)
        {
            if pipe.port.is_none() {
                pipe.port = existing.port;
            }
            pipe.enabled |= existing.enabled;
        }
        refreshed.push(pipe);
    }

    *cached = refreshed.clone();
    Ok(refreshed)
}
|
||||
|
||||
fn update_pipe(&self, index: u8, enabled: bool, port: Option<u8>) -> Result<()> {
|
||||
let mut cached = self
|
||||
.pipes
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("Intel display pipe state poisoned".into()))?;
|
||||
|
||||
if let Some(pipe) = cached.iter_mut().find(|pipe| pipe.index == index) {
|
||||
pipe.enabled = enabled;
|
||||
pipe.port = port.or(pipe.port);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
cached.push(DisplayPipe {
|
||||
index,
|
||||
enabled,
|
||||
port,
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn modes_for_port(&self, port: u8, connector_type: ConnectorType) -> Vec<ModeInfo> {
|
||||
let mut modes = match connector_type {
|
||||
ConnectorType::DisplayPort | ConnectorType::EDP => {
|
||||
modes_from_dpcd(&self.read_dpcd(port))
|
||||
}
|
||||
_ => ModeInfo::from_edid(&self.read_edid(port)),
|
||||
};
|
||||
|
||||
if modes.is_empty() {
|
||||
modes = ModeInfo::from_edid(&synthetic_edid());
|
||||
}
|
||||
if modes.is_empty() {
|
||||
modes.push(ModeInfo::default_1080p());
|
||||
}
|
||||
modes
|
||||
}
|
||||
|
||||
/// Bounds-checked 32-bit read from the display aperture.
fn read32(&self, offset: usize) -> Result<u32> {
    read32(&self.mmio, offset)
}
|
||||
|
||||
/// Bounds-checked 32-bit write to the display aperture.
fn write32(&self, offset: usize, value: u32) -> Result<()> {
    write32(&self.mmio, offset, value)
}
|
||||
}
|
||||
|
||||
fn connected_ports(mmio: &MmioRegion) -> Vec<u8> {
|
||||
let mut ports = Vec::new();
|
||||
for port in 0..PORT_COUNT as u8 {
|
||||
if read32(mmio, ddi_offset(port)).unwrap_or(0) & DDI_BUF_CTL_ENABLE != 0 {
|
||||
ports.push(port);
|
||||
}
|
||||
}
|
||||
ports
|
||||
}
|
||||
|
||||
/// Bounds-checked 32-bit MMIO read.
///
/// # Errors
/// `DriverError::Mmio` when the access would fall outside the aperture.
fn read32(mmio: &MmioRegion, offset: usize) -> Result<u32> {
    ensure_access(
        mmio.size(),
        offset,
        core::mem::size_of::<u32>(),
        "Intel display read",
    )?;
    Ok(mmio.read32(offset))
}
|
||||
|
||||
/// Bounds-checked 32-bit MMIO write.
///
/// # Errors
/// `DriverError::Mmio` when the access would fall outside the aperture.
fn write32(mmio: &MmioRegion, offset: usize, value: u32) -> Result<()> {
    ensure_access(
        mmio.size(),
        offset,
        core::mem::size_of::<u32>(),
        "Intel display write",
    )?;
    mmio.write32(offset, value);
    Ok(())
}
|
||||
|
||||
fn ensure_access(mmio_size: usize, offset: usize, width: usize, op: &str) -> Result<()> {
|
||||
let end = offset
|
||||
.checked_add(width)
|
||||
.ok_or_else(|| DriverError::Mmio(format!("{op} offset overflow at {offset:#x}")))?;
|
||||
if end > mmio_size {
|
||||
return Err(DriverError::Mmio(format!(
|
||||
"{op} outside MMIO aperture: end={end:#x} size={mmio_size:#x}"
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn pipe_offset(base: usize, index: usize) -> usize {
|
||||
base + index * PIPE_STRIDE
|
||||
}
|
||||
|
||||
fn ddi_offset(port: u8) -> usize {
|
||||
DDI_BUF_CTL_BASE + usize::from(port) * PORT_STRIDE
|
||||
}
|
||||
|
||||
/// Pack two timing values into one register: (upper-1) in the high half,
/// (lower-1) in the low half. Zero inputs saturate to zero instead of
/// wrapping.
fn pack_pair(upper: u16, lower: u16) -> u32 {
    let hi = u32::from(upper).saturating_sub(1);
    let lo = u32::from(lower).saturating_sub(1);
    (hi << 16) | lo
}
|
||||
|
||||
/// Heuristic connector type per port: port 0 is eDP when panel power is
/// active, ports 0/1 default to HDMI, 2/3 to DisplayPort, the rest VGA.
fn connector_type_for_port(port: u8, pp_status: u32) -> ConnectorType {
    match port {
        0 if pp_status != 0 => ConnectorType::EDP,
        0 | 1 => ConnectorType::HDMIA,
        2 | 3 => ConnectorType::DisplayPort,
        _ => ConnectorType::VGA,
    }
}
|
||||
|
||||
fn modes_from_dpcd(dpcd: &[u8]) -> Vec<ModeInfo> {
|
||||
if dpcd.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
vec![ModeInfo::default_1080p(), mode_1440p()]
|
||||
}
|
||||
|
||||
/// Fixed 2560x1440@60 mode. The 241.5 MHz clock with 2720/1481 totals
/// looks like CVT reduced-blanking timing — TODO confirm against the CVT
/// tables before relying on it for real panels.
fn mode_1440p() -> ModeInfo {
    ModeInfo {
        clock: 241_500, // kHz
        hdisplay: 2560,
        hsync_start: 2608,
        hsync_end: 2640,
        htotal: 2720,
        hskew: 0,
        vdisplay: 1440,
        vsync_start: 1443,
        vsync_end: 1448,
        vtotal: 1481,
        vscan: 0,
        vrefresh: 60,
        flags: 0,
        type_: 0,
        name: "2560x1440@60".to_string(),
    }
}
|
||||
@@ -0,0 +1,226 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use log::{debug, info};
|
||||
use redox_driver_sys::memory::MmioRegion;
|
||||
|
||||
use crate::driver::{DriverError, Result};
|
||||
|
||||
// GGTT page-table entries start at the beginning of the GGTT BAR.
const GTT_BASE: usize = 0x0000;
// Flush-control register in the main MMIO aperture; writing bit 0
// flushes GGTT TLBs. NOTE(review): offset follows i915 conventions —
// confirm per generation.
const GFX_FLSH_CNTL_REG: usize = 0x101008;
const GFX_FLSH_CNTL_EN: u32 = 1 << 0;

// GGTT maps 4 KiB pages; one 64-bit PTE per page.
const GTT_PAGE_SIZE: u64 = 4096;
const GTT_PAGE_MASK: u64 = GTT_PAGE_SIZE - 1;
const GTT_PTE_PRESENT: u64 = 1 << 0;
const GTT_PTE_WRITE: u64 = 1 << 1;
// Physical address bits of a PTE (page-aligned).
const GTT_PTE_ADDR_MASK: u64 = 0xFFFF_FFFF_FFFF_F000;
|
||||
|
||||
/// Intel global GTT manager: a bump allocator with a simple free list over
/// the GGTT aperture, plus PTE programming through the GGTT BAR.
pub struct IntelGtt {
    // BAR exposing the GGTT page-table entries.
    gtt_mmio: MmioRegion,
    // Main MMIO aperture, used only for the TLB flush register.
    control_mmio: MmioRegion,
    // Number of PTEs the GGTT BAR exposes.
    page_count: usize,
    // Aperture size in bytes (page_count * 4 KiB).
    aperture_size: u64,
    // Bump pointer for fresh allocations.
    next_allocation: u64,
    // Released (start, size) ranges; first-fit reuse, no coalescing.
    free_list: Vec<(u64, u64)>,
    // Active mappings: GPU address -> mapped size.
    mappings: BTreeMap<u64, u64>,
}
|
||||
|
||||
impl IntelGtt {
|
||||
/// Build the GGTT manager from the GGTT BAR and the control aperture.
///
/// The PTE count is derived from the BAR size (8 bytes per entry); an
/// initial flush clears any stale TLB state.
///
/// # Errors
/// Fails when the BAR exposes zero entries or the aperture size overflows.
pub fn init(gtt_mmio: MmioRegion, control_mmio: MmioRegion) -> Result<Self> {
    let page_count = gtt_mmio.size() / core::mem::size_of::<u64>();
    if page_count == 0 {
        return Err(DriverError::Initialization(
            "Intel GGTT BAR exposes no page table entries".to_string(),
        ));
    }

    let aperture_size = (page_count as u64)
        .checked_mul(GTT_PAGE_SIZE)
        .ok_or_else(|| DriverError::Initialization("Intel GGTT aperture overflow".into()))?;

    let gtt = Self {
        gtt_mmio,
        control_mmio,
        page_count,
        aperture_size,
        next_allocation: 0,
        free_list: Vec::new(),
        mappings: BTreeMap::new(),
    };

    gtt.flush()?;
    info!(
        "redox-drm: Intel GGTT initialized with {} entries ({:#x} aperture)",
        page_count, aperture_size
    );
    Ok(gtt)
}
|
||||
|
||||
/// Allocate a page-aligned GGTT address range of at least `size` bytes.
///
/// Strategy: first-fit from the free list (splitting the remainder back
/// in), otherwise bump `next_allocation`. Freed ranges are never
/// coalesced, so long-running fragmentation is possible.
///
/// # Errors
/// `DriverError::Buffer` when the aperture is exhausted or sizes overflow.
pub fn alloc_range(&mut self, size: u64) -> Result<u64> {
    let aligned_size = align_up(size, GTT_PAGE_SIZE)?;

    // First-fit reuse of a released range.
    if let Some(index) = self
        .free_list
        .iter()
        .position(|&(_, free_size)| free_size >= aligned_size)
    {
        let (start, free_size) = self.free_list.remove(index);
        let remainder = free_size.saturating_sub(aligned_size);
        if remainder != 0 {
            // Return the unused tail to the free list.
            self.free_list.push((start + aligned_size, remainder));
        }
        return Ok(start);
    }

    // Fall back to bump allocation from the never-used tail.
    let start = self.next_allocation;
    let end = start
        .checked_add(aligned_size)
        .ok_or_else(|| DriverError::Buffer("Intel GGTT allocation overflow".into()))?;
    if end > self.aperture_size {
        return Err(DriverError::Buffer(format!(
            "Intel GGTT aperture exhausted: need {:#x} bytes, remaining {:#x}",
            aligned_size,
            self.aperture_size.saturating_sub(start)
        )));
    }

    self.next_allocation = end;
    Ok(start)
}
|
||||
|
||||
pub fn release_range(&mut self, gpu_addr: u64, size: u64) -> Result<()> {
|
||||
let aligned_size = align_up(size, GTT_PAGE_SIZE)?;
|
||||
self.free_list.push((gpu_addr, aligned_size));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn map_range(
|
||||
&mut self,
|
||||
gpu_addr: u64,
|
||||
phys_addr: u64,
|
||||
size: u64,
|
||||
flags: u64,
|
||||
) -> Result<()> {
|
||||
let aligned_size = align_up(size, GTT_PAGE_SIZE)?;
|
||||
let page_count = (aligned_size / GTT_PAGE_SIZE) as usize;
|
||||
|
||||
for page in 0..page_count {
|
||||
let page_offset = (page as u64) * GTT_PAGE_SIZE;
|
||||
self.insert_page(gpu_addr + page_offset, phys_addr + page_offset, flags)?;
|
||||
}
|
||||
|
||||
self.mappings.insert(gpu_addr, aligned_size);
|
||||
self.flush()
|
||||
}
|
||||
|
||||
/// Clear the PTEs covering `[gpu_addr, gpu_addr + size)`, drop the mapping
/// record, and flush the TLB.
///
/// # Errors
/// Propagates alignment/overflow errors and PTE-clear failures.
pub fn unmap_range(&mut self, gpu_addr: u64, size: u64) -> Result<()> {
    let aligned_size = align_up(size, GTT_PAGE_SIZE)?;
    let page_count = (aligned_size / GTT_PAGE_SIZE) as usize;

    for page in 0..page_count {
        let page_offset = (page as u64) * GTT_PAGE_SIZE;
        self.remove_page(gpu_addr + page_offset)?;
    }

    self.mappings.remove(&gpu_addr);
    self.flush()
}
|
||||
|
||||
/// Program a single GGTT PTE mapping `virtual_addr` -> `physical_addr`.
///
/// Both addresses must be page-aligned and the virtual address must fall
/// inside the aperture. The caller is responsible for flushing the TLB
/// afterwards (`flush`).
pub fn insert_page(&self, virtual_addr: u64, physical_addr: u64, flags: u64) -> Result<()> {
    ensure_page_alignment(virtual_addr, "virtual_addr")?;
    ensure_page_alignment(physical_addr, "physical_addr")?;

    let entry_index = self.entry_index(virtual_addr)?;
    let entry_offset = gtt_entry_offset(entry_index)?;
    self.ensure_gtt_access(entry_offset, core::mem::size_of::<u64>(), "GGTT PTE write")?;

    let pte = encode_pte(physical_addr, flags);
    self.gtt_mmio.write64(entry_offset, pte);
    debug!(
        "redox-drm: Intel GGTT map va={:#x} -> pa={:#x} flags={:#x}",
        virtual_addr, physical_addr, flags
    );
    Ok(())
}
|
||||
|
||||
/// Clear the GGTT PTE for `virtual_addr` (writes an all-zero, non-present
/// entry). The caller is responsible for flushing the TLB afterwards.
pub fn remove_page(&self, virtual_addr: u64) -> Result<()> {
    ensure_page_alignment(virtual_addr, "virtual_addr")?;

    let entry_index = self.entry_index(virtual_addr)?;
    let entry_offset = gtt_entry_offset(entry_index)?;
    self.ensure_gtt_access(entry_offset, core::mem::size_of::<u64>(), "GGTT PTE clear")?;

    self.gtt_mmio.write64(entry_offset, 0);
    debug!("redox-drm: Intel GGTT unmap va={:#x}", virtual_addr);
    Ok(())
}
|
||||
|
||||
/// Flush the GGTT TLBs via GFX_FLSH_CNTL.
///
/// The trailing read is a posting read: it forces the write to reach the
/// device before this function returns; the value itself is discarded.
pub fn flush(&self) -> Result<()> {
    self.ensure_control_access(GFX_FLSH_CNTL_REG, core::mem::size_of::<u32>(), "GGTT flush")?;
    self.control_mmio
        .write32(GFX_FLSH_CNTL_REG, GFX_FLSH_CNTL_EN);
    let _ = self.control_mmio.read32(GFX_FLSH_CNTL_REG);
    Ok(())
}
|
||||
|
||||
fn entry_index(&self, virtual_addr: u64) -> Result<usize> {
|
||||
let entry_index = (virtual_addr / GTT_PAGE_SIZE) as usize;
|
||||
if entry_index >= self.page_count {
|
||||
return Err(DriverError::Buffer(format!(
|
||||
"Intel GGTT entry {entry_index} outside aperture of {} entries",
|
||||
self.page_count
|
||||
)));
|
||||
}
|
||||
Ok(entry_index)
|
||||
}
|
||||
|
||||
/// Bounds check against the GGTT (PTE) aperture.
fn ensure_gtt_access(&self, offset: usize, width: usize, op: &str) -> Result<()> {
    ensure_mmio_access(self.gtt_mmio.size(), offset, width, op)
}
|
||||
|
||||
/// Bounds check against the control (main MMIO) aperture.
fn ensure_control_access(&self, offset: usize, width: usize, op: &str) -> Result<()> {
    ensure_mmio_access(self.control_mmio.size(), offset, width, op)
}
|
||||
}
|
||||
|
||||
fn align_up(value: u64, alignment: u64) -> Result<u64> {
|
||||
value
|
||||
.checked_add(alignment - 1)
|
||||
.map(|v| v & !(alignment - 1))
|
||||
.ok_or_else(|| DriverError::Buffer("Intel GGTT size alignment overflow".into()))
|
||||
}
|
||||
|
||||
fn ensure_page_alignment(value: u64, name: &'static str) -> Result<()> {
|
||||
if value & GTT_PAGE_MASK != 0 {
|
||||
return Err(DriverError::InvalidArgument(name));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn gtt_entry_offset(entry_index: usize) -> Result<usize> {
|
||||
GTT_BASE
|
||||
.checked_add(
|
||||
entry_index
|
||||
.checked_mul(core::mem::size_of::<u64>())
|
||||
.ok_or_else(|| DriverError::Mmio("Intel GGTT entry offset overflow".into()))?,
|
||||
)
|
||||
.ok_or_else(|| DriverError::Mmio("Intel GGTT base offset overflow".into()))
|
||||
}
|
||||
|
||||
/// Validate that `[offset, offset + width)` lies inside an aperture of
/// `mmio_size` bytes; `op` labels the error message.
/// (Mirrors `ensure_access` in the display module.)
fn ensure_mmio_access(mmio_size: usize, offset: usize, width: usize, op: &str) -> Result<()> {
    let end = offset
        .checked_add(width)
        .ok_or_else(|| DriverError::Mmio(format!("{op} offset overflow at {offset:#x}")))?;
    if end > mmio_size {
        return Err(DriverError::Mmio(format!(
            "{op} outside MMIO aperture: end={end:#x} size={mmio_size:#x}"
        )));
    }
    Ok(())
}
|
||||
|
||||
fn encode_pte(physical_addr: u64, flags: u64) -> u64 {
|
||||
(physical_addr & GTT_PTE_ADDR_MASK)
|
||||
| (flags & (GTT_PTE_PRESENT | GTT_PTE_WRITE))
|
||||
| GTT_PTE_PRESENT
|
||||
}
|
||||
@@ -0,0 +1,667 @@
|
||||
pub mod display;
|
||||
pub mod gtt;
|
||||
pub mod ring;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Mutex;
|
||||
|
||||
use log::{debug, info, warn};
|
||||
use redox_driver_sys::irq::IrqHandle;
|
||||
use redox_driver_sys::memory::MmioRegion;
|
||||
use redox_driver_sys::pci::{PciBarInfo, PciDevice, PciDeviceInfo};
|
||||
|
||||
use crate::driver::{DriverError, GpuDriver, Result};
|
||||
use crate::gem::{GemHandle, GemManager};
|
||||
use crate::kms::connector::{synthetic_edid, Connector};
|
||||
use crate::kms::crtc::Crtc;
|
||||
use crate::kms::encoder::Encoder;
|
||||
use crate::kms::{ConnectorInfo, ConnectorType, ModeInfo};
|
||||
|
||||
use self::display::{DisplayPipe, IntelDisplay};
|
||||
use self::gtt::IntelGtt;
|
||||
use self::ring::{IntelRing, RingType};
|
||||
|
||||
// Register offsets for the top-level driver. NOTE(review): i915-style
// offsets — confirm per supported generation against the Intel PRM.
const FORCEWAKE: usize = 0xA18C; // keeps the GT well powered for MMIO access
const PP_STATUS: usize = 0xC7200; // panel power status (eDP heuristic)
const PIPECONF_BASE: usize = 0x70008;
const PIPE_STRIDE: usize = 0x1000;
const DDI_BUF_CTL_BASE: usize = 0x64000;
const DDI_PORT_STRIDE: usize = 0x100;
const GFX_FLSH_CNTL_REG: usize = 0x101008; // GGTT TLB flush

// Render command-streamer ring registers. Not referenced in the code
// visible here — presumably used further down this file.
const RENDER_RING_BASE: usize = 0x02000;
const RING_TAIL_OFFSET: usize = 0x30;
const RING_HEAD_OFFSET: usize = 0x34;
|
||||
|
||||
/// Top-level Intel GPU driver state: PCI identity, MMIO apertures, and
/// the KMS/GEM/GGTT/ring subsystems, each behind its own lock.
pub struct IntelDriver {
    // PCI identity/location of the device.
    info: PciDeviceInfo,
    // BAR2 register aperture (shared with display/ring via separate mappings).
    mmio: MmioRegion,
    // IRQ line, when one was assigned.
    irq_handle: Mutex<Option<IrqHandle>>,
    // Modesetting backend.
    display: IntelDisplay,
    // Buffer-object bookkeeping.
    gem: Mutex<GemManager>,
    // Cached KMS topology.
    connectors: Mutex<Vec<Connector>>,
    crtcs: Mutex<Vec<Crtc>>,
    encoders: Mutex<Vec<Encoder>>,
    // Global GTT allocator/mapper.
    gtt: Mutex<IntelGtt>,
    // Render command ring.
    ring: Mutex<IntelRing>,
    // Monotonic vblank event counter.
    vblank_count: AtomicU64,
}
|
||||
|
||||
impl IntelDriver {
|
||||
/// Bring up the driver for an Intel display-class PCI device.
///
/// Sequence: validate the device and BARs, enable it, map BAR2 several
/// times (one region per subsystem) plus the GGTT BAR0, enable forcewake,
/// then build the display/GGTT/ring subsystems and the initial KMS
/// topology. A missing IRQ and unused firmware blobs are warnings, not
/// errors.
///
/// # Errors
/// Fails on a non-Intel device, BAR validation/mapping failures, or any
/// subsystem init error.
pub fn new(info: PciDeviceInfo, firmware: HashMap<String, Vec<u8>>) -> Result<Self> {
    if !info.is_intel_gpu() {
        return Err(DriverError::Pci(format!(
            "device {} is not an Intel display-class GPU",
            info.location
        )));
    }

    // BAR0 carries the GGTT PTEs, BAR2 the register aperture.
    let gtt_bar = find_memory_bar(&info, 0, "GGTT BAR0")?;
    let mmio_bar = find_memory_bar(&info, 2, "MMIO BAR2")?;
    validate_intel_bars(&info, &gtt_bar, &mmio_bar)?;

    let mut device = PciDevice::open_location(&info.location)
        .map_err(|e| DriverError::Pci(format!("failed to re-open PCI device: {e}")))?;
    device
        .enable_device()
        .map_err(|e| DriverError::Pci(format!("enable_device failed: {e}")))?;

    // BAR2 is mapped once per consumer so each subsystem owns its region.
    let mmio = map_bar(&mut device, &mmio_bar, "Intel MMIO BAR2")?;
    let display_mmio = map_bar(&mut device, &mmio_bar, "Intel display MMIO")?;
    let ring_mmio = map_bar(&mut device, &mmio_bar, "Intel ring MMIO")?;
    let gtt_control_mmio = map_bar(&mut device, &mmio_bar, "Intel GGTT control MMIO")?;
    let gtt_mmio = map_bar(&mut device, &gtt_bar, "Intel GGTT BAR0")?;

    enable_forcewake(&mmio)?;

    let display = IntelDisplay::new(display_mmio)?;
    let mut gtt = IntelGtt::init(gtt_mmio, gtt_control_mmio)?;
    let mut ring = IntelRing::create(ring_mmio, RingType::Render)?;
    // The ring buffer itself must live in the GGTT before use.
    ring.bind_gtt(&mut gtt)?;

    let (connectors, encoders) = detect_display_topology(&display)?;
    let crtcs = build_crtcs(&display)?;

    let irq_handle = match info.irq {
        Some(irq) => Some(
            IrqHandle::request(irq)
                .map_err(|e| DriverError::Io(format!("failed to request IRQ {irq}: {e}")))?,
        ),
        None => {
            warn!(
                "redox-drm: Intel device {} has no IRQ assigned",
                info.location
            );
            None
        }
    };

    if !firmware.is_empty() {
        warn!(
            "redox-drm: Intel driver ignores {} firmware blob(s); i915-class GPUs usually boot without scheme:firmware blobs",
            firmware.len()
        );
    }

    info!(
        "redox-drm: Intel driver ready for {} with {} connector(s)",
        info.location,
        connectors.len()
    );

    Ok(Self {
        info,
        mmio,
        irq_handle: Mutex::new(irq_handle),
        display,
        gem: Mutex::new(GemManager::new()),
        connectors: Mutex::new(connectors),
        crtcs: Mutex::new(crtcs),
        encoders: Mutex::new(encoders),
        gtt: Mutex::new(gtt),
        ring: Mutex::new(ring),
        vblank_count: AtomicU64::new(0),
    })
}
|
||||
|
||||
/// Re-probe connectors/encoders from the display backend, replace the
/// cached topology, and return the fresh connector infos.
///
/// The two caches are swapped in separate scopes so neither lock is held
/// longer than necessary (and never both at once).
fn refresh_connectors(&self) -> Result<Vec<ConnectorInfo>> {
    let (connectors, encoders) = detect_display_topology(&self.display)?;
    let infos = connectors
        .iter()
        .map(|connector| connector.info.clone())
        .collect();

    {
        let mut connector_state = self.connectors.lock().map_err(|_| {
            DriverError::Initialization("Intel connector state poisoned".into())
        })?;
        *connector_state = connectors;
    }

    {
        let mut encoder_state = self
            .encoders
            .lock()
            .map_err(|_| DriverError::Initialization("Intel encoder state poisoned".into()))?;
        *encoder_state = encoders;
    }

    Ok(infos)
}
|
||||
|
||||
/// Last known connector infos from the cache, without touching hardware.
///
/// A poisoned lock is tolerated: the inner state is still read, since a
/// panic elsewhere should not blind the KMS layer entirely.
fn cached_connectors(&self) -> Vec<ConnectorInfo> {
    match self.connectors.lock() {
        Ok(connectors) => connectors
            .iter()
            .map(|connector| connector.info.clone())
            .collect(),
        Err(poisoned) => {
            warn!("redox-drm: Intel connector state poisoned; using inner state");
            poisoned
                .into_inner()
                .iter()
                .map(|connector| connector.info.clone())
                .collect()
        }
    }
}
|
||||
|
||||
/// DDI port (0-based) for a connector id, from the cached topology.
///
/// # Errors
/// `NotFound` for an unknown connector id.
fn connector_port(&self, connector_id: u32) -> Result<u8> {
    let connectors = self
        .connectors
        .lock()
        .map_err(|_| DriverError::Initialization("Intel connector state poisoned".into()))?;
    let connector = connectors
        .iter()
        .find(|connector| connector.info.id == connector_id)
        .ok_or_else(|| DriverError::NotFound(format!("unknown connector {connector_id}")))?;

    // connector_type_id is 1-based; port indices are 0-based.
    Ok(connector.info.connector_type_id.saturating_sub(1) as u8)
}
|
||||
|
||||
/// Best-effort IRQ decode without real interrupt status registers.
///
/// On each IRQ: re-probe connectors (logging hotplug-like changes), check
/// ring activity, and — when any CRTC is active — treat the interrupt as a
/// vblank, returning `(crtc_id, new vblank count)`. This is a heuristic:
/// every IRQ with an active CRTC bumps the vblank counter.
fn process_irq(&self) -> Result<Option<(u32, u64)>> {
    let previous = self.cached_connectors();
    let current = self.refresh_connectors()?;

    if connector_status_changed(&previous, &current) {
        info!(
            "redox-drm: Intel hotplug event detected on {}",
            self.info.location
        );
    }

    let ring_busy = self
        .ring
        .lock()
        .map_err(|_| DriverError::Initialization("Intel ring state poisoned".into()))?
        .has_activity()?;

    if let Some(crtc_id) = self.active_crtc_id()? {
        let count = self.vblank_count.fetch_add(1, Ordering::SeqCst) + 1;
        debug!(
            "redox-drm: Intel IRQ decoded as display event crtc={} ring_busy={}",
            crtc_id, ring_busy
        );
        return Ok(Some((crtc_id, count)));
    }

    if ring_busy {
        debug!("redox-drm: Intel IRQ signaled command stream activity without active CRTC");
    }

    Ok(None)
}
|
||||
|
||||
/// Id of an active CRTC, preferring software state (a CRTC with a
/// programmed mode) and falling back to any hardware-enabled pipe
/// (pipe index + 1 == CRTC id).
fn active_crtc_id(&self) -> Result<Option<u32>> {
    let crtcs = self
        .crtcs
        .lock()
        .map_err(|_| DriverError::Initialization("Intel CRTC state poisoned".into()))?;

    if let Some(active) = crtcs.iter().find(|crtc| crtc.mode.is_some()) {
        return Ok(Some(active.id));
    }

    Ok(self
        .display
        .pipes()?
        .into_iter()
        .find(|pipe| pipe.enabled)
        .map(|pipe| u32::from(pipe.index) + 1))
}
|
||||
|
||||
/// Ensure a GEM object is mapped into the GGTT, returning its GPU address.
///
/// Fast path: the object already records a GPU address. Slow path:
/// allocate + map a GGTT range, then record it on the object, undoing the
/// GGTT work if recording fails.
///
/// NOTE(review): the GEM lock is taken and released three separate times,
/// so two threads can race past the fast path and both map the object —
/// the loser leaks its GGTT range. Worth holding one lock across the
/// check-and-set, or tolerating double-mapping explicitly.
fn ensure_gem_gpu_mapping(&self, handle: GemHandle) -> Result<u64> {
    {
        let gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
        if let Some(gpu_addr) = gem.gpu_addr(handle)? {
            return Ok(gpu_addr);
        }
    }

    let (phys_addr, size) = {
        let gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
        let object = gem.object(handle)?;
        (object.phys_addr as u64, object.size)
    };

    let gpu_addr = {
        let mut gtt = self
            .gtt
            .lock()
            .map_err(|_| DriverError::Initialization("Intel GGTT state poisoned".into()))?;
        let gpu_addr = gtt.alloc_range(size)?;
        // `1 << 1` is the writable PTE flag (GTT_PTE_WRITE in the gtt module).
        if let Err(error) = gtt.map_range(gpu_addr, phys_addr, size, 1 << 1) {
            let _ = gtt.release_range(gpu_addr, size);
            return Err(error);
        }
        gpu_addr
    };

    // Record the mapping on the object; on failure, unwind the GGTT state.
    if let Err(error) = self
        .gem
        .lock()
        .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?
        .set_gpu_addr(handle, gpu_addr)
    {
        let mut gtt = self
            .gtt
            .lock()
            .map_err(|_| DriverError::Initialization("Intel GGTT state poisoned".into()))?;
        let _ = gtt.unmap_range(gpu_addr, size);
        let _ = gtt.release_range(gpu_addr, size);
        return Err(error);
    }

    Ok(gpu_addr)
}
|
||||
|
||||
/// Bounds-checked 32-bit read from the driver's BAR2 aperture.
/// (Same pattern as `ensure_mmio_access` in the display/gtt modules.)
fn read_mmio(&self, offset: usize) -> Result<u32> {
    let end = offset
        .checked_add(core::mem::size_of::<u32>())
        .ok_or_else(|| {
            DriverError::Mmio(format!("Intel MMIO offset overflow at {offset:#x}"))
        })?;
    if end > self.mmio.size() {
        return Err(DriverError::Mmio(format!(
            "Intel MMIO read outside BAR2 aperture: end={end:#x} size={:#x}",
            self.mmio.size()
        )));
    }
    Ok(self.mmio.read32(offset))
}
|
||||
}
|
||||
|
||||
impl GpuDriver for IntelDriver {
|
||||
/// DRM driver name reported to userspace.
fn driver_name(&self) -> &str {
    "i915-redox"
}

/// Human-readable driver description.
fn driver_desc(&self) -> &str {
    "Intel i915-class DRM/KMS backend for Redox"
}

/// Driver date string (DRM version triple convention).
fn driver_date(&self) -> &str {
    "2026-04-12"
}
|
||||
|
||||
fn detect_connectors(&self) -> Vec<ConnectorInfo> {
|
||||
match self.refresh_connectors() {
|
||||
Ok(connectors) => connectors,
|
||||
Err(error) => {
|
||||
warn!("redox-drm: Intel connector refresh failed: {}", error);
|
||||
self.cached_connectors()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_modes(&self, connector_id: u32) -> Vec<ModeInfo> {
|
||||
self.detect_connectors()
|
||||
.into_iter()
|
||||
.find(|connector| connector.id == connector_id)
|
||||
.map(|connector| connector.modes)
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn set_crtc(
|
||||
&self,
|
||||
crtc_id: u32,
|
||||
fb_handle: u32,
|
||||
connectors: &[u32],
|
||||
mode: &ModeInfo,
|
||||
) -> Result<()> {
|
||||
if connectors.is_empty() {
|
||||
return Err(DriverError::InvalidArgument(
|
||||
"set_crtc requires at least one connector",
|
||||
));
|
||||
}
|
||||
|
||||
let fb_addr = self.ensure_gem_gpu_mapping(fb_handle)?;
|
||||
let mut pipe = self.display.pipe_for_crtc(crtc_id)?;
|
||||
pipe.port = Some(self.connector_port(connectors[0])?);
|
||||
|
||||
self.display.set_mode(&pipe, mode)?;
|
||||
self.display.page_flip(&pipe, fb_addr)?;
|
||||
|
||||
let mut crtcs = self
|
||||
.crtcs
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("Intel CRTC state poisoned".into()))?;
|
||||
let crtc = crtcs
|
||||
.iter_mut()
|
||||
.find(|crtc| crtc.id == crtc_id)
|
||||
.ok_or_else(|| DriverError::NotFound(format!("unknown CRTC {crtc_id}")))?;
|
||||
crtc.program(fb_handle, connectors, mode)
|
||||
}
|
||||
|
||||
fn page_flip(&self, crtc_id: u32, fb_handle: u32, _flags: u32) -> Result<u64> {
|
||||
let fb_addr = self.ensure_gem_gpu_mapping(fb_handle)?;
|
||||
let pipe = self.display.pipe_for_crtc(crtc_id)?;
|
||||
self.display.page_flip(&pipe, fb_addr)?;
|
||||
|
||||
let mut ring = self
|
||||
.ring
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("Intel ring state poisoned".into()))?;
|
||||
ring.flush()?;
|
||||
Ok(ring.last_seqno())
|
||||
}
|
||||
|
||||
fn get_vblank(&self, crtc_id: u32) -> Result<u64> {
|
||||
let crtcs = self
|
||||
.crtcs
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("Intel CRTC state poisoned".into()))?;
|
||||
if !crtcs.iter().any(|crtc| crtc.id == crtc_id) {
|
||||
return Err(DriverError::NotFound(format!("unknown CRTC {crtc_id}")));
|
||||
}
|
||||
Ok(self.vblank_count.load(Ordering::SeqCst))
|
||||
}
|
||||
|
||||
fn gem_create(&self, size: u64) -> Result<GemHandle> {
|
||||
let handle = {
|
||||
let mut gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
|
||||
gem.create(size)?
|
||||
};
|
||||
|
||||
if let Err(error) = self.ensure_gem_gpu_mapping(handle) {
|
||||
let _ = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?
|
||||
.close(handle);
|
||||
return Err(error);
|
||||
}
|
||||
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
fn gem_close(&self, handle: GemHandle) -> Result<()> {
|
||||
let (gpu_addr, size) = {
|
||||
let gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
|
||||
let object = gem.object(handle)?;
|
||||
(object.gpu_addr, object.size)
|
||||
};
|
||||
|
||||
if let Some(gpu_addr) = gpu_addr {
|
||||
let mut gtt = self
|
||||
.gtt
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("Intel GGTT state poisoned".into()))?;
|
||||
gtt.unmap_range(gpu_addr, size)?;
|
||||
gtt.release_range(gpu_addr, size)?;
|
||||
}
|
||||
|
||||
self.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?
|
||||
.close(handle)
|
||||
}
|
||||
|
||||
fn gem_mmap(&self, handle: GemHandle) -> Result<usize> {
|
||||
let gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
|
||||
gem.mmap(handle)
|
||||
}
|
||||
|
||||
fn gem_size(&self, handle: GemHandle) -> Result<u64> {
|
||||
let gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
|
||||
Ok(gem.object(handle)?.size)
|
||||
}
|
||||
|
||||
fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32> {
|
||||
let mut gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
|
||||
gem.export_dmafd(handle)
|
||||
}
|
||||
|
||||
fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle> {
|
||||
let handle = {
|
||||
let gem = self
|
||||
.gem
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
|
||||
gem.import_dmafd(fd)?
|
||||
};
|
||||
|
||||
let _ = self.ensure_gem_gpu_mapping(handle)?;
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
fn get_edid(&self, connector_id: u32) -> Vec<u8> {
|
||||
match self.connectors.lock() {
|
||||
Ok(connectors) => connectors
|
||||
.iter()
|
||||
.find(|connector| connector.info.id == connector_id)
|
||||
.map(|connector| connector.edid.clone())
|
||||
.unwrap_or_default(),
|
||||
Err(poisoned) => poisoned
|
||||
.into_inner()
|
||||
.iter()
|
||||
.find(|connector| connector.info.id == connector_id)
|
||||
.map(|connector| connector.edid.clone())
|
||||
.unwrap_or_default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_irq(&self) -> Result<Option<(u32, u64)>> {
|
||||
let irq_event = {
|
||||
let mut irq_handle = self
|
||||
.irq_handle
|
||||
.lock()
|
||||
.map_err(|_| DriverError::Initialization("Intel IRQ state poisoned".into()))?;
|
||||
match irq_handle.as_mut() {
|
||||
Some(handle) => handle
|
||||
.try_wait()
|
||||
.map_err(|e| DriverError::Io(format!("Intel IRQ poll failed: {e}")))?,
|
||||
None => return Ok(None),
|
||||
}
|
||||
};
|
||||
|
||||
if irq_event.is_none() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
self.process_irq()
|
||||
}
|
||||
}
|
||||
|
||||
fn detect_display_topology(display: &IntelDisplay) -> Result<(Vec<Connector>, Vec<Encoder>)> {
|
||||
let detected = display.detect_connectors()?;
|
||||
let mut connectors = Vec::with_capacity(detected.len());
|
||||
let mut encoders = Vec::with_capacity(detected.len());
|
||||
|
||||
for connector in detected {
|
||||
let port = connector.connector_type_id.saturating_sub(1) as u8;
|
||||
let edid = match connector.connector_type {
|
||||
ConnectorType::DisplayPort | ConnectorType::EDP => display.read_edid(port),
|
||||
_ => display.read_edid(port),
|
||||
};
|
||||
|
||||
encoders.push(Encoder::new(
|
||||
connector.encoder_id,
|
||||
pipe_id_for_port(display, port),
|
||||
));
|
||||
connectors.push(Connector {
|
||||
edid: if edid.is_empty() {
|
||||
synthetic_edid()
|
||||
} else {
|
||||
edid
|
||||
},
|
||||
info: ConnectorInfo {
|
||||
modes: display.modes_for_connector(&connector),
|
||||
..connector
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
Ok((connectors, encoders))
|
||||
}
|
||||
|
||||
fn build_crtcs(display: &IntelDisplay) -> Result<Vec<Crtc>> {
|
||||
let mut crtcs: Vec<Crtc> = display
|
||||
.pipes()?
|
||||
.into_iter()
|
||||
.map(|pipe| Crtc::new(u32::from(pipe.index) + 1))
|
||||
.collect();
|
||||
|
||||
if crtcs.is_empty() {
|
||||
crtcs.push(Crtc::new(1));
|
||||
}
|
||||
|
||||
Ok(crtcs)
|
||||
}
|
||||
|
||||
fn pipe_id_for_port(display: &IntelDisplay, port: u8) -> u32 {
|
||||
display
|
||||
.pipes()
|
||||
.ok()
|
||||
.and_then(|pipes| {
|
||||
pipes
|
||||
.into_iter()
|
||||
.find(|pipe| pipe.port == Some(port))
|
||||
.map(|pipe| u32::from(pipe.index) + 1)
|
||||
})
|
||||
.unwrap_or(1)
|
||||
}
|
||||
|
||||
/// True when the connector topology changed between two snapshots: a
/// different count, or any positional difference in id, hotplug state, or
/// connector kind.
fn connector_status_changed(previous: &[ConnectorInfo], current: &[ConnectorInfo]) -> bool {
    if previous.len() != current.len() {
        return true;
    }

    for (old, new) in previous.iter().zip(current.iter()) {
        let unchanged = old.id == new.id
            && old.connection == new.connection
            && old.connector_type == new.connector_type;
        if !unchanged {
            return true;
        }
    }
    false
}
|
||||
|
||||
fn enable_forcewake(mmio: &MmioRegion) -> Result<()> {
|
||||
let end = FORCEWAKE
|
||||
.checked_add(core::mem::size_of::<u32>())
|
||||
.ok_or_else(|| DriverError::Mmio("Intel FORCEWAKE offset overflow".into()))?;
|
||||
if end > mmio.size() {
|
||||
return Err(DriverError::Mmio(format!(
|
||||
"Intel FORCEWAKE register outside MMIO aperture: end={end:#x} size={:#x}",
|
||||
mmio.size()
|
||||
)));
|
||||
}
|
||||
|
||||
mmio.write32(FORCEWAKE, 1);
|
||||
let _ = mmio.read32(FORCEWAKE);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn validate_intel_bars(
|
||||
info: &PciDeviceInfo,
|
||||
gtt_bar: &PciBarInfo,
|
||||
mmio_bar: &PciBarInfo,
|
||||
) -> Result<()> {
|
||||
if !gtt_bar.is_memory() {
|
||||
return Err(DriverError::Pci(format!(
|
||||
"device {} GGTT BAR{} is not a memory BAR",
|
||||
info.location, gtt_bar.index
|
||||
)));
|
||||
}
|
||||
if !mmio_bar.is_memory() {
|
||||
return Err(DriverError::Pci(format!(
|
||||
"device {} MMIO BAR{} is not a memory BAR",
|
||||
info.location, mmio_bar.index
|
||||
)));
|
||||
}
|
||||
|
||||
if gtt_bar.size < core::mem::size_of::<u64>() as u64 {
|
||||
return Err(DriverError::Pci(format!(
|
||||
"device {} GGTT BAR{} is too small ({:#x})",
|
||||
info.location, gtt_bar.index, gtt_bar.size
|
||||
)));
|
||||
}
|
||||
if gtt_bar.size % core::mem::size_of::<u64>() as u64 != 0 {
|
||||
return Err(DriverError::Pci(format!(
|
||||
"device {} GGTT BAR{} size {:#x} is not 8-byte aligned",
|
||||
info.location, gtt_bar.index, gtt_bar.size
|
||||
)));
|
||||
}
|
||||
|
||||
let required_mmio_end = [
|
||||
FORCEWAKE + core::mem::size_of::<u32>(),
|
||||
PP_STATUS + core::mem::size_of::<u32>(),
|
||||
GFX_FLSH_CNTL_REG + core::mem::size_of::<u32>(),
|
||||
RENDER_RING_BASE + RING_TAIL_OFFSET + core::mem::size_of::<u32>(),
|
||||
RENDER_RING_BASE + RING_HEAD_OFFSET + core::mem::size_of::<u32>(),
|
||||
]
|
||||
.into_iter()
|
||||
.max()
|
||||
.unwrap_or(0);
|
||||
|
||||
if mmio_bar.size < required_mmio_end as u64 {
|
||||
return Err(DriverError::Pci(format!(
|
||||
"device {} MMIO BAR{} is too small ({:#x}) for required register window ending at {:#x}",
|
||||
info.location, mmio_bar.index, mmio_bar.size, required_mmio_end
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn find_memory_bar(info: &PciDeviceInfo, index: usize, name: &str) -> Result<PciBarInfo> {
|
||||
info.find_memory_bar(index)
|
||||
.copied()
|
||||
.ok_or_else(|| DriverError::Pci(format!("device {} has no {}", info.location, name)))
|
||||
}
|
||||
|
||||
fn map_bar(device: &mut PciDevice, bar: &PciBarInfo, name: &str) -> Result<MmioRegion> {
|
||||
device
|
||||
.map_bar(bar.index, bar.addr, bar.size as usize)
|
||||
.map_err(|e| DriverError::Mmio(format!("failed to map {name}: {e}")))
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn ddi_buf_ctl(port: u8) -> usize {
|
||||
DDI_BUF_CTL_BASE + usize::from(port) * DDI_PORT_STRIDE
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn pipeconf(pipe: &DisplayPipe) -> usize {
|
||||
PIPECONF_BASE + usize::from(pipe.index) * PIPE_STRIDE
|
||||
}
|
||||
@@ -0,0 +1,267 @@
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use log::{debug, info};
|
||||
use redox_driver_sys::dma::DmaBuffer;
|
||||
use redox_driver_sys::memory::MmioRegion;
|
||||
|
||||
use crate::driver::{DriverError, Result};
|
||||
|
||||
use super::gtt::IntelGtt;
|
||||
|
||||
/// Size of each command ring buffer: a single 4 KiB page.
const RING_BUFFER_BYTES: usize = 4096;
/// DMA allocation alignment for ring buffers.
const RING_ALIGNMENT: usize = 4096;
/// Polls before giving up while waiting for ring space (2000 * 50 us ~= 100 ms).
const RING_WAIT_ATTEMPTS: usize = 2000;
/// Delay between ring-space polls.
const RING_WAIT_DELAY: Duration = Duration::from_micros(50);

// Ring-buffer register offsets, relative to each engine's register base.
// NOTE(review): TAIL/HEAD/START/CTL at 0x30/0x34/0x38/0x3C match the
// classic i915 ring layout; RBBASE/RBBASE_HI at 0x04/0x08 should be
// confirmed against the PRM for the targeted hardware generations.
const RBBASE: usize = 0x04;
const RBBASE_HI: usize = 0x08;
const RBTAIL: usize = 0x30;
const RBHEAD: usize = 0x34;
const RBSTART: usize = 0x38;
const RBCTL: usize = 0x3C;

/// Ring control register: enable bit.
const RING_CTL_ENABLE: u32 = 1 << 0;
/// Ring control register: mask selecting the size field (bits 12 and up).
const RING_CTL_SIZE_MASK: u32 = !0x0FFF;

// Command-streamer opcodes (command DWORD 0).
const MI_NOOP: u32 = 0x0000_0000;
const MI_FLUSH_DW: u32 = 0x0200_0000;
|
||||
|
||||
/// Hardware command-streamer engines this ring abstraction can drive.
#[derive(Clone, Copy, Debug)]
pub enum RingType {
    /// 3D render engine.
    Render,
    /// Blitter / copy engine.
    Blitter,
    /// Video-enhancement (VEBOX) engine.
    VideoEnhance,
}
|
||||
|
||||
/// One hardware command ring plus its DMA-backed buffer and cached
/// software head/tail state.
pub struct IntelRing {
    // MMIO aperture used to reach the engine's ring registers.
    mmio: MmioRegion,
    // Register-block base for this engine (see `ring_base`).
    base: usize,
    // Cached hardware read pointer (byte offset into the ring).
    head: u32,
    // Software write pointer; made visible to hardware by `publish_tail`.
    tail: u32,
    // Ring size in bytes; a power of two (see the masking in `sync_from_hw`).
    size: u32,
    ring_type: RingType,
    // CPU/DMA backing storage holding the ring contents.
    buffer: DmaBuffer,
    // GGTT address once bound; `None` until `bind_gtt` succeeds.
    gpu_addr: Option<u64>,
    // Software sequence number, bumped once per submission.
    last_seqno: u64,
}
|
||||
|
||||
impl IntelRing {
    /// Allocate and zero a DMA ring buffer for `ring_type`, validate the
    /// register window, and quiesce the engine's head/tail/start registers.
    ///
    /// The ring cannot accept submissions until `bind_gtt` has mapped the
    /// buffer into the GGTT and programmed the base registers.
    pub fn create(mmio: MmioRegion, ring_type: RingType) -> Result<Self> {
        let mut buffer = DmaBuffer::allocate(RING_BUFFER_BYTES, RING_ALIGNMENT)
            .map_err(|e| DriverError::Buffer(format!("Intel ring allocation failed: {e}")))?;
        zero_dma(&mut buffer);

        let ring = Self {
            mmio,
            base: ring_base(ring_type),
            head: 0,
            tail: 0,
            size: RING_BUFFER_BYTES as u32,
            ring_type,
            buffer,
            gpu_addr: None,
            last_seqno: 0,
        };

        // NOTE(review): this validates the raw RBCTL offset rather than
        // base + RBCTL; the per-access checks inside write_reg cover the
        // real register addresses, so this is only an early sanity probe.
        ring.ensure_reg_access(RBCTL, core::mem::size_of::<u32>(), "ring control")?;
        ring.write_reg(RBHEAD, 0)?;
        ring.write_reg(RBTAIL, 0)?;
        ring.write_reg(RBSTART, 0)?;

        info!(
            "redox-drm: Intel {:?} ring allocated ({} bytes)",
            ring.ring_type, ring.size
        );
        Ok(ring)
    }

    /// Map the ring buffer into the GGTT (idempotent) and point the
    /// engine's base/control registers at it. Rolls the allocated GGTT
    /// range back if the map step fails.
    pub fn bind_gtt(&mut self, gtt: &mut IntelGtt) -> Result<()> {
        if self.gpu_addr.is_some() {
            return Ok(());
        }

        let gpu_addr = gtt.alloc_range(self.size as u64)?;
        // `1 << 1` is the PTE flag value used for these mappings — TODO
        // confirm its meaning against the IntelGtt implementation.
        if let Err(error) = gtt.map_range(
            gpu_addr,
            self.buffer.physical_address() as u64,
            self.size as u64,
            1 << 1,
        ) {
            let _ = gtt.release_range(gpu_addr, self.size as u64);
            return Err(error);
        }

        self.gpu_addr = Some(gpu_addr);
        self.program_ring_registers(gpu_addr)?;
        Ok(())
    }

    /// Copy `buffer` into the ring and advance the hardware tail.
    ///
    /// Blocks (polling) until enough space frees up; each submission bumps
    /// the software sequence number returned by `last_seqno`.
    pub fn submit_batch(&mut self, buffer: &[u32]) -> Result<()> {
        if buffer.is_empty() {
            return Ok(());
        }
        if self.gpu_addr.is_none() {
            return Err(DriverError::Initialization(
                "Intel ring must be bound into GGTT before submission".into(),
            ));
        }

        self.wait_for_space(buffer.len())?;

        for &dword in buffer {
            self.write_dword(dword)?;
        }

        // Only after every DWORD is in memory is the new tail published to
        // the hardware.
        self.publish_tail()?;
        self.last_seqno = self.last_seqno.saturating_add(1);
        debug!(
            "redox-drm: Intel {:?} ring submitted {} DWORDs seqno={}",
            self.ring_type,
            buffer.len(),
            self.last_seqno
        );
        Ok(())
    }

    /// Poll until the ring can accept `count` DWORDs, re-reading the
    /// hardware head/tail between attempts. Fails fast when the request
    /// can never fit, and times out after RING_WAIT_ATTEMPTS polls.
    pub fn wait_for_space(&mut self, count: usize) -> Result<()> {
        let required = (count * core::mem::size_of::<u32>()) as u32;
        if required >= self.size {
            return Err(DriverError::Buffer(format!(
                "Intel ring submission too large: {required} bytes >= ring size {}",
                self.size
            )));
        }

        for _ in 0..RING_WAIT_ATTEMPTS {
            self.sync_from_hw()?;
            if required <= self.free_bytes() {
                return Ok(());
            }
            thread::sleep(RING_WAIT_DELAY);
        }

        Err(DriverError::Buffer(format!(
            "Intel {:?} ring did not free {} bytes in time",
            self.ring_type, required
        )))
    }

    /// Emit a flush command so previously submitted work completes.
    pub fn flush(&mut self) -> Result<()> {
        self.submit_batch(&[MI_FLUSH_DW, MI_NOOP])
    }

    /// True while the hardware has not consumed everything submitted
    /// (head != tail after a register re-read).
    pub fn has_activity(&mut self) -> Result<bool> {
        self.sync_from_hw()?;
        Ok(self.head != self.tail)
    }

    /// Refresh the cached head/tail from the hardware registers. Masking
    /// with size - 1 relies on the ring size being a power of two.
    pub fn sync_from_hw(&mut self) -> Result<()> {
        self.head = self.read_reg(RBHEAD)? & (self.size - 1);
        self.tail = self.read_reg(RBTAIL)? & (self.size - 1);
        Ok(())
    }

    /// Sequence number of the most recent submission (software counter).
    pub fn last_seqno(&self) -> u64 {
        self.last_seqno
    }

    /// Program base/start/control registers for a ring mapped at
    /// `gpu_addr` and enable it. The control register encodes the ring
    /// size; for a single-page ring the size field (size - 0x1000) is zero.
    fn program_ring_registers(&mut self, gpu_addr: u64) -> Result<()> {
        self.write_reg(RBHEAD, 0)?;
        self.write_reg(RBTAIL, 0)?;
        self.write_reg(RBSTART, lower_32(gpu_addr))?;
        self.write_reg(RBBASE, lower_32(gpu_addr))?;
        self.write_reg(RBBASE_HI, upper_32(gpu_addr))?;

        let mut ctl = self.read_reg(RBCTL)?;
        ctl &= !RING_CTL_SIZE_MASK;
        ctl |= (self.size - 0x1000) & RING_CTL_SIZE_MASK;
        ctl |= RING_CTL_ENABLE;
        self.write_reg(RBCTL, ctl)?;
        Ok(())
    }

    /// Bytes still writable, keeping a 4-byte gap so a completely full
    /// ring is never indistinguishable from an empty one (head == tail).
    fn free_bytes(&self) -> u32 {
        let used = if self.tail >= self.head {
            self.tail - self.head
        } else {
            self.size - (self.head - self.tail)
        };
        self.size.saturating_sub(used).saturating_sub(4)
    }

    /// Volatile-write one DWORD at the software tail and advance it,
    /// wrapping at the ring size.
    fn write_dword(&mut self, value: u32) -> Result<()> {
        let write_offset = self.tail as usize;
        let width = core::mem::size_of::<u32>();
        let end = write_offset
            .checked_add(width)
            .ok_or_else(|| DriverError::Buffer("Intel ring write offset overflow".into()))?;
        if end > self.buffer.len() {
            return Err(DriverError::Buffer(format!(
                "Intel ring write out of bounds: end={end:#x} size={:#x}",
                self.buffer.len()
            )));
        }
        // SAFETY: write_offset + 4 was bounds-checked against the buffer
        // length above, and the DMA buffer lives as long as `self`.
        let ptr = unsafe { self.buffer.as_mut_ptr().add(write_offset) as *mut u32 };
        unsafe { core::ptr::write_volatile(ptr, value) };

        self.tail = (self.tail + width as u32) % self.size;
        Ok(())
    }

    /// Publish the software tail to the hardware register.
    fn publish_tail(&self) -> Result<()> {
        self.write_reg(RBTAIL, self.tail)
    }

    /// Bounds-checked 32-bit read of a ring register (base-relative).
    fn read_reg(&self, reg: usize) -> Result<u32> {
        let offset = self
            .base
            .checked_add(reg)
            .ok_or_else(|| DriverError::Mmio("Intel ring register offset overflow".into()))?;
        self.ensure_reg_access(offset, core::mem::size_of::<u32>(), "ring read")?;
        Ok(self.mmio.read32(offset))
    }

    /// Bounds-checked 32-bit write of a ring register (base-relative).
    fn write_reg(&self, reg: usize, value: u32) -> Result<()> {
        let offset = self
            .base
            .checked_add(reg)
            .ok_or_else(|| DriverError::Mmio("Intel ring register offset overflow".into()))?;
        self.ensure_reg_access(offset, core::mem::size_of::<u32>(), "ring write")?;
        self.mmio.write32(offset, value);
        Ok(())
    }

    /// Verify that [offset, offset + width) lies inside the MMIO aperture.
    fn ensure_reg_access(&self, offset: usize, width: usize, op: &str) -> Result<()> {
        let end = offset.checked_add(width).ok_or_else(|| {
            DriverError::Mmio(format!("Intel {op} offset overflow at {offset:#x}"))
        })?;
        if end > self.mmio.size() {
            return Err(DriverError::Mmio(format!(
                "Intel {op} outside MMIO aperture: end={end:#x} size={:#x}",
                self.mmio.size()
            )));
        }
        Ok(())
    }
}
|
||||
|
||||
fn ring_base(ring_type: RingType) -> usize {
|
||||
match ring_type {
|
||||
RingType::Render => 0x02000,
|
||||
RingType::Blitter => 0x22000,
|
||||
RingType::VideoEnhance => 0x1A000,
|
||||
}
|
||||
}
|
||||
|
||||
/// Zero-fill an entire DMA buffer.
fn zero_dma(buffer: &mut DmaBuffer) {
    // SAFETY: the pointer and length both come from the same live
    // DmaBuffer, so the [ptr, ptr + len) range is valid for byte writes.
    unsafe { core::ptr::write_bytes(buffer.as_mut_ptr(), 0, buffer.len()) };
}
|
||||
|
||||
/// Low 32 bits of a 64-bit GPU address.
fn lower_32(value: u64) -> u32 {
    (value & 0xFFFF_FFFF) as u32
}
|
||||
|
||||
/// High 32 bits of a 64-bit GPU address.
fn upper_32(value: u64) -> u32 {
    let shifted = value >> 32;
    shifted as u32
}
|
||||
@@ -0,0 +1,43 @@
|
||||
pub mod amd;
|
||||
pub mod intel;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use redox_driver_sys::pci::{PciDevice, PciDeviceInfo, PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL};
|
||||
|
||||
use crate::driver::{DriverError, GpuDriver, Result};
|
||||
|
||||
/// Stateless factory that matches a PCI display device to a vendor driver.
pub struct DriverRegistry;
|
||||
|
||||
impl DriverRegistry {
|
||||
pub fn probe(
|
||||
info: PciDeviceInfo,
|
||||
firmware: HashMap<String, Vec<u8>>,
|
||||
) -> Result<Arc<dyn GpuDriver>> {
|
||||
let full = if info.bars.is_empty() {
|
||||
let mut device = PciDevice::open_location(&info.location)
|
||||
.map_err(|e| DriverError::Pci(format!("open PCI device failed: {e}")))?;
|
||||
device
|
||||
.full_info()
|
||||
.map_err(|e| DriverError::Pci(format!("read PCI device info failed: {e}")))?
|
||||
} else {
|
||||
info
|
||||
};
|
||||
|
||||
match full.vendor_id {
|
||||
PCI_VENDOR_ID_AMD => {
|
||||
let driver = amd::AmdDriver::new(full, firmware)?;
|
||||
Ok(Arc::new(driver))
|
||||
}
|
||||
PCI_VENDOR_ID_INTEL => {
|
||||
let driver = intel::IntelDriver::new(full, firmware)?;
|
||||
Ok(Arc::new(driver))
|
||||
}
|
||||
_ => Err(DriverError::Pci(format!(
|
||||
"unsupported GPU vendor {:#06x} at {}",
|
||||
full.vendor_id, full.location
|
||||
))),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,146 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use log::{debug, warn};
|
||||
use redox_driver_sys::dma::DmaBuffer;
|
||||
|
||||
use crate::dmabuf::DmabufManager;
|
||||
use crate::driver::{DriverError, Result};
|
||||
|
||||
/// Opaque identifier for a GEM buffer object; 0 is never issued (handles
/// start at 1 in `GemManager::new`).
pub type GemHandle = u32;
|
||||
|
||||
/// Metadata for one GEM buffer object, separable from its backing
/// allocation so it can be cloned out from under a lock.
#[derive(Clone, Debug)]
pub struct GemObject {
    #[allow(dead_code)]
    pub handle: GemHandle,
    // Size in bytes as requested at creation.
    #[allow(dead_code)]
    pub size: u64,
    // Physical base address of the backing DMA buffer.
    pub phys_addr: usize,
    // CPU-visible base address (what mmap hands out).
    pub virt_addr: usize,
    // GGTT address when mapped for the GPU; `None` while unmapped.
    pub gpu_addr: Option<u64>,
}
|
||||
|
||||
/// Pairs the object metadata with the DMA buffer that keeps its memory
/// alive; dropping this releases the backing allocation.
struct GemAllocation {
    object: GemObject,
    // Ownership handle for the backing memory; also the source of the
    // mutable CPU pointer (see `object_mut_ptr`).
    #[allow(dead_code)]
    dma: DmaBuffer,
}
|
||||
|
||||
/// Allocator and registry for GEM buffer objects, plus the dma-buf
/// import/export bridge.
pub struct GemManager {
    // Next handle to hand out; starts at 1 and is never reused.
    next_handle: GemHandle,
    objects: BTreeMap<GemHandle, GemAllocation>,
    dmabuf: DmabufManager,
}
|
||||
|
||||
impl GemManager {
    /// Empty manager; handles start at 1 (0 is reserved as "no handle").
    pub fn new() -> Self {
        Self {
            next_handle: 1,
            objects: BTreeMap::new(),
            dmabuf: DmabufManager::new(),
        }
    }

    /// Allocate a DMA-backed buffer object and return its new handle.
    ///
    /// Errors on zero-size requests and on DMA allocation failure.
    /// Handles are never reused; the counter saturates at `u32::MAX`.
    pub fn create(&mut self, size: u64) -> Result<GemHandle> {
        if size == 0 {
            return Err(DriverError::InvalidArgument(
                "GEM create size must be non-zero",
            ));
        }

        let handle = self.next_handle;
        self.next_handle = self.next_handle.saturating_add(1);

        // 4 KiB alignment so the buffer can be mapped page-wise into a GTT.
        let dma = DmaBuffer::allocate(size as usize, 4096)
            .map_err(|e| DriverError::Buffer(format!("DMA allocation failed: {e}")))?;
        // NOTE(review): a non-contiguous buffer is only warned about, yet
        // `phys_addr` below records a single base address — confirm callers
        // tolerate scatter-gather backing.
        if !dma.is_physically_contiguous() {
            warn!(
                "redox-drm: GEM handle {} allocated without physically contiguous backing",
                handle
            );
        }

        let object = GemObject {
            handle,
            size,
            phys_addr: dma.physical_address(),
            virt_addr: dma.as_ptr() as usize,
            gpu_addr: None,
        };

        debug!(
            "redox-drm: created GEM handle {} size={} phys={:#x} virt={:#x}",
            handle, size, object.phys_addr, object.virt_addr
        );

        self.objects.insert(handle, GemAllocation { object, dma });
        Ok(handle)
    }

    /// Drop an object; removing the `GemAllocation` releases the backing
    /// DMA buffer. Any GPU (GTT) mapping must be torn down by the caller
    /// beforehand.
    pub fn close(&mut self, handle: GemHandle) -> Result<()> {
        if self.objects.remove(&handle).is_none() {
            return Err(DriverError::NotFound(format!(
                "unknown GEM handle {handle}"
            )));
        }
        Ok(())
    }

    /// CPU-visible base address for a handle (used to satisfy mmap).
    pub fn mmap(&self, handle: GemHandle) -> Result<usize> {
        let allocation = self
            .objects
            .get(&handle)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;
        Ok(allocation.object.virt_addr)
    }

    /// Export a handle as a dma-buf file descriptor.
    #[allow(dead_code)]
    pub fn export_dmafd(&mut self, handle: GemHandle) -> Result<i32> {
        let allocation = self
            .objects
            .get(&handle)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;

        self.dmabuf
            .export_with_info(handle, allocation.object.phys_addr, allocation.object.size)
    }

    /// Import a dma-buf file descriptor as a GEM handle.
    ///
    /// NOTE(review): `DmabufManager::import` returns a handle, but nothing
    /// here inserts it into `self.objects` (and `&self` could not mutate
    /// the map anyway), so the `object(handle)` validation below can only
    /// succeed if the handle already exists — confirm the intended
    /// ownership model for imported buffers.
    #[allow(dead_code)]
    pub fn import_dmafd(&self, fd: i32) -> Result<GemHandle> {
        let handle = self.dmabuf.import(fd)?;
        let _ = self.object(handle)?;
        Ok(handle)
    }

    /// Borrow the object metadata for a handle.
    pub fn object(&self, handle: GemHandle) -> Result<&GemObject> {
        self.objects
            .get(&handle)
            .map(|allocation| &allocation.object)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))
    }

    /// Physical base address of the object's backing memory.
    pub fn phys_addr(&self, handle: GemHandle) -> Result<usize> {
        Ok(self.object(handle)?.phys_addr)
    }

    /// Record the GPU (GTT) address an object has been mapped at.
    pub fn set_gpu_addr(&mut self, handle: GemHandle, gpu_addr: u64) -> Result<()> {
        let allocation = self
            .objects
            .get_mut(&handle)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;
        allocation.object.gpu_addr = Some(gpu_addr);
        Ok(())
    }

    /// GPU (GTT) address if the object is currently mapped, else `None`.
    pub fn gpu_addr(&self, handle: GemHandle) -> Result<Option<u64>> {
        Ok(self.object(handle)?.gpu_addr)
    }

    /// Mutable CPU pointer (as usize) to the object's backing buffer.
    #[allow(dead_code)]
    pub fn object_mut_ptr(&mut self, handle: GemHandle) -> Result<usize> {
        let allocation = self
            .objects
            .get_mut(&handle)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;
        Ok(allocation.dma.as_mut_ptr() as usize)
    }
}
|
||||
@@ -0,0 +1,46 @@
|
||||
use crate::kms::{ConnectorInfo, ConnectorStatus, ConnectorType, ModeInfo};
|
||||
|
||||
/// A KMS connector plus the raw EDID blob it was probed with.
#[derive(Clone, Debug)]
pub struct Connector {
    pub info: ConnectorInfo,
    // Raw EDID bytes (synthetic when the panel returned none).
    #[allow(dead_code)]
    pub edid: Vec<u8>,
}
|
||||
|
||||
impl Connector {
|
||||
pub fn synthetic_displayport(id: u32, encoder_id: u32) -> Self {
|
||||
let edid = synthetic_edid();
|
||||
let modes = ModeInfo::from_edid(&edid);
|
||||
|
||||
Self {
|
||||
info: ConnectorInfo {
|
||||
id,
|
||||
connector_type: ConnectorType::DisplayPort,
|
||||
connector_type_id: 1,
|
||||
connection: ConnectorStatus::Connected,
|
||||
mm_width: 600,
|
||||
mm_height: 340,
|
||||
encoder_id,
|
||||
modes: if modes.is_empty() {
|
||||
vec![ModeInfo::default_1080p()]
|
||||
} else {
|
||||
modes
|
||||
},
|
||||
},
|
||||
edid,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A minimal, valid 128-byte EDID 1.4 base block used when a connector
/// returns no EDID of its own.
///
/// Layout: standard header, vendor/product identification, one detailed
/// timing descriptor (1920x1080@60), a range-limits descriptor, the
/// monitor name "Synthetic DP", and a dummy fourth descriptor. Byte 126
/// declares zero extension blocks and byte 127 is the block checksum
/// (all 128 bytes sum to 0 mod 256).
///
/// Fix: the previous blob was truncated to 112 bytes, which is not a legal
/// EDID base block — consumers (including `ModeInfo::from_edid`, which
/// requires at least 128 bytes) would reject it. The missing fourth
/// descriptor, extension count, and a correct checksum were added.
pub fn synthetic_edid() -> Vec<u8> {
    vec![
        // Header
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
        // Vendor / product / serial / week / year of manufacture
        0x4c, 0x2d, 0xfa, 0x12, 0x01, 0x00, 0x00, 0x00, 0x01, 0x1e,
        // EDID version 1.4, basic display parameters
        0x01, 0x04, 0xa5, 0x3c, 0x22, 0x78, 0x3a,
        // Chromaticity coordinates
        0xee, 0x95, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54,
        // Established timings + eight standard timings
        0xbf, 0xef, 0x80, 0x71, 0x4f, 0x81, 0x80, 0x81, 0x40, 0x81, 0xc0,
        0x95, 0x00, 0xa9, 0xc0, 0xb3, 0x00, 0xd1, 0xc0,
        // Descriptor 1: detailed timing, 1920x1080 @ 60 Hz
        0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
        0x45, 0x00, 0x55, 0x50, 0x21, 0x00, 0x00, 0x1e,
        // Descriptor 2: monitor range limits
        0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 0x4c, 0x1e, 0x53, 0x11,
        0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
        // Descriptor 3: monitor name "Synthetic DP"
        0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x79, 0x6e, 0x74, 0x68,
        0x65, 0x74, 0x69, 0x63, 0x20, 0x44, 0x50, 0x0a,
        // Descriptor 4: dummy descriptor (tag 0x10)
        0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // Extension block count, block checksum
        0x00, 0x59,
    ]
}
|
||||
@@ -0,0 +1,43 @@
|
||||
use crate::driver::{DriverError, Result};
|
||||
use crate::kms::ModeInfo;
|
||||
|
||||
/// Software state for one CRTC: the framebuffer, connector set and mode
/// last recorded by `program`.
#[derive(Clone, Debug)]
pub struct Crtc {
    pub id: u32,
    // Handle of the framebuffer last set via `program` (0 = none yet).
    pub current_fb: u32,
    // Connector ids driven by this CRTC.
    pub connectors: Vec<u32>,
    // Last programmed mode, if any.
    pub mode: Option<ModeInfo>,
    #[allow(dead_code)]
    pub x: u32,
    #[allow(dead_code)]
    pub y: u32,
    // Gamma LUT size; fixed at 256 in `Crtc::new`.
    #[allow(dead_code)]
    pub gamma_size: u32,
}
|
||||
|
||||
impl Crtc {
|
||||
pub fn new(id: u32) -> Self {
|
||||
Self {
|
||||
id,
|
||||
current_fb: 0,
|
||||
connectors: Vec::new(),
|
||||
mode: None,
|
||||
x: 0,
|
||||
y: 0,
|
||||
gamma_size: 256,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn program(&mut self, fb_handle: u32, connectors: &[u32], mode: &ModeInfo) -> Result<()> {
|
||||
if connectors.is_empty() {
|
||||
return Err(DriverError::InvalidArgument(
|
||||
"set_crtc requires at least one connector",
|
||||
));
|
||||
}
|
||||
|
||||
self.current_fb = fb_handle;
|
||||
self.connectors = connectors.to_vec();
|
||||
self.mode = Some(mode.clone());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,21 @@
|
||||
use crate::kms::EncoderInfo;
|
||||
|
||||
/// Thin wrapper around `EncoderInfo` for the KMS topology.
#[derive(Clone, Debug)]
pub struct Encoder {
    #[allow(dead_code)]
    pub info: EncoderInfo,
}
|
||||
|
||||
impl Encoder {
|
||||
pub fn new(id: u32, crtc_id: u32) -> Self {
|
||||
Self {
|
||||
info: EncoderInfo {
|
||||
id,
|
||||
encoder_type: 0,
|
||||
crtc_id,
|
||||
possible_crtcs: 1,
|
||||
possible_clones: 0,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,182 @@
|
||||
pub mod connector;
|
||||
pub mod crtc;
|
||||
pub mod encoder;
|
||||
pub mod plane;
|
||||
|
||||
/// A display timing; the field set mirrors DRM's `drm_mode_modeinfo`.
#[derive(Clone, Debug)]
pub struct ModeInfo {
    /// Pixel clock in kHz (see `from_edid`, which converts 10 kHz units).
    pub clock: u32,
    pub hdisplay: u16,
    pub hsync_start: u16,
    pub hsync_end: u16,
    pub htotal: u16,
    pub hskew: u16,
    pub vdisplay: u16,
    pub vsync_start: u16,
    pub vsync_end: u16,
    pub vtotal: u16,
    pub vscan: u16,
    /// Vertical refresh rate in Hz.
    pub vrefresh: u32,
    /// Mode flags; bit 0 marks an interlaced timing (set by `from_edid`).
    pub flags: u32,
    pub type_: u32,
    /// Human-readable "WxH@Hz" label.
    pub name: String,
}
|
||||
|
||||
impl ModeInfo {
    /// CEA-861 1920x1080 @ 60 Hz timing (148.5 MHz pixel clock), used as
    /// the fallback when no EDID mode can be parsed.
    pub fn default_1080p() -> Self {
        Self {
            clock: 148_500,
            hdisplay: 1920,
            hsync_start: 2008,
            hsync_end: 2052,
            htotal: 2200,
            hskew: 0,
            vdisplay: 1080,
            vsync_start: 1084,
            vsync_end: 1089,
            vtotal: 1125,
            vscan: 0,
            vrefresh: 60,
            flags: 0,
            type_: 0,
            name: "1920x1080@60".to_string(),
        }
    }

    /// Parse the four 18-byte detailed timing descriptors of an EDID base
    /// block into display modes.
    ///
    /// Returns an empty list when the input is shorter than 128 bytes or
    /// the 8-byte EDID header is wrong. Descriptors with a zero pixel
    /// clock (monitor name, range limits, etc.) are skipped, as are
    /// timings with a zero active width or height.
    pub fn from_edid(edid: &[u8]) -> Vec<Self> {
        const EDID_HEADER: [u8; 8] = [0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00];

        if edid.len() < 128 || edid.get(0..8) != Some(&EDID_HEADER) {
            return Vec::new();
        }

        let mut modes = Vec::new();
        // Bytes 54..126 hold the four 18-byte descriptors.
        for descriptor in edid[54..126].chunks_exact(18) {
            // Pixel clock in 10 kHz units; zero marks a non-timing descriptor.
            let pixel_clock = u16::from_le_bytes([descriptor[0], descriptor[1]]) as u32;
            if pixel_clock == 0 {
                continue;
            }

            // Active/blanking fields are 12-bit: 8 low bits plus a high
            // nibble packed into bytes 4 and 7.
            let hdisplay = descriptor[2] as u16 | (((descriptor[4] >> 4) as u16) << 8);
            let hblank = descriptor[3] as u16 | (((descriptor[4] & 0x0f) as u16) << 8);
            let vdisplay = descriptor[5] as u16 | (((descriptor[7] >> 4) as u16) << 8);
            let vblank = descriptor[6] as u16 | (((descriptor[7] & 0x0f) as u16) << 8);
            // Sync offsets/widths: horizontal values are 10-bit, vertical
            // 6-bit, with the high bits packed into byte 11.
            let hsync_offset =
                descriptor[8] as u16 | ((((descriptor[11] >> 6) & 0x03) as u16) << 8);
            let hsync_width = descriptor[9] as u16 | ((((descriptor[11] >> 4) & 0x03) as u16) << 8);
            let vsync_offset =
                ((descriptor[10] >> 4) as u16) | ((((descriptor[11] >> 2) & 0x03) as u16) << 4);
            let vsync_width =
                (descriptor[10] & 0x0f) as u16 | (((descriptor[11] & 0x03) as u16) << 4);

            if hdisplay == 0 || vdisplay == 0 {
                continue;
            }

            let htotal = hdisplay.saturating_add(hblank);
            let vtotal = vdisplay.saturating_add(vblank);
            // 10 kHz units -> kHz.
            let clock = pixel_clock.saturating_mul(10);
            // vrefresh [Hz] = pixel clock [Hz] / (htotal * vtotal).
            let vrefresh = if htotal != 0 && vtotal != 0 {
                clock.saturating_mul(1000) / (htotal as u32).saturating_mul(vtotal as u32)
            } else {
                0
            };

            modes.push(Self {
                clock,
                hdisplay,
                hsync_start: hdisplay.saturating_add(hsync_offset),
                hsync_end: hdisplay
                    .saturating_add(hsync_offset)
                    .saturating_add(hsync_width),
                htotal,
                hskew: 0,
                vdisplay,
                vsync_start: vdisplay.saturating_add(vsync_offset),
                vsync_end: vdisplay
                    .saturating_add(vsync_offset)
                    .saturating_add(vsync_width),
                vtotal,
                vscan: 0,
                vrefresh,
                // Bit 7 of byte 17 set means an interlaced timing.
                flags: if (descriptor[17] & 0x80) != 0 { 1 } else { 0 },
                type_: 0,
                name: format!("{}x{}@{}", hdisplay, vdisplay, vrefresh),
            });
        }

        modes
    }
}
|
||||
|
||||
/// Description of one display connector as exposed to KMS clients.
#[derive(Clone, Debug)]
pub struct ConnectorInfo {
    pub id: u32,
    pub connector_type: ConnectorType,
    // 1-based index among connectors of the same type; ports are derived
    // from it as id - 1 elsewhere in the driver.
    #[allow(dead_code)]
    pub connector_type_id: u32,
    pub connection: ConnectorStatus,
    // Physical panel dimensions in millimetres.
    pub mm_width: u32,
    pub mm_height: u32,
    pub encoder_id: u32,
    pub modes: Vec<ModeInfo>,
}
|
||||
|
||||
/// Physical connector kinds, mirroring the DRM connector-type list.
/// Variants not yet produced by any probe path carry `allow(dead_code)`
/// so the enumeration stays aligned with DRM.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ConnectorType {
    Unknown,
    VGA,
    DVII,
    DVID,
    DVIA,
    #[allow(dead_code)]
    Composite,
    #[allow(dead_code)]
    SVideo,
    #[allow(dead_code)]
    LVDS,
    #[allow(dead_code)]
    Component,
    #[allow(dead_code)]
    NinePinDIN,
    DisplayPort,
    HDMIA,
    #[allow(dead_code)]
    HDMIB,
    #[allow(dead_code)]
    TV,
    EDP,
    Virtual,
}
|
||||
|
||||
/// Hotplug/detection state of a connector.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ConnectorStatus {
    Connected,
    Disconnected,
    /// Detection was inconclusive or not attempted.
    Unknown,
}
|
||||
|
||||
/// Point-in-time CRTC state snapshot (id, bound framebuffer, position,
/// gamma size, mode). Marked `dead_code`: not yet consumed by any of the
/// visible driver paths.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct CrtcInfo {
    pub id: u32,
    pub fb_id: u32,
    pub x: u32,
    pub y: u32,
    pub gamma_size: u32,
    pub mode: Option<ModeInfo>,
}
|
||||
|
||||
/// Encoder description for the KMS topology; all fields are currently
/// only written at construction (hence the allow markers).
#[derive(Clone, Debug)]
pub struct EncoderInfo {
    #[allow(dead_code)]
    pub id: u32,
    // DRM encoder type code; always 0 in `Encoder::new`.
    #[allow(dead_code)]
    pub encoder_type: u32,
    #[allow(dead_code)]
    pub crtc_id: u32,
    // Bitmask of CRTCs this encoder can drive.
    #[allow(dead_code)]
    pub possible_crtcs: u32,
    // Bitmask of encoders usable as clones of this one.
    #[allow(dead_code)]
    pub possible_clones: u32,
}
|
||||
@@ -0,0 +1,42 @@
|
||||
use crate::driver::{DriverError, Result};
|
||||
|
||||
/// Kind of scanout plane. Only primary and cursor planes are modeled;
/// overlay planes are out of scope for this phase.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PlaneKind {
    Primary,
    Cursor,
}
|
||||
|
||||
/// A scanout plane and its current attachment state. `fb_handle` and
/// `crtc_id` are both `None` while the plane is detached.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct Plane {
    // Driver-assigned plane id.
    pub id: u32,
    // Primary or cursor plane.
    pub kind: PlaneKind,
    // Framebuffer currently bound to the plane, if any.
    pub fb_handle: Option<u32>,
    // CRTC the plane is currently attached to, if any.
    pub crtc_id: Option<u32>,
}
|
||||
|
||||
impl Plane {
|
||||
#[allow(dead_code)]
|
||||
pub fn new(id: u32, kind: PlaneKind) -> Self {
|
||||
Self {
|
||||
id,
|
||||
kind,
|
||||
fb_handle: None,
|
||||
crtc_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn attach(&mut self, crtc_id: u32, fb_handle: u32) -> Result<()> {
|
||||
if fb_handle == 0 {
|
||||
return Err(DriverError::InvalidArgument(
|
||||
"plane attach requires a framebuffer handle",
|
||||
));
|
||||
}
|
||||
|
||||
self.crtc_id = Some(crtc_id);
|
||||
self.fb_handle = Some(fb_handle);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,312 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
mod dmabuf;
|
||||
mod driver;
|
||||
mod drivers;
|
||||
mod gem;
|
||||
mod kms;
|
||||
mod scheme;
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::process;
|
||||
|
||||
use std::sync::mpsc;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use log::{error, info, LevelFilter, Metadata, Record};
|
||||
use redox_driver_sys::pci::{
|
||||
enumerate_pci_class, PciDevice, PciDeviceInfo, PciLocation, PCI_CLASS_DISPLAY,
|
||||
PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL,
|
||||
};
|
||||
use redox_scheme::{SignalBehavior, Socket};
|
||||
|
||||
use crate::driver::{DriverError, GpuDriver, Result};
|
||||
use crate::drivers::DriverRegistry;
|
||||
use crate::scheme::DrmScheme;
|
||||
|
||||
/// Minimal `log::Log` backend that writes every enabled record to stderr.
struct StderrLogger {
    // Maximum level that will be emitted; anything more verbose is dropped.
    level: LevelFilter,
}
|
||||
|
||||
impl log::Log for StderrLogger {
|
||||
fn enabled(&self, metadata: &Metadata) -> bool {
|
||||
metadata.level() <= self.level
|
||||
}
|
||||
|
||||
fn log(&self, record: &Record) {
|
||||
if self.enabled(record.metadata()) {
|
||||
eprintln!("[{}] {}", record.level(), record.args());
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&self) {}
|
||||
}
|
||||
|
||||
fn init_logging(level: LevelFilter) {
|
||||
let logger = Box::leak(Box::new(StderrLogger { level }));
|
||||
if log::set_logger(logger).is_err() {
|
||||
return;
|
||||
}
|
||||
log::set_max_level(level);
|
||||
}
|
||||
|
||||
/// Daemon main loop: select and verify a GPU, load its firmware, probe a
/// driver, register the `drm` scheme, spawn the IRQ and vblank worker
/// threads, then serve scheme requests until the scheme is unmounted.
fn run() -> Result<()> {
    // Pick the GPU from CLI args (bus/device/function) or fall back to the
    // first AMD/Intel display-class device found via scheme:pci.
    let info = select_gpu_from_args()?;
    verify_supported_gpu(&info)?;

    // AMD devices need firmware blobs before the driver can probe.
    let firmware = FirmwareCache::load_for_device(&info)?;

    let driver = DriverRegistry::probe(info.clone(), firmware.into_blobs())?;
    info!(
        "redox-drm: initialized driver {} ({}) for {}",
        driver.driver_name(),
        driver.driver_desc(),
        info.location
    );

    let socket = Socket::create("drm")
        .map_err(|e| DriverError::Initialization(format!("failed to register drm scheme: {e}")))?;
    info!("redox-drm: registered scheme:drm");

    // Bounded channel from the IRQ poll thread to the vblank-retire thread.
    // Capacity 8 plus try_send below means events are dropped, not blocked
    // on, if the consumer falls behind.
    let (vblank_tx, vblank_rx) = mpsc::sync_channel::<(u32, u64)>(8);

    // IRQ poll thread: ask the driver for vblank events at a ~16 ms cadence
    // and forward (crtc_id, count) pairs to the retire thread.
    let irq_driver: Arc<dyn GpuDriver> = driver.clone();
    std::thread::spawn(move || loop {
        match irq_driver.handle_irq() {
            Ok(Some((crtc_id, count))) => {
                // Dropping on a full queue is acceptable: retiring only needs
                // a recent vblank count, not every single event.
                let _ = vblank_tx.try_send((crtc_id, count));
            }
            Ok(None) => {}
            Err(e) => {
                error!("redox-drm: IRQ handler error: {}", e);
            }
        }
        std::thread::sleep(std::time::Duration::from_millis(16));
    });

    let drm_scheme = Arc::new(Mutex::new(DrmScheme::new(driver)));
    let vblank_scheme = drm_scheme.clone();

    // Vblank-retire thread: completes pending page flips as events arrive.
    std::thread::spawn(move || loop {
        if let Ok((crtc_id, vblank_count)) = vblank_rx.recv() {
            if let Ok(mut scheme) = vblank_scheme.lock() {
                scheme.retire_vblank(crtc_id, vblank_count);
            }
        }
    });

    // Scheme service loop.
    loop {
        let request = match socket.next_request(SignalBehavior::Restart) {
            Ok(Some(request)) => request,
            // None means the scheme was unmounted: shut down cleanly.
            Ok(None) => {
                info!("redox-drm: scheme unmounted, exiting");
                break;
            }
            Err(e) => {
                error!("redox-drm: failed to receive scheme request: {}", e);
                continue;
            }
        };

        // Scope the lock to the request so the vblank thread is not starved
        // while we wait on the socket.
        let response = {
            let mut scheme = match drm_scheme.lock() {
                Ok(scheme) => scheme,
                Err(_) => {
                    error!("redox-drm: DRM scheme state poisoned");
                    continue;
                }
            };
            request.handle_scheme_block_mut(&mut *scheme)
        };

        let response = match response {
            Ok(response) => response,
            Err(_request) => {
                error!("redox-drm: failed to handle request");
                continue;
            }
        };

        if let Err(e) = socket.write_response(response, SignalBehavior::Restart) {
            error!("redox-drm: failed to write scheme response: {}", e);
        }
    }

    Ok(())
}
|
||||
|
||||
/// Resolve which GPU this daemon should drive.
///
/// With three CLI arguments they are parsed as PCI bus/device/function
/// coordinates (segment 0) and that exact device is opened. With fewer
/// arguments, scheme:pci is scanned and the first display-class device from
/// AMD or Intel wins.
fn select_gpu_from_args() -> Result<PciDeviceInfo> {
    let mut args = env::args().skip(1);
    let parsed = match (args.next(), args.next(), args.next()) {
        (Some(bus), Some(device), Some(function)) => {
            Some(parse_location(&bus, &device, &function)?)
        }
        // Fewer than three arguments: fall through to auto-detection.
        _ => None,
    };

    if let Some(location) = parsed {
        let mut pci = PciDevice::open_location(&location).map_err(|e| {
            DriverError::Pci(format!("failed to open PCI device {}: {e}", location))
        })?;
        return pci.full_info().map_err(|e| {
            DriverError::Pci(format!("failed to read PCI info for {}: {e}", location))
        });
    }

    // Auto-detection path: scan the display class and take the first
    // supported vendor.
    let devices = enumerate_pci_class(PCI_CLASS_DISPLAY)
        .map_err(|e| DriverError::Pci(format!("PCI scan failed: {e}")))?;
    let first = devices
        .into_iter()
        .find(|d| d.vendor_id == PCI_VENDOR_ID_AMD || d.vendor_id == PCI_VENDOR_ID_INTEL)
        .ok_or_else(|| {
            DriverError::NotFound("no AMD or Intel GPU found via scheme:pci".to_string())
        })?;
    let mut pci = PciDevice::open_location(&first.location)
        .map_err(|e| DriverError::Pci(format!("failed to open GPU {}: {e}", first.location)))?;
    pci.full_info()
        .map_err(|e| DriverError::Pci(format!("failed to read GPU {}: {e}", first.location)))
}
|
||||
|
||||
fn parse_location(bus: &str, device: &str, function: &str) -> Result<PciLocation> {
|
||||
let bus = parse_u8(bus)?;
|
||||
let device = parse_u8(device)?;
|
||||
let function = parse_u8(function)?;
|
||||
Ok(PciLocation {
|
||||
segment: 0,
|
||||
bus,
|
||||
device,
|
||||
function,
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_u8(value: &str) -> Result<u8> {
|
||||
let trimmed = value.trim_start_matches("0x");
|
||||
u8::from_str_radix(trimmed, 16)
|
||||
.or_else(|_| trimmed.parse::<u8>())
|
||||
.map_err(|_| DriverError::InvalidArgument("invalid PCI coordinate"))
|
||||
}
|
||||
|
||||
fn verify_supported_gpu(info: &PciDeviceInfo) -> Result<()> {
|
||||
if info.class_code != PCI_CLASS_DISPLAY {
|
||||
return Err(DriverError::Pci(format!(
|
||||
"device {} is class {:#04x}, expected display class {:#04x}",
|
||||
info.location, info.class_code, PCI_CLASS_DISPLAY
|
||||
)));
|
||||
}
|
||||
|
||||
if info.vendor_id != PCI_VENDOR_ID_AMD && info.vendor_id != PCI_VENDOR_ID_INTEL {
|
||||
return Err(DriverError::Pci(format!(
|
||||
"device {} is vendor {:#06x}, expected AMD {:#06x} or Intel {:#06x}",
|
||||
info.location, info.vendor_id, PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// In-memory firmware blobs read from scheme:firmware, keyed by the
/// firmware path (e.g. "amdgpu/psp_13_0_0_sos").
struct FirmwareCache {
    blobs: HashMap<String, Vec<u8>>,
}
|
||||
|
||||
impl FirmwareCache {
|
||||
fn load_for_device(info: &PciDeviceInfo) -> Result<Self> {
|
||||
if info.vendor_id != PCI_VENDOR_ID_AMD {
|
||||
info!(
|
||||
"redox-drm: skipping firmware load for Intel GPU {}",
|
||||
info.location
|
||||
);
|
||||
return Ok(Self {
|
||||
blobs: HashMap::new(),
|
||||
});
|
||||
}
|
||||
|
||||
let firmware_keys: &[&str] = if info.vendor_id == PCI_VENDOR_ID_AMD {
|
||||
&[
|
||||
"amdgpu/psp_13_0_0_sos",
|
||||
"amdgpu/psp_13_0_0_ta",
|
||||
"amdgpu/gc_11_0_0_pfp",
|
||||
"amdgpu/gc_11_0_0_me",
|
||||
"amdgpu/gc_11_0_0_ce",
|
||||
"amdgpu/gc_11_0_0_rlc",
|
||||
"amdgpu/gc_11_0_0_mec",
|
||||
"amdgpu/gc_11_0_0_mec2",
|
||||
"amdgpu/dcn_3_1_dmcub",
|
||||
"amdgpu/dmcub_dcn20.bin",
|
||||
"amdgpu/dmcub_dcn31.bin",
|
||||
"amdgpu/sdma_5_0",
|
||||
"amdgpu/sdma_5_2",
|
||||
"amdgpu/vcn_3_0_0",
|
||||
"amdgpu/vcn_3_1_0",
|
||||
]
|
||||
} else {
|
||||
&[]
|
||||
};
|
||||
|
||||
let mut blobs = HashMap::new();
|
||||
let mut loaded_any = false;
|
||||
|
||||
for &key in firmware_keys {
|
||||
let path = format!("/scheme/firmware/{}", key);
|
||||
match File::open(&path) {
|
||||
Ok(mut file) => {
|
||||
let metadata = file.metadata();
|
||||
let estimated_size = metadata.map(|m| m.len() as usize).unwrap_or(1024 * 1024);
|
||||
let mut buf = Vec::with_capacity(estimated_size);
|
||||
match file.read_to_end(&mut buf) {
|
||||
Ok(bytes_read) => {
|
||||
info!("redox-drm: loaded firmware {} ({} bytes)", key, bytes_read);
|
||||
loaded_any = true;
|
||||
blobs.insert(key.to_string(), buf);
|
||||
}
|
||||
Err(e) => {
|
||||
info!("redox-drm: failed to read firmware {}: {}", key, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
info!("redox-drm: firmware {} not available: {}", key, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !loaded_any && info.vendor_id == PCI_VENDOR_ID_AMD {
|
||||
return Err(DriverError::NotFound(
|
||||
"no AMD firmware blobs available from scheme:firmware".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
info!(
|
||||
"redox-drm: firmware cache populated with {} blob(s)",
|
||||
blobs.len()
|
||||
);
|
||||
Ok(Self { blobs })
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn get(&self, key: &str) -> Option<&[u8]> {
|
||||
self.blobs.get(key).map(|v| v.as_slice())
|
||||
}
|
||||
|
||||
fn into_blobs(self) -> HashMap<String, Vec<u8>> {
|
||||
self.blobs
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let log_level = match env::var("REDOX_DRM_LOG").as_deref() {
|
||||
Ok("trace") => LevelFilter::Trace,
|
||||
Ok("debug") => LevelFilter::Debug,
|
||||
Ok("warn") => LevelFilter::Warn,
|
||||
Ok("error") => LevelFilter::Error,
|
||||
_ => LevelFilter::Info,
|
||||
};
|
||||
|
||||
init_logging(log_level);
|
||||
|
||||
if let Err(error) = run() {
|
||||
error!("redox-drm: fatal error: {}", error);
|
||||
process::exit(1);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,975 @@
|
||||
use std::collections::{BTreeMap, HashSet};
|
||||
use std::mem::size_of;
|
||||
use std::sync::Arc;
|
||||
|
||||
use log::{debug, warn};
|
||||
use redox_scheme::SchemeBlockMut;
|
||||
use syscall04::data::Stat;
|
||||
use syscall04::error::{Error, Result, EBADF, EBUSY, EINVAL, ENOENT, EOPNOTSUPP};
|
||||
use syscall04::flag::{EventFlags, MapFlags, MunmapFlags, MODE_FILE};
|
||||
|
||||
use crate::driver::GpuDriver;
|
||||
use crate::gem::GemHandle;
|
||||
use crate::kms::ModeInfo;
|
||||
|
||||
/// Registry entry for one framebuffer created via ADDFB: the GEM object it
/// scans out of plus the geometry used for mode-compatibility checks.
#[derive(Clone, Debug)]
struct FbInfo {
    // Backing GEM buffer object.
    gem_handle: GemHandle,
    // Framebuffer geometry in pixels.
    width: u32,
    height: u32,
    // Bytes per scanline.
    pitch: u32,
    // Bits per pixel.
    bpp: u32,
}
|
||||
|
||||
// ---- DRM ioctl request codes ----
// NOTE(review): these request numbers are this scheme's own wire protocol.
// The names mirror the Linux DRM mode-setting ioctls, but the numeric
// offsets do not appear to match Linux's drm.h — confirm against the client
// library before assuming compatibility.
const DRM_IOCTL_BASE: usize = 0x00A0;
const DRM_IOCTL_MODE_GETRESOURCES: usize = DRM_IOCTL_BASE;
const DRM_IOCTL_MODE_GETCONNECTOR: usize = DRM_IOCTL_BASE + 7;
const DRM_IOCTL_MODE_GETMODES: usize = DRM_IOCTL_BASE + 8;
const DRM_IOCTL_MODE_SETCRTC: usize = DRM_IOCTL_BASE + 2;
const DRM_IOCTL_MODE_GETCRTC: usize = DRM_IOCTL_BASE + 3;
const DRM_IOCTL_MODE_GETENCODER: usize = DRM_IOCTL_BASE + 6;
const DRM_IOCTL_MODE_PAGE_FLIP: usize = DRM_IOCTL_BASE + 16;
const DRM_IOCTL_MODE_CREATE_DUMB: usize = DRM_IOCTL_BASE + 18;
const DRM_IOCTL_MODE_MAP_DUMB: usize = DRM_IOCTL_BASE + 19;
const DRM_IOCTL_MODE_DESTROY_DUMB: usize = DRM_IOCTL_BASE + 20;
const DRM_IOCTL_MODE_ADDFB: usize = DRM_IOCTL_BASE + 21;
const DRM_IOCTL_MODE_RMFB: usize = DRM_IOCTL_BASE + 22;
const DRM_IOCTL_GET_CAP: usize = DRM_IOCTL_BASE + 23;
const DRM_IOCTL_SET_CLIENT_CAP: usize = DRM_IOCTL_BASE + 24;
const DRM_IOCTL_VERSION: usize = DRM_IOCTL_BASE + 25;
|
||||
|
||||
// ---- Wire types for DRM ioctls ----
|
||||
/// Wire payload for MODE_GETRESOURCES: object counts only; the ids are
/// fetched through the per-object ioctls.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmResourcesWire {
    connector_count: u32,
    crtc_count: u32,
    encoder_count: u32,
}
|
||||
|
||||
/// Wire header for MODE_GETCONNECTOR; followed on the wire by `mode_count`
/// `DrmModeWire` records, each trailed by a NUL-terminated mode name.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmConnectorWire {
    connector_id: u32,
    // 1 = connected, 2 = disconnected, 0 = unknown (see encode_connector).
    connection: u32,
    connector_type: u32,
    // Physical size in millimeters.
    mm_width: u32,
    mm_height: u32,
    encoder_id: u32,
    mode_count: u32,
}
|
||||
|
||||
/// Wire representation of one display mode timing (horizontal and vertical
/// sync geometry, pixel clock, refresh rate, and flags).
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmModeWire {
    clock: u32,
    hdisplay: u16,
    hsync_start: u16,
    hsync_end: u16,
    htotal: u16,
    hskew: u16,
    vdisplay: u16,
    vsync_start: u16,
    vsync_end: u16,
    vtotal: u16,
    vscan: u16,
    vrefresh: u32,
    flags: u32,
    type_: u32,
}
|
||||
|
||||
/// Wire request for MODE_SETCRTC. `fb_handle == 0` with
/// `connector_count == 0` is the "disable this CRTC" form.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmSetCrtcWire {
    crtc_id: u32,
    // Framebuffer id to scan out (despite the name, this is an fb id).
    fb_handle: u32,
    // Number of valid entries in `connectors` (at most 8).
    connector_count: u32,
    connectors: [u32; 8],
    mode: DrmModeWire,
}
|
||||
|
||||
/// Wire request for MODE_PAGE_FLIP: swap the CRTC's scanout framebuffer.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmPageFlipWire {
    crtc_id: u32,
    // Framebuffer id to flip to.
    fb_handle: u32,
    // Driver-interpreted flip flags.
    flags: u32,
}
|
||||
|
||||
/// Wire request/response for MODE_CREATE_DUMB. The caller fills in width,
/// height, bpp, and flags; the scheme fills in pitch, size, and the new
/// GEM handle on the way back.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmCreateDumbWire {
    width: u32,
    height: u32,
    bpp: u32,
    flags: u32,
    // Out: bytes per scanline.
    pitch: u32,
    // Out: total allocation size in bytes.
    size: u64,
    // Out: GEM handle of the new buffer.
    handle: u32,
}
|
||||
|
||||
/// Wire request/response for MODE_MAP_DUMB: the scheme fills in the mmap
/// offset for the given GEM handle.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmMapDumbWire {
    handle: u32,
    // Out: offset to use when mapping the buffer.
    offset: u64,
}
|
||||
|
||||
/// Wire request for MODE_DESTROY_DUMB: free one GEM buffer by handle.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmDestroyDumbWire {
    handle: u32,
}
|
||||
|
||||
/// Wire request/response for MODE_GETENCODER.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGetEncoderWire {
    encoder_id: u32,
    encoder_type: u32,
    crtc_id: u32,
    possible_crtcs: u32,
    possible_clones: u32,
}
|
||||
|
||||
/// Wire request/response for MODE_ADDFB: register a GEM buffer as a
/// framebuffer. The scheme fills in `fb_id` on the way back.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmAddFbWire {
    width: u32,
    height: u32,
    // Bytes per scanline; 0 lets the scheme compute the minimum stride.
    pitch: u32,
    bpp: u32,
    depth: u32,
    // Backing GEM handle.
    handle: u32,
    // Out: newly assigned framebuffer id.
    fb_id: u32,
}
|
||||
|
||||
/// Wire request for MODE_RMFB: unregister a framebuffer by id.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmRmFbWire {
    fb_id: u32,
}
|
||||
|
||||
/// Wire request/response for MODE_GETCRTC: the CRTC's current framebuffer
/// and mode, with `mode_valid == 0` when the CRTC is not configured.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGetCrtcWire {
    crtc_id: u32,
    fb_id: u32,
    x: u32,
    y: u32,
    mode_valid: u32,
    mode: DrmModeWire,
}
|
||||
|
||||
/// Wire response for DRM_IOCTL_VERSION: semantic version of this scheme's
/// protocol.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmVersionWire {
    major: i32,
    minor: i32,
    patch: i32,
}
|
||||
|
||||
/// Wire request/response for GET_CAP: the scheme fills in `value` for the
/// queried capability.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGetCapWire {
    capability: u64,
    value: u64,
}
|
||||
|
||||
/// Wire request for SET_CLIENT_CAP; currently accepted and ignored by the
/// dispatcher.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmSetClientCapWire {
    capability: u64,
    value: u64,
}
|
||||
|
||||
// ---- Internal handle types ----
|
||||
|
||||
/// Which node of the scheme a file handle was opened on.
#[derive(Clone, Debug)]
enum NodeKind {
    // drm:card0 — the device node; all ioctls go here.
    Card,
    // drm:card0Connector/<id> — a per-connector node.
    Connector(u32),
}
|
||||
|
||||
/// Per-open-file state for one scheme client.
struct Handle {
    // Node this handle was opened on.
    node: NodeKind,
    // Staged response from the last ioctl, consumed by subsequent read()s.
    response: Vec<u8>,
    // GEM handle most recently prepared for mmap via MAP_DUMB, if any.
    mapped_gem: Option<GemHandle>,
    // Framebuffer ids created by this fd (ADDFB); used for ownership checks.
    owned_fbs: Vec<u32>,
    // GEM handles created by this fd (CREATE_DUMB); used for ownership checks.
    owned_gems: Vec<GemHandle>,
}
|
||||
|
||||
/// The `drm` scheme: translates scheme requests into GPU driver calls and
/// tracks framebuffer/GEM lifetimes across clients.
pub struct DrmScheme {
    // Driver backend shared with the IRQ thread.
    driver: Arc<dyn GpuDriver>,
    // Next file-handle id to hand out.
    next_id: usize,
    // Next framebuffer id; starts at 1 so 0 can mean "no framebuffer".
    next_fb_id: u32,
    // Open file handles by id.
    handles: BTreeMap<usize, Handle>,
    // Framebuffer currently scanned out per CRTC.
    active_crtc_fb: BTreeMap<u32, u32>,
    // Mode currently programmed per CRTC.
    active_crtc_mode: BTreeMap<u32, ModeInfo>,
    // Per CRTC: (vblank count at which the flip retires, previous fb to reap).
    pending_flip_fb: BTreeMap<u32, (u64, u32)>,
    // All registered framebuffers by fb id.
    fb_registry: BTreeMap<u32, FbInfo>,
}
|
||||
|
||||
impl DrmScheme {
    /// Create an empty scheme around the probed driver. fb ids start at 1 so
    /// id 0 can serve as the "no framebuffer" sentinel.
    pub fn new(driver: Arc<dyn GpuDriver>) -> Self {
        Self {
            driver,
            next_id: 0,
            next_fb_id: 1,
            handles: BTreeMap::new(),
            active_crtc_fb: BTreeMap::new(),
            active_crtc_mode: BTreeMap::new(),
            pending_flip_fb: BTreeMap::new(),
            fb_registry: BTreeMap::new(),
        }
    }

    /// Drop all per-fd state for a closed handle.
    /// NOTE(review): this only removes the Handle; orphaned fbs/gems owned by
    /// the fd are not reaped here — confirm whether the close path elsewhere
    /// handles that.
    #[allow(dead_code)]
    pub fn on_close(&mut self, id: usize) {
        self.handles.remove(&id);
    }

    /// True while `fb_id` is scanned out on some CRTC or is the "previous"
    /// buffer of a not-yet-retired page flip.
    fn is_fb_active(&self, fb_id: u32) -> bool {
        self.active_crtc_fb.values().any(|&id| id == fb_id)
            || self.pending_flip_fb.values().any(|&(_, id)| id == fb_id)
    }

    /// Called from the vblank thread: once the CRTC's vblank counter reaches
    /// the flip's retire point, the displaced framebuffer may be reaped.
    pub fn retire_vblank(&mut self, crtc_id: u32, vblank_count: u64) {
        if let Some((expected, fb_id)) = self.pending_flip_fb.get(&crtc_id).copied() {
            if expected <= vblank_count {
                self.pending_flip_fb.remove(&crtc_id);
                self.try_reap_fb(fb_id);
            }
        }
    }

    /// Free a framebuffer (and its GEM buffer) once nothing references it:
    /// the fb must be unowned by every fd, and the GEM object is closed only
    /// when no other fb and no fd still references it.
    fn try_reap_fb(&mut self, fb_id: u32) {
        let gem_handle = match self.fb_registry.get(&fb_id) {
            Some(info) => info.gem_handle,
            None => return,
        };
        // An fd still owns the fb: it stays registered until RMFB/close.
        let still_owned = self.handles.values().any(|h| h.owned_fbs.contains(&fb_id));
        if still_owned {
            return;
        }
        self.fb_registry.remove(&fb_id);
        // The GEM buffer may back other framebuffers or still be owned by an
        // fd (CREATE_DUMB); only close it when fully unreferenced.
        let still_referenced = self
            .fb_registry
            .values()
            .any(|i| i.gem_handle == gem_handle);
        let gem_owned = self
            .handles
            .values()
            .any(|h| h.owned_gems.contains(&gem_handle));
        if !still_referenced && !gem_owned {
            if let Err(e) = self.driver.gem_close(gem_handle) {
                warn!(
                    "redox-drm: try_reap_fb gem_close({}) failed: {}",
                    gem_handle, e
                );
            }
        }
    }

    // ---- Encode helpers ----

    /// Serialize the GETRESOURCES reply. One CRTC is advertised and one
    /// encoder per connector.
    fn encode_resources(&self) -> Vec<u8> {
        let connectors = self.driver.detect_connectors();
        let payload = DrmResourcesWire {
            connector_count: connectors.len() as u32,
            crtc_count: 1,
            encoder_count: connectors.len() as u32,
        };
        bytes_of(&payload)
    }

    /// Serialize the GETCONNECTOR reply: fixed header followed by one
    /// DrmModeWire + NUL-terminated name per mode.
    fn encode_connector(&self, connector_id: u32) -> Result<Vec<u8>> {
        let connector = self
            .driver
            .detect_connectors()
            .into_iter()
            .find(|c| c.id == connector_id)
            .ok_or_else(|| Error::new(ENOENT))?;

        let header = DrmConnectorWire {
            connector_id: connector.id,
            // Wire encoding: 1 connected, 2 disconnected, 0 unknown.
            connection: match connector.connection {
                crate::kms::ConnectorStatus::Connected => 1,
                crate::kms::ConnectorStatus::Disconnected => 2,
                crate::kms::ConnectorStatus::Unknown => 0,
            },
            connector_type: connector_type_to_u32(connector.connector_type),
            mm_width: connector.mm_width,
            mm_height: connector.mm_height,
            encoder_id: connector.encoder_id,
            mode_count: connector.modes.len() as u32,
        };

        let mut out = bytes_of(&header);
        for mode in &connector.modes {
            out.extend_from_slice(&bytes_of(&mode_to_wire(mode)));
            out.extend_from_slice(mode.name.as_bytes());
            out.push(0);
        }
        Ok(out)
    }

    // ---- ioctl dispatch ----

    /// Dispatch one ioctl written to handle `id`. On success the encoded
    /// reply is staged in the handle's `response` buffer (empty replies are
    /// normalized to a single zero byte) and the staged length is returned.
    fn handle_ioctl(&mut self, id: usize, request: usize, payload: &[u8]) -> Result<usize> {
        let response = match request {
            DRM_IOCTL_MODE_GETRESOURCES => self.encode_resources(),

            DRM_IOCTL_MODE_GETCONNECTOR => {
                // Connector id comes from the payload when present; otherwise
                // it is implied by an opened connector node.
                let connector_id = if payload.len() >= size_of::<u32>() {
                    read_u32(payload, 0)?
                } else {
                    match self.handles.get(&id).map(|h| &h.node) {
                        Some(NodeKind::Connector(cid)) => *cid,
                        _ => return Err(Error::new(EINVAL)),
                    }
                };
                self.encode_connector(connector_id)?
            }

            DRM_IOCTL_MODE_GETMODES => {
                let connector_id = read_u32(payload, 0)?;
                let modes = self.driver.get_modes(connector_id);
                encode_modes(&modes)
            }

            DRM_IOCTL_MODE_SETCRTC => {
                let req = decode_wire::<DrmSetCrtcWire>(payload)?;
                // fb 0 with no connectors is the "disable CRTC" form: drop
                // the CRTC's bookkeeping and reap the displaced buffers.
                // NOTE(review): this path returns length 1 without replacing
                // handle.response, so a subsequent read returns the previous
                // staged reply — confirm this is intended.
                if req.fb_handle == 0 && req.connector_count == 0 {
                    let completed_flip = self.pending_flip_fb.remove(&req.crtc_id);
                    let prev_fb_id = self.active_crtc_fb.remove(&req.crtc_id);
                    self.active_crtc_mode.remove(&req.crtc_id);
                    if let Some((_, fb_id)) = completed_flip {
                        self.try_reap_fb(fb_id);
                    }
                    if let Some(fb_id) = prev_fb_id {
                        self.try_reap_fb(fb_id);
                    }
                    return Ok(1);
                }
                let count = req.connector_count as usize;
                if count > req.connectors.len() {
                    return Err(Error::new(EINVAL));
                }
                let conns = req.connectors[..count].to_vec();
                let fb_info = self.fb_registry.get(&req.fb_handle).ok_or_else(|| {
                    warn!("redox-drm: SETCRTC with unknown fb_id {}", req.fb_handle);
                    Error::new(ENOENT)
                })?;
                let mode = wire_to_mode(&req.mode);
                // Validate the framebuffer geometry against the requested
                // mode before touching hardware.
                let fb_pitch = fb_info.pitch as u64;
                let required_fb_lines = mode.vdisplay as u64;
                let fb_height = fb_info.height as u64;
                let fb_width = fb_info.width as u64;
                let mode_width = mode.hdisplay as u64;
                if fb_pitch.checked_mul(required_fb_lines).is_none() {
                    warn!("redox-drm: SETCRTC FB pitch * mode_height overflows");
                    return Err(Error::new(EINVAL));
                }
                if fb_pitch == 0 || fb_height < required_fb_lines || fb_width < mode_width {
                    warn!(
                        "redox-drm: SETCRTC FB {}x{} pitch={} too small for mode {}x{}",
                        fb_info.width, fb_info.height, fb_info.pitch, mode.hdisplay, mode.vdisplay
                    );
                    return Err(Error::new(EINVAL));
                }
                let gem_handle = fb_info.gem_handle;
                self.driver
                    .set_crtc(req.crtc_id, gem_handle, &conns, &mode)
                    .map_err(driver_to_syscall)?;
                // The new configuration supersedes any in-flight flip and the
                // previously active fb; reap whichever buffers fell out.
                let completed_flip = self.pending_flip_fb.remove(&req.crtc_id);
                let prev_fb = self.active_crtc_fb.insert(req.crtc_id, req.fb_handle);
                self.active_crtc_mode.insert(req.crtc_id, mode);
                if let Some((_, fb_id)) = completed_flip {
                    self.try_reap_fb(fb_id);
                }
                if let Some(prev) = prev_fb {
                    if prev != req.fb_handle {
                        self.try_reap_fb(prev);
                    }
                }
                Vec::new()
            }

            DRM_IOCTL_MODE_PAGE_FLIP => {
                let req = decode_wire::<DrmPageFlipWire>(payload)?;
                // Only one flip may be outstanding per CRTC.
                if self.pending_flip_fb.contains_key(&req.crtc_id) {
                    warn!(
                        "redox-drm: PAGE_FLIP rejected — flip already pending on CRTC {}",
                        req.crtc_id
                    );
                    return Err(Error::new(EBUSY));
                }
                let fb_info = self.fb_registry.get(&req.fb_handle).ok_or_else(|| {
                    warn!("redox-drm: PAGE_FLIP with unknown fb_id {}", req.fb_handle);
                    Error::new(ENOENT)
                })?;
                // When a mode is active, make sure the new fb can cover it.
                if let Some(active_mode) = self.active_crtc_mode.get(&req.crtc_id) {
                    let fb_pitch = fb_info.pitch as u64;
                    let required_lines = active_mode.vdisplay as u64;
                    let required_width = active_mode.hdisplay as u64;
                    if fb_pitch == 0
                        || (fb_info.height as u64) < required_lines
                        || (fb_info.width as u64) < required_width
                    {
                        warn!(
                            "redox-drm: PAGE_FLIP FB {}x{} pitch={} too small for active mode {}x{}",
                            fb_info.width, fb_info.height, fb_info.pitch,
                            active_mode.hdisplay, active_mode.vdisplay
                        );
                        return Err(Error::new(EINVAL));
                    }
                }
                let gem_handle = fb_info.gem_handle;
                let seqno = self
                    .driver
                    .page_flip(req.crtc_id, gem_handle, req.flags)
                    .map_err(driver_to_syscall)?;
                // Schedule the displaced fb to be reaped one vblank from now
                // (retire_vblank compares against this count).
                let current_vblank = self.driver.get_vblank(req.crtc_id).unwrap_or(0);
                let prev = self.active_crtc_fb.insert(req.crtc_id, req.fb_handle);
                if let Some(old_fb) = prev {
                    if old_fb != req.fb_handle {
                        self.pending_flip_fb
                            .insert(req.crtc_id, (current_vblank.saturating_add(1), old_fb));
                    }
                }
                seqno.to_le_bytes().to_vec()
            }

            DRM_IOCTL_MODE_CREATE_DUMB => {
                let mut req = decode_wire::<DrmCreateDumbWire>(payload)?;
                // Minimum stride in whole bytes: ceil(width * bpp / 8).
                let pitch = (req.width.saturating_mul(req.bpp).saturating_add(7)) / 8;
                req.pitch = pitch;
                req.size = (pitch as u64).saturating_mul(req.height as u64);
                req.handle = self
                    .driver
                    .gem_create(req.size)
                    .map_err(driver_to_syscall)?;
                // Record ownership so later MAP/DESTROY/ADDFB can enforce it.
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.owned_gems.push(req.handle);
                }
                bytes_of(&req)
            }

            DRM_IOCTL_MODE_MAP_DUMB => {
                let mut req = decode_wire::<DrmMapDumbWire>(payload)?;
                // Only the creating fd may map a dumb buffer.
                let owned = self
                    .handles
                    .get(&id)
                    .map(|h| h.owned_gems.contains(&req.handle))
                    .unwrap_or(false);
                if !owned {
                    warn!(
                        "redox-drm: MAP_DUMB handle {} not owned by this fd",
                        req.handle
                    );
                    return Err(Error::new(EBADF));
                }
                req.offset = self
                    .driver
                    .gem_mmap(req.handle)
                    .map_err(driver_to_syscall)? as u64;
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.mapped_gem = Some(req.handle);
                }
                bytes_of(&req)
            }

            DRM_IOCTL_MODE_DESTROY_DUMB => {
                let req = decode_wire::<DrmDestroyDumbWire>(payload)?;
                // Only the creating fd may destroy a dumb buffer.
                let owned = self
                    .handles
                    .get(&id)
                    .map(|h| h.owned_gems.contains(&req.handle))
                    .unwrap_or(false);
                if !owned {
                    warn!(
                        "redox-drm: DESTROY_DUMB handle {} not owned by this fd",
                        req.handle
                    );
                    return Err(Error::new(EBADF));
                }
                // Refuse to pull the backing store out from under a
                // registered framebuffer; RMFB must come first.
                let backs_fb = self
                    .fb_registry
                    .values()
                    .any(|info| info.gem_handle == req.handle);
                if backs_fb {
                    warn!(
                        "redox-drm: DESTROY_DUMB handle {} rejected — backs an active framebuffer",
                        req.handle
                    );
                    return Err(Error::new(EBUSY));
                }
                self.driver
                    .gem_close(req.handle)
                    .map_err(driver_to_syscall)?;
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.owned_gems.retain(|&h| h != req.handle);
                }
                Vec::new()
            }

            DRM_IOCTL_MODE_GETENCODER => {
                // Static reply matching the single-CRTC topology advertised
                // by encode_resources.
                let _req = decode_wire::<DrmGetEncoderWire>(payload)?;
                let resp = DrmGetEncoderWire {
                    encoder_id: _req.encoder_id,
                    encoder_type: 0,
                    crtc_id: 1,
                    possible_crtcs: 1,
                    possible_clones: 0,
                };
                bytes_of(&resp)
            }

            DRM_IOCTL_MODE_GETCRTC => {
                let req = decode_wire::<DrmGetCrtcWire>(payload)?;
                // Report the active fb/mode only while the fb is still
                // registered; otherwise answer "not configured".
                let (fb_id, mode_valid, mode) = match (
                    self.active_crtc_fb.get(&req.crtc_id),
                    self.active_crtc_mode.get(&req.crtc_id),
                ) {
                    (Some(&fb), Some(m)) if self.fb_registry.contains_key(&fb) => {
                        (fb, 1u32, mode_to_wire(m))
                    }
                    _ => (0u32, 0u32, DrmModeWire::default()),
                };
                let resp = DrmGetCrtcWire {
                    crtc_id: req.crtc_id,
                    fb_id,
                    x: 0,
                    y: 0,
                    mode_valid,
                    mode,
                };
                bytes_of(&resp)
            }

            DRM_IOCTL_MODE_ADDFB => {
                let req = decode_wire::<DrmAddFbWire>(payload)?;
                if req.handle == 0 {
                    return Err(Error::new(EINVAL));
                }
                if req.width == 0 || req.height == 0 || req.bpp == 0 {
                    warn!(
                        "redox-drm: ADDFB zero dimension width={} height={} bpp={}",
                        req.width, req.height, req.bpp
                    );
                    return Err(Error::new(EINVAL));
                }
                // Pitch 0 means "compute the minimum stride"; an explicit
                // pitch must at least cover one row of pixels.
                let min_stride = (req.width.saturating_mul(req.bpp).saturating_add(7)) / 8;
                let pitch = if req.pitch != 0 {
                    req.pitch
                } else {
                    min_stride
                };
                if pitch == 0 || pitch < min_stride {
                    warn!(
                        "redox-drm: ADDFB pitch {} below minimum stride {} ({}x{})",
                        pitch, min_stride, req.width, req.bpp
                    );
                    return Err(Error::new(EINVAL));
                }
                let required_size = (pitch as u64).checked_mul(req.height as u64);
                if required_size.is_none() {
                    warn!(
                        "redox-drm: ADDFB pitch * height overflows pitch={} height={}",
                        pitch, req.height
                    );
                    return Err(Error::new(EINVAL));
                }
                // Only the fd that created the GEM buffer may wrap it.
                let owned = self
                    .handles
                    .get(&id)
                    .map(|h| h.owned_gems.contains(&req.handle))
                    .unwrap_or(false);
                if !owned {
                    warn!(
                        "redox-drm: ADDFB handle {} not owned by this fd",
                        req.handle
                    );
                    return Err(Error::new(EBADF));
                }
                // The buffer must actually be large enough for the claimed
                // geometry.
                let actual_size = self.driver.gem_size(req.handle).map_err(|e| {
                    warn!("redox-drm: ADDFB handle {} not found: {}", req.handle, e);
                    Error::new(ENOENT)
                })?;
                if required_size.unwrap() > actual_size {
                    warn!(
                        "redox-drm: ADDFB requires {} bytes but GEM {} is {} bytes",
                        required_size.unwrap(),
                        req.handle,
                        actual_size
                    );
                    return Err(Error::new(EINVAL));
                }
                let fb_id = self.next_fb_id;
                self.next_fb_id = self.next_fb_id.saturating_add(1);
                self.fb_registry.insert(
                    fb_id,
                    FbInfo {
                        gem_handle: req.handle,
                        width: req.width,
                        height: req.height,
                        pitch,
                        bpp: req.bpp,
                    },
                );
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.owned_fbs.push(fb_id);
                }
                let mut resp = req;
                resp.fb_id = fb_id;
                bytes_of(&resp)
            }

            DRM_IOCTL_MODE_RMFB => {
                let req = decode_wire::<DrmRmFbWire>(payload)?;
                // Only the owning fd may remove the framebuffer.
                let owned = self
                    .handles
                    .get(&id)
                    .map(|h| h.owned_fbs.contains(&req.fb_id))
                    .unwrap_or(false);
                if !owned {
                    warn!("redox-drm: RMFB {} not owned by this fd", req.fb_id);
                    return Err(Error::new(EBADF));
                }
                // A framebuffer still on scanout (or awaiting flip retire)
                // cannot be removed.
                let in_use = self.is_fb_active(req.fb_id);
                if in_use {
                    warn!(
                        "redox-drm: RMFB {} rejected — still active on a CRTC",
                        req.fb_id
                    );
                    return Err(Error::new(EBUSY));
                }
                if let Some(fb_info) = self.fb_registry.remove(&req.fb_id) {
                    // Close the backing GEM buffer only when no other fb and
                    // no fd still references it.
                    let still_referenced = self
                        .fb_registry
                        .values()
                        .any(|i| i.gem_handle == fb_info.gem_handle);
                    let still_owned = self
                        .handles
                        .values()
                        .any(|h| h.owned_gems.contains(&fb_info.gem_handle));
                    if !still_referenced && !still_owned {
                        if let Err(e) = self.driver.gem_close(fb_info.gem_handle) {
                            warn!(
                                "redox-drm: RMFB gem_close({}) failed: {}",
                                fb_info.gem_handle, e
                            );
                        }
                    }
                }
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.owned_fbs.retain(|&fb| fb != req.fb_id);
                }
                Vec::new()
            }

            DRM_IOCTL_GET_CAP => {
                let mut req = decode_wire::<DrmGetCapWire>(payload)?;
                // NOTE(review): capabilities 0 and 1 report 1, everything
                // else 0 — the capability numbering is scheme-defined here;
                // confirm against the client library.
                req.value = match req.capability {
                    0 => 1,
                    1 => 1,
                    _ => 0,
                };
                bytes_of(&req)
            }

            // Accepted but currently ignored.
            DRM_IOCTL_SET_CLIENT_CAP => Vec::new(),

            DRM_IOCTL_VERSION => {
                let resp = DrmVersionWire {
                    major: 1,
                    minor: 0,
                    patch: 0,
                };
                bytes_of(&resp)
            }

            _ => {
                warn!("redox-drm: unsupported ioctl {:#x}", request);
                return Err(Error::new(EOPNOTSUPP));
            }
        };

        // Normalize empty replies to a single zero byte so a success is
        // always observable via a non-zero staged length.
        let response = if response.is_empty() {
            vec![0]
        } else {
            response
        };

        let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?;
        let len = response.len();
        handle.response = response;
        Ok(len)
    }
}
|
||||
|
||||
// ---- SchemeBlockMut implementation ----
|
||||
|
||||
impl SchemeBlockMut for DrmScheme {
    /// Open a node under the `drm:` scheme.
    ///
    /// Accepted paths (leading/trailing slashes stripped):
    /// * `card0` — the card node itself.
    /// * `card0Connector/<id>` — a single connector, `<id>` parsed as `u32`.
    ///
    /// Anything else — including a non-numeric connector id — fails with
    /// ENOENT. On success a fresh handle id with empty per-handle state
    /// (no response, no mapped GEM, no owned FBs/GEMs) is returned.
    fn open(&mut self, path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result<Option<usize>> {
        let node = match path.trim_matches('/') {
            "card0" => NodeKind::Card,
            p if p.starts_with("card0Connector/") => {
                let tail = p.trim_start_matches("card0Connector/");
                let connector_id = tail.parse::<u32>().map_err(|_| Error::new(ENOENT))?;
                NodeKind::Connector(connector_id)
            }
            _ => return Err(Error::new(ENOENT)),
        };

        let id = self.next_id;
        // saturating_add: ids never wrap back onto a possibly-live low id;
        // after usize::MAX opens, new ids would collide at MAX rather than 0.
        self.next_id = self.next_id.saturating_add(1);
        self.handles.insert(
            id,
            Handle {
                node,
                response: Vec::new(),
                mapped_gem: None,
                owned_fbs: Vec::new(),
                owned_gems: Vec::new(),
            },
        );
        Ok(Some(id))
    }

    /// Copy the response produced by the most recent ioctl-style `write` into
    /// `buf`, truncated to `buf.len()`. The response buffer is NOT consumed
    /// and no read offset is tracked — every read copies from byte 0 again.
    fn read(&mut self, id: usize, buf: &mut [u8]) -> Result<Option<usize>> {
        let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?;
        let len = handle.response.len().min(buf.len());
        buf[..len].copy_from_slice(&handle.response[..len]);
        Ok(Some(len))
    }

    /// Ioctl transport: the first 8 bytes of `buf` are the little-endian
    /// request code (note: `usize::from_le_bytes` on `[u8; 8]` assumes a
    /// 64-bit target), the remainder is the request payload.
    /// A write shorter than 8 bytes is accepted as a no-op (`Ok(Some(0))`)
    /// after the handle id has been validated.
    fn write(&mut self, id: usize, buf: &[u8]) -> Result<Option<usize>> {
        let (request_bytes, payload) = match buf.split_first_chunk::<8>() {
            Some(pair) => pair,
            None => {
                // Too short to carry a request code; still surface EBADF for
                // an unknown handle rather than silently succeeding.
                let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
                return Ok(Some(0));
            }
        };
        let request = usize::from_le_bytes(*request_bytes);
        let written = self.handle_ioctl(id, request, payload)?;
        Ok(Some(written))
    }

    /// Reconstruct the canonical scheme path for a handle and copy it into
    /// `buf`, truncated to `buf.len()` (no NUL terminator is written).
    fn fpath(&mut self, id: usize, buf: &mut [u8]) -> Result<Option<usize>> {
        let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        let path = match handle.node {
            NodeKind::Card => "drm:card0".to_string(),
            NodeKind::Connector(cid) => format!("drm:card0Connector/{cid}"),
        };
        let bytes = path.as_bytes();
        let len = bytes.len().min(buf.len());
        buf[..len].copy_from_slice(&bytes[..len]);
        Ok(Some(len))
    }

    /// Minimal stat: a world-read/writable regular file whose size reports the
    /// length of the currently buffered ioctl response.
    fn fstat(&mut self, id: usize, stat: &mut Stat) -> Result<Option<usize>> {
        let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        stat.st_mode = MODE_FILE | 0o666;
        stat.st_size = handle.response.len() as u64;
        stat.st_blksize = 4096;
        Ok(Some(0))
    }

    /// Nothing to flush; only validates that the handle exists.
    fn fsync(&mut self, id: usize) -> Result<Option<usize>> {
        let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        Ok(Some(0))
    }

    /// Event registration stub: no events are produced here, so an empty flag
    /// set is always reported after the handle is validated.
    fn fevent(&mut self, id: usize, _flags: EventFlags) -> Result<Option<EventFlags>> {
        let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        Ok(Some(EventFlags::empty()))
    }

    /// Tear down a handle and garbage-collect the resources it owned.
    ///
    /// Framebuffers: each FB owned by this handle is removed from the registry
    /// unless it is still active on a CRTC — those are deliberately left
    /// registered so the display keeps scanning out. When a removed FB's
    /// backing GEM is no longer referenced by any remaining registered FB nor
    /// owned by any other handle, that GEM is closed and recorded in
    /// `auto_closed_gems`.
    ///
    /// GEMs: the handle's remaining owned GEMs are then closed unless they
    /// were already auto-closed above or still back a registered framebuffer.
    ///
    /// Driver `gem_close` failures are logged but never fail the close; an
    /// unknown handle id is silently ignored (close always returns success).
    fn close(&mut self, id: usize) -> Result<Option<usize>> {
        if let Some(handle) = self.handles.remove(&id) {
            let mut auto_closed_gems = HashSet::new();
            for fb_id in &handle.owned_fbs {
                let in_use = self.is_fb_active(*fb_id);
                if in_use {
                    // FB is scanning out on a CRTC: keep it registered.
                    continue;
                }
                if let Some(fb_info) = self.fb_registry.remove(fb_id) {
                    // `handle` was already removed from `self.handles` above,
                    // so both scans below only see OTHER handles/FBs.
                    let still_referenced = self
                        .fb_registry
                        .values()
                        .any(|i| i.gem_handle == fb_info.gem_handle);
                    let still_owned = self
                        .handles
                        .values()
                        .any(|h| h.owned_gems.contains(&fb_info.gem_handle));
                    if !still_referenced && !still_owned {
                        match self.driver.gem_close(fb_info.gem_handle) {
                            Ok(()) => {
                                auto_closed_gems.insert(fb_info.gem_handle);
                            }
                            Err(e) => {
                                warn!(
                                    "redox-drm: close gem_close({}) failed: {}",
                                    fb_info.gem_handle, e
                                );
                            }
                        }
                    }
                }
            }
            for gem_handle in handle.owned_gems {
                if auto_closed_gems.contains(&gem_handle) {
                    // Already released during FB cleanup; avoid a double-close.
                    continue;
                }
                let backs_fb = self
                    .fb_registry
                    .values()
                    .any(|info| info.gem_handle == gem_handle);
                if !backs_fb {
                    if let Err(e) = self.driver.gem_close(gem_handle) {
                        warn!(
                            "redox-drm: close gem GEM {} cleanup failed: {}",
                            gem_handle, e
                        );
                    }
                }
            }
        }
        Ok(Some(0))
    }

    /// Prepare an mmap of the GEM object currently bound to this handle.
    ///
    /// Requires that `mapped_gem` was set by an earlier ioctl; otherwise
    /// EINVAL. Offset, size and flags are currently ignored — the driver
    /// returns a single base address for the whole object.
    fn mmap_prep(
        &mut self,
        id: usize,
        _offset: u64,
        _size: usize,
        _flags: MapFlags,
    ) -> Result<Option<usize>> {
        let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        let gem_handle = handle.mapped_gem.ok_or_else(|| Error::new(EINVAL))?;
        let addr = self
            .driver
            .gem_mmap(gem_handle)
            .map_err(driver_to_syscall)?;
        debug!(
            "redox-drm: mmap_prep GEM handle {} at addr={:#x}",
            gem_handle, addr
        );
        Ok(Some(addr))
    }

    /// Unmap is a no-op beyond handle validation; GEM lifetime is managed in
    /// `close`, not here.
    fn munmap(
        &mut self,
        id: usize,
        _offset: u64,
        _size: usize,
        _flags: MunmapFlags,
    ) -> Result<Option<usize>> {
        let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        Ok(Some(0))
    }
}
|
||||
|
||||
// ---- Conversion helpers ----
|
||||
|
||||
fn connector_type_to_u32(ct: crate::kms::ConnectorType) -> u32 {
|
||||
match ct {
|
||||
crate::kms::ConnectorType::Unknown => 0,
|
||||
crate::kms::ConnectorType::VGA => 1,
|
||||
crate::kms::ConnectorType::DVII => 2,
|
||||
crate::kms::ConnectorType::DVID => 3,
|
||||
crate::kms::ConnectorType::DVIA => 4,
|
||||
crate::kms::ConnectorType::Composite => 5,
|
||||
crate::kms::ConnectorType::SVideo => 6,
|
||||
crate::kms::ConnectorType::LVDS => 7,
|
||||
crate::kms::ConnectorType::Component => 8,
|
||||
crate::kms::ConnectorType::NinePinDIN => 9,
|
||||
crate::kms::ConnectorType::DisplayPort => 10,
|
||||
crate::kms::ConnectorType::HDMIA => 11,
|
||||
crate::kms::ConnectorType::HDMIB => 12,
|
||||
crate::kms::ConnectorType::TV => 13,
|
||||
crate::kms::ConnectorType::EDP => 14,
|
||||
crate::kms::ConnectorType::Virtual => 15,
|
||||
}
|
||||
}
|
||||
|
||||
fn mode_to_wire(mode: &ModeInfo) -> DrmModeWire {
|
||||
DrmModeWire {
|
||||
clock: mode.clock,
|
||||
hdisplay: mode.hdisplay,
|
||||
hsync_start: mode.hsync_start,
|
||||
hsync_end: mode.hsync_end,
|
||||
htotal: mode.htotal,
|
||||
hskew: mode.hskew,
|
||||
vdisplay: mode.vdisplay,
|
||||
vsync_start: mode.vsync_start,
|
||||
vsync_end: mode.vsync_end,
|
||||
vtotal: mode.vtotal,
|
||||
vscan: mode.vscan,
|
||||
vrefresh: mode.vrefresh,
|
||||
flags: mode.flags,
|
||||
type_: mode.type_,
|
||||
}
|
||||
}
|
||||
|
||||
fn wire_to_mode(w: &DrmModeWire) -> ModeInfo {
|
||||
ModeInfo {
|
||||
clock: w.clock,
|
||||
hdisplay: w.hdisplay,
|
||||
hsync_start: w.hsync_start,
|
||||
hsync_end: w.hsync_end,
|
||||
htotal: w.htotal,
|
||||
hskew: w.hskew,
|
||||
vdisplay: w.vdisplay,
|
||||
vsync_start: w.vsync_start,
|
||||
vsync_end: w.vsync_end,
|
||||
vtotal: w.vtotal,
|
||||
vscan: w.vscan,
|
||||
vrefresh: w.vrefresh,
|
||||
flags: w.flags,
|
||||
type_: w.type_,
|
||||
name: format!("{}x{}@{}", w.hdisplay, w.vdisplay, w.vrefresh),
|
||||
}
|
||||
}
|
||||
|
||||
fn encode_modes(modes: &[ModeInfo]) -> Vec<u8> {
|
||||
let mut out = Vec::new();
|
||||
for mode in modes {
|
||||
out.extend_from_slice(&bytes_of(&mode_to_wire(mode)));
|
||||
out.extend_from_slice(mode.name.as_bytes());
|
||||
out.push(0);
|
||||
}
|
||||
if out.is_empty() {
|
||||
out.push(0);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
/// Copy `value`'s in-memory representation into an owned byte vector.
///
/// Intended for plain-old-data wire structs. NOTE(review): if any `T` used
/// here contains padding bytes, those bytes are uninitialized when read —
/// confirm all wire types are padding-free (e.g. `#[repr(C, packed)]` or
/// fully laid-out fields).
fn bytes_of<T>(value: &T) -> Vec<u8> {
    let len = std::mem::size_of::<T>();
    let raw = (value as *const T).cast::<u8>();
    // SAFETY: `raw` points at exactly `len` readable bytes owned by `value`,
    // which outlives this borrow; the slice is copied out immediately.
    let view = unsafe { std::slice::from_raw_parts(raw, len) };
    view.to_vec()
}
|
||||
|
||||
fn read_u32(buf: &[u8], offset: usize) -> Result<u32> {
|
||||
let end = offset.saturating_add(size_of::<u32>());
|
||||
let bytes = buf.get(offset..end).ok_or_else(|| Error::new(EINVAL))?;
|
||||
let array: [u8; 4] = bytes.try_into().map_err(|_| Error::new(EINVAL))?;
|
||||
Ok(u32::from_le_bytes(array))
|
||||
}
|
||||
|
||||
fn decode_wire<T: Copy>(buf: &[u8]) -> Result<T> {
|
||||
if buf.len() < size_of::<T>() {
|
||||
return Err(Error::new(EINVAL));
|
||||
}
|
||||
let ptr = buf.as_ptr() as *const T;
|
||||
Ok(unsafe { ptr.read_unaligned() })
|
||||
}
|
||||
|
||||
/// Map any driver-layer error onto a syscall-level error for scheme callers.
///
/// The specific driver failure is logged here; the caller only ever sees
/// EINVAL, since the scheme protocol offers no richer error channel at this
/// boundary.
fn driver_to_syscall(error: crate::driver::DriverError) -> Error {
    warn!("redox-drm: driver error: {}", error);
    Error::new(EINVAL)
}
|
||||
Reference in New Issue
Block a user