Red Bear OS — microkernel OS in Rust, based on Redox

Derivative of Redox OS (https://www.redox-os.org) adding:
- AMD GPU driver (amdgpu) via LinuxKPI compat layer
- ext4 filesystem support (ext4d scheme daemon)
- ACPI fixes for AMD bare metal (x2APIC, DMAR, IVRS, MCFG)
- Custom branding (hostname, os-release, boot identity)

Build system is full upstream Redox with RBOS overlay in local/.
Patches for kernel, base, and relibc are symlinked from local/patches/
and protected from make clean/distclean. Custom recipes live in
local/recipes/ with symlinks into the recipes/ search path.

Build:  make all CONFIG_NAME=redbear-full
Sync:   ./local/scripts/sync-upstream.sh
This commit is contained in:
2026-04-12 19:05:00 +01:00
commit 50b731f1b7
3392 changed files with 98327 additions and 0 deletions
@@ -0,0 +1,201 @@
use std::collections::BTreeMap;
use log::{debug, warn};
use crate::driver::{DriverError, Result};
use crate::gem::GemHandle;
/// Metadata recorded for one exported DMA-BUF: where the buffer lives
/// physically, how large it is, and which GEM handle backs it.
/// Zero phys_addr/size mean "unknown" (see merge_phys_addr/merge_size).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DmabufInfo {
// Physical base address of the buffer (0 = unknown).
pub phys_addr: usize,
// Buffer size in bytes (0 = unknown).
pub size: u64,
// Backing GEM handle.
pub gem_handle: GemHandle,
}
/// Internal bookkeeping for one exported GEM handle: the shared metadata,
/// the scheme path advertised for it, and how many synthetic fds still
/// reference it.
#[derive(Clone, Debug)]
struct DmabufEntry {
#[allow(dead_code)]
info: DmabufInfo,
// Path of the form "drm:card0/dmabuf/<handle>" (see scheme_path()).
#[allow(dead_code)]
scheme_path: String,
// Number of live synthetic fds pointing at this entry.
#[allow(dead_code)]
refcount: usize,
}
/// Emulates Linux DMA-BUF fd export/import using synthetic fd numbers:
/// each export hands out a unique i32 that maps back to a GEM handle and
/// refcounted metadata.
pub struct DmabufManager {
// Next synthetic fd to hand out; starts at 10_000 (see new()).
#[allow(dead_code)]
next_fd: i32,
// fd -> backing GEM handle (one entry per live fd).
#[allow(dead_code)]
exported: BTreeMap<i32, GemHandle>,
// GEM handle -> shared metadata/refcount for all fds of that handle.
#[allow(dead_code)]
entries: BTreeMap<GemHandle, DmabufEntry>,
}
impl DmabufManager {
    /// Creates a manager with no exports. Synthetic fds start at 10_000 so
    /// they are easy to tell apart from small scheme-provided fds in logs.
    pub fn new() -> Self {
        Self {
            next_fd: 10_000,
            exported: BTreeMap::new(),
            entries: BTreeMap::new(),
        }
    }

    /// Exports `handle` with no physical-address/size metadata (both zero).
    #[allow(dead_code)]
    pub fn export(&mut self, handle: GemHandle) -> Result<i32> {
        self.export_with_info(handle, 0, 0)
    }

    /// Exports `handle` as a fresh synthetic fd, recording (first export) or
    /// reconciling (re-export / dup) the physical address and size metadata.
    ///
    /// Errors on a zero handle, conflicting metadata, refcount overflow, or
    /// fd-space exhaustion.
    ///
    /// Fix over the previous revision: metadata merges and the refcount
    /// increment are validated *before* an fd is consumed and before the
    /// entry is mutated. Previously a conflicting size leaked an fd number
    /// and left `phys_addr` updated while `size` stayed stale.
    #[allow(dead_code)]
    pub fn export_with_info(
        &mut self,
        handle: GemHandle,
        phys_addr: usize,
        size: u64,
    ) -> Result<i32> {
        if handle == 0 {
            return Err(DriverError::InvalidArgument(
                "DMA-BUF export requires a non-zero GEM handle",
            ));
        }
        let scheme_path = Self::scheme_path(handle);
        // Phase 1: validate against any existing entry without mutating it.
        let merged = match self.entries.get(&handle) {
            Some(entry) => {
                let merged_phys = Self::merge_phys_addr(entry.info.phys_addr, phys_addr)?;
                let merged_size = Self::merge_size(entry.info.size, size)?;
                let refcount = entry.refcount.checked_add(1).ok_or_else(|| {
                    DriverError::Buffer(format!(
                        "DMA-BUF refcount overflow for GEM handle {}",
                        handle
                    ))
                })?;
                Some((merged_phys, merged_size, refcount))
            }
            None => None,
        };
        // Phase 2: all checks passed — consume an fd and commit atomically.
        let fd = self.allocate_fd()?;
        match merged {
            Some((merged_phys, merged_size, refcount)) => {
                let entry = self
                    .entries
                    .get_mut(&handle)
                    .expect("entry existence verified above");
                entry.info.phys_addr = merged_phys;
                entry.info.size = merged_size;
                entry.refcount = refcount;
                debug!(
                    "redox-drm: dup() DMA-BUF export fd {} -> {} (GEM handle {}, refs={})",
                    entry.scheme_path, fd, handle, entry.refcount
                );
            }
            None => {
                self.entries.insert(
                    handle,
                    DmabufEntry {
                        info: DmabufInfo {
                            phys_addr,
                            size,
                            gem_handle: handle,
                        },
                        scheme_path: scheme_path.clone(),
                        refcount: 1,
                    },
                );
                warn!(
                    "redox-drm: exported DMA-BUF {} as synthetic fd {} for GEM handle {} \
                     (phys={:#x}, size={})",
                    scheme_path, fd, handle, phys_addr, size
                );
            }
        }
        self.exported.insert(fd, handle);
        Ok(fd)
    }

    /// Resolves a synthetic fd back to its GEM handle (the import half of
    /// the DMA-BUF bridge).
    pub fn import(&self, fd: i32) -> Result<GemHandle> {
        let info = self
            .lookup(fd)
            .ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;
        debug!(
            "redox-drm: imported DMA-BUF fd {} -> GEM handle {} (phys={:#x}, size={})",
            fd, info.gem_handle, info.phys_addr, info.size
        );
        Ok(info.gem_handle)
    }

    /// Closes one synthetic fd. The backing entry is dropped only when the
    /// last fd referencing the handle is closed. Note the fd mapping is
    /// removed up-front, so an unknown fd fails without side effects.
    pub fn close(&mut self, fd: i32) -> Result<()> {
        let handle = self
            .exported
            .remove(&fd)
            .ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;
        let remove_entry = {
            let entry = self.entries.get_mut(&handle).ok_or_else(|| {
                DriverError::NotFound(format!(
                    "DMA-BUF bookkeeping missing for GEM handle {}",
                    handle
                ))
            })?;
            if entry.refcount == 0 {
                return Err(DriverError::Buffer(format!(
                    "DMA-BUF refcount underflow for GEM handle {}",
                    handle
                )));
            }
            entry.refcount -= 1;
            debug!(
                "redox-drm: closed DMA-BUF fd {} for {} (GEM handle {}, refs={})",
                fd, entry.scheme_path, handle, entry.refcount
            );
            entry.refcount == 0
        };
        if remove_entry {
            let _ = self.entries.remove(&handle);
            warn!(
                "redox-drm: released final DMA-BUF export for GEM handle {}",
                handle
            );
        }
        Ok(())
    }

    /// Returns a copy of the metadata for `fd`, if it is a live export.
    pub fn lookup(&self, fd: i32) -> Option<DmabufInfo> {
        let handle = self.exported.get(&fd)?;
        self.entries.get(handle).map(|entry| entry.info)
    }

    /// Duplicates an existing export: allocates a new fd sharing the entry
    /// (and bumping the refcount) of `fd`.
    pub fn dup(&mut self, fd: i32) -> Result<i32> {
        let info = self
            .lookup(fd)
            .ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;
        self.export_with_info(info.gem_handle, info.phys_addr, info.size)
    }

    /// Hands out the next synthetic fd, failing once the i32 space is
    /// exhausted (never reuses closed fds).
    fn allocate_fd(&mut self) -> Result<i32> {
        let fd = self.next_fd;
        self.next_fd = self.next_fd.checked_add(1).ok_or_else(|| {
            DriverError::Buffer("synthetic DMA-BUF fd space exhausted".to_string())
        })?;
        Ok(fd)
    }

    /// Scheme path advertised for an exported handle.
    fn scheme_path(handle: GemHandle) -> String {
        format!("drm:card0/dmabuf/{handle}")
    }

    /// Reconciles two physical addresses: zero acts as "unknown" and defers
    /// to the other value; distinct non-zero values are a hard conflict.
    fn merge_phys_addr(current: usize, incoming: usize) -> Result<usize> {
        if current == 0 || incoming == 0 || current == incoming {
            return Ok(current.max(incoming));
        }
        Err(DriverError::Buffer(format!(
            "conflicting DMA-BUF physical addresses: existing={:#x}, incoming={:#x}",
            current, incoming
        )))
    }

    /// Same merge rule as `merge_phys_addr`, applied to buffer sizes.
    fn merge_size(current: u64, incoming: u64) -> Result<u64> {
        if current == 0 || incoming == 0 || current == incoming {
            return Ok(current.max(incoming));
        }
        Err(DriverError::Buffer(format!(
            "conflicting DMA-BUF sizes: existing={}, incoming={}",
            current, incoming
        )))
    }
}
@@ -0,0 +1,67 @@
use thiserror::Error;
use crate::gem::GemHandle;
use crate::kms::{ConnectorInfo, ModeInfo};
/// Crate-wide result alias over [`DriverError`].
pub type Result<T> = std::result::Result<T, DriverError>;
/// Error type shared by every driver-facing API in this crate.
/// Display strings are derived via `thiserror`.
#[derive(Debug, Error)]
pub enum DriverError {
#[error("driver initialization failed: {0}")]
Initialization(String),
#[error("invalid argument: {0}")]
InvalidArgument(&'static str),
#[error("resource not found: {0}")]
NotFound(String),
#[allow(dead_code)]
#[error("operation not supported: {0}")]
Unsupported(&'static str),
#[error("MMIO failure: {0}")]
Mmio(String),
#[error("PCI failure: {0}")]
Pci(String),
#[error("buffer failure: {0}")]
Buffer(String),
#[error("I/O failure: {0}")]
Io(String),
}
/// Interface every GPU backend implements: identity strings, KMS
/// (modesetting/page-flip), GEM buffer management, DMA-BUF bridging,
/// and IRQ handling.
pub trait GpuDriver: Send + Sync {
/// Short driver name (used for identification/logging).
fn driver_name(&self) -> &str;
/// Human-readable driver description.
fn driver_desc(&self) -> &str;
#[allow(dead_code)]
/// DRM-style driver date string.
fn driver_date(&self) -> &str;
/// Probes and returns the current connector topology.
fn detect_connectors(&self) -> Vec<ConnectorInfo>;
/// Returns the modes advertised by the given connector.
fn get_modes(&self, connector_id: u32) -> Vec<ModeInfo>;
/// Programs a CRTC with a framebuffer, connector set, and mode.
fn set_crtc(
&self,
crtc_id: u32,
fb_handle: u32,
connectors: &[u32],
mode: &ModeInfo,
) -> Result<()>;
/// Queues a page flip; the returned u64 is backend-defined
/// (presumably a vblank/sequence value — confirm per implementation).
fn page_flip(&self, crtc_id: u32, fb_handle: u32, flags: u32) -> Result<u64>;
#[allow(dead_code)]
/// Returns the current vblank counter for the CRTC.
fn get_vblank(&self, crtc_id: u32) -> Result<u64>;
/// Allocates a GEM buffer of `size` bytes.
fn gem_create(&self, size: u64) -> Result<GemHandle>;
/// Releases a GEM buffer.
fn gem_close(&self, handle: GemHandle) -> Result<()>;
/// Maps a GEM buffer and returns its address as usize.
fn gem_mmap(&self, handle: GemHandle) -> Result<usize>;
/// Returns the size of a GEM buffer.
fn gem_size(&self, handle: GemHandle) -> Result<u64>;
#[allow(dead_code)]
/// Exports a GEM handle as a (synthetic) DMA-BUF fd.
fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32>;
#[allow(dead_code)]
/// Imports a (synthetic) DMA-BUF fd back into a GEM handle.
fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle>;
#[allow(dead_code)]
/// Returns the connector's EDID bytes (may be empty).
fn get_edid(&self, connector_id: u32) -> Vec<u8>;
/// Services a pending interrupt; the optional (u32, u64) payload is
/// backend-defined — NOTE(review): confirm its meaning per driver.
fn handle_irq(&self) -> Result<Option<(u32, u64)>>;
@@ -0,0 +1,516 @@
use log::{info, warn};
use std::ptr;
#[cfg(no_amdgpu_c)]
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;
use crate::driver::{DriverError, Result};
use crate::kms::connector::synthetic_edid;
use crate::kms::{ConnectorInfo, ConnectorStatus, ConnectorType, ModeInfo};
/// C-ABI mirror of the connector record filled in by
/// `amdgpu_dc_get_connector_info`. All fields are plain i32 for ABI
/// simplicity; negative values are clamped to 0 by the Rust consumer.
#[repr(C)]
pub struct ConnectorInfoFFI {
pub id: i32,
// DRM-style connector type code (see map_connector_type).
pub connector_type: i32,
pub connector_type_id: i32,
// 1 = connected, 2 = disconnected (see map_connection_status).
pub connection: i32,
// Physical dimensions in millimeters.
pub mm_width: i32,
pub mm_height: i32,
pub encoder_id: i32,
}
// FFI surface of the C display-core (DC) backend, resolved at link time.
// `link_name` binds the canonical C symbol names while keeping the Rust
// wrappers private. All status returns use the rc < 0 = failure convention
// (callers below treat negative values as errors).
#[cfg(not(no_amdgpu_c))]
unsafe extern "C" {
/// Full hardware initialization: sets MMIO base, FB aperture, PCI device.
/// Must be called before any other DC function — the C side depends on
/// globals populated here (g_mmio_base, g_fb_phys, etc.).
#[link_name = "amdgpu_redox_init"]
fn ffi_amdgpu_redox_init(
mmio_base: *const u8,
mmio_size: usize,
fb_phys: u64,
fb_size: usize,
) -> i32;
/// Probes displays; returns the connector count, or negative on failure.
#[link_name = "amdgpu_dc_detect_connectors"]
fn ffi_amdgpu_dc_detect_connectors() -> i32;
/// Fills `info` for connector index `idx`; negative return = failure.
#[link_name = "amdgpu_dc_get_connector_info"]
fn ffi_amdgpu_dc_get_connector_info(idx: i32, info: *mut ConnectorInfoFFI) -> i32;
/// Programs a CRTC with a framebuffer address and mode geometry.
#[link_name = "amdgpu_dc_set_crtc"]
fn ffi_amdgpu_dc_set_crtc(crtc_id: i32, fb_addr: u64, width: u32, height: u32) -> i32;
/// Releases global state in the C layer.
#[link_name = "amdgpu_redox_cleanup"]
fn ffi_amdgpu_redox_cleanup();
}
// Stub state for builds without the AMD C backend (cfg no_amdgpu_c): the
// fallback shims below only record the MMIO mapping they were handed.
#[cfg(no_amdgpu_c)]
static FALLBACK_MMIO_BASE: AtomicUsize = AtomicUsize::new(0);
#[cfg(no_amdgpu_c)]
static FALLBACK_MMIO_SIZE: AtomicUsize = AtomicUsize::new(0);
// errno-style ENOENT value, negated by the stub connector query.
#[cfg(no_amdgpu_c)]
const FALLBACK_ENOENT: i32 = 2;
/// Stub init: records the MMIO mapping and reports success (0).
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_init(mmio_base: *const u8, mmio_size: usize) -> i32 {
FALLBACK_MMIO_BASE.store(mmio_base as usize, Ordering::Relaxed);
FALLBACK_MMIO_SIZE.store(mmio_size, Ordering::Relaxed);
0
}
/// Stub init with framebuffer info: the FB parameters are ignored; only
/// the MMIO mapping is recorded.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_init_with_fb(
mmio_base: *const u8,
mmio_size: usize,
_fb_phys: u64,
_fb_size: usize,
) -> i32 {
FALLBACK_MMIO_BASE.store(mmio_base as usize, Ordering::Relaxed);
FALLBACK_MMIO_SIZE.store(mmio_size, Ordering::Relaxed);
0
}
/// Stub detection: warns and reports zero connectors.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_detect_connectors() -> i32 {
warn!("redox-drm: compiled without AMD C backend (no_amdgpu_c); no real connector detection available");
0
}
/// Stub connector query: always fails with -ENOENT.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_get_connector_info(_idx: i32, _info: *mut ConnectorInfoFFI) -> i32 {
-FALLBACK_ENOENT
}
/// Stub modeset: accepted as a no-op so bring-up code can proceed.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_set_crtc(_crtc_id: i32, _fb_addr: u64, _width: u32, _height: u32) -> i32 {
0
}
/// Stub cleanup: clears the recorded MMIO mapping.
#[cfg(no_amdgpu_c)]
fn amdgpu_dc_cleanup() {
FALLBACK_MMIO_BASE.store(0, Ordering::Relaxed);
FALLBACK_MMIO_SIZE.store(0, Ordering::Relaxed);
}
/// Real backend: init without a framebuffer aperture (FB fields zeroed).
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_init(mmio_base: *const u8, mmio_size: usize) -> i32 {
// SAFETY: thin forwarding call; pointer/size validity is the caller's contract.
unsafe { ffi_amdgpu_redox_init(mmio_base, mmio_size, 0, 0) }
}
/// Real backend: init with an explicit framebuffer aperture.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_init_with_fb(
mmio_base: *const u8,
mmio_size: usize,
fb_phys: u64,
fb_size: usize,
) -> i32 {
// SAFETY: thin forwarding call; pointer/size validity is the caller's contract.
unsafe { ffi_amdgpu_redox_init(mmio_base, mmio_size, fb_phys, fb_size) }
}
/// Real backend: forwards to the C connector probe.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_detect_connectors() -> i32 {
// SAFETY: no arguments; relies on the C side having been initialized.
unsafe { ffi_amdgpu_dc_detect_connectors() }
}
/// Real backend: forwards the connector query; `info` must be valid for writes.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_get_connector_info(idx: i32, info: *mut ConnectorInfoFFI) -> i32 {
// SAFETY: thin forwarding call; `info` validity is the caller's contract.
unsafe { ffi_amdgpu_dc_get_connector_info(idx, info) }
}
/// Real backend: forwards the modeset request to the C display core.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_set_crtc(crtc_id: i32, fb_addr: u64, width: u32, height: u32) -> i32 {
// SAFETY: plain-value forwarding call.
unsafe { ffi_amdgpu_dc_set_crtc(crtc_id, fb_addr, width, height) }
}
/// Real backend: releases the C layer's global state.
#[cfg(not(no_amdgpu_c))]
fn amdgpu_dc_cleanup() {
// SAFETY: no arguments; matching teardown for ffi_amdgpu_redox_init.
unsafe { ffi_amdgpu_redox_cleanup() }
}
/// Safe wrapper over the AMD display-core backend: records the MMIO and
/// framebuffer apertures and drives modesetting, page flips, and EDID reads.
pub struct DisplayCore {
// True once backend init succeeded; gates every operation (and Drop).
initialized: bool,
// Base of the mapped MMIO aperture (stored as usize for pointer math).
mmio_base: usize,
// Size of the MMIO aperture in bytes.
mmio_size: usize,
// Physical address of the framebuffer aperture (0 = none provided).
fb_phys: u64,
// Size of the framebuffer aperture in bytes (0 = none provided).
fb_size: usize,
}
impl DisplayCore {
/// Initializes the display core without a framebuffer aperture.
pub fn new(mmio_base: *const u8, mmio_size: usize) -> Result<Self> {
Self::with_framebuffer(mmio_base, mmio_size, 0, 0)
}
/// Initializes the display core, optionally with a framebuffer aperture.
/// A zero fb_phys or fb_size selects the init path without FB info.
/// Fails if the C backend reports a negative status.
pub fn with_framebuffer(
mmio_base: *const u8,
mmio_size: usize,
fb_phys: u64,
fb_size: usize,
) -> Result<Self> {
let rc = if fb_phys != 0 && fb_size != 0 {
amdgpu_dc_init_with_fb(mmio_base, mmio_size, fb_phys, fb_size)
} else {
amdgpu_dc_init(mmio_base, mmio_size)
};
if rc < 0 {
return Err(DriverError::Initialization(format!(
"amdgpu display init failed with status {}",
rc
)));
}
info!(
"redox-drm: AMD DC initialized with {} bytes of MMIO, fb_phys={:#x}, fb_size={}",
mmio_size, fb_phys, fb_size
);
Ok(Self {
initialized: true,
mmio_base: mmio_base as usize,
mmio_size,
fb_phys,
fb_size,
})
}
/// Physical address of the framebuffer aperture (0 if none was supplied).
pub fn fb_phys(&self) -> u64 {
self.fb_phys
}
/// Size in bytes of the framebuffer aperture (0 if none was supplied).
pub fn fb_size(&self) -> usize {
self.fb_size
}
/// Probes the connector topology via the C backend and converts each raw
/// record into a ConnectorInfo with its mode list attached.
/// Per-connector fetch failures are logged and skipped, not fatal.
/// NOTE(review): modes are fetched by connector *index* (idx), not by the
/// reported id — confirm the C side and the DDC selection agree on this.
pub fn detect_connectors(&self) -> Result<Vec<ConnectorInfo>> {
if !self.initialized {
return Err(DriverError::Initialization(
"display core not initialized".to_string(),
));
}
let count = amdgpu_dc_detect_connectors();
if count < 0 {
return Err(DriverError::Mmio(format!(
"AMD DC connector detection failed with status {}",
count
)));
}
if count == 0 {
warn!("redox-drm: AMD DC reported 0 connected displays");
return Ok(Vec::new());
}
let mut connectors = Vec::new();
for idx in 0..count {
// Default record: connection = 2 means "disconnected" if the C
// call leaves the struct untouched.
let mut raw = ConnectorInfoFFI {
id: 0,
connector_type: 0,
connector_type_id: 0,
connection: 2,
mm_width: 0,
mm_height: 0,
encoder_id: 0,
};
let rc = amdgpu_dc_get_connector_info(idx, &mut raw as *mut ConnectorInfoFFI);
if rc < 0 {
warn!(
"redox-drm: failed to fetch connector {} from AMD DC (status {})",
idx, rc
);
continue;
}
// Clamp any negative i32 fields to 0 before widening to u32.
connectors.push(ConnectorInfo {
id: raw.id.max(0) as u32,
connector_type: map_connector_type(raw.connector_type),
connector_type_id: raw.connector_type_id.max(0) as u32,
connection: map_connection_status(raw.connection),
mm_width: raw.mm_width.max(0) as u32,
mm_height: raw.mm_height.max(0) as u32,
encoder_id: raw.encoder_id.max(0) as u32,
modes: self.modes_for_connector(idx as u32),
});
}
Ok(connectors)
}
/// Programs `crtc_id` to scan out the framebuffer at physical address
/// `fb_addr` with the given geometry, via the C display core.
pub fn set_crtc(&self, crtc_id: u32, fb_addr: u64, width: u32, height: u32) -> Result<()> {
if !self.initialized {
return Err(DriverError::Initialization(
"display core must be initialized before modesetting".to_string(),
));
}
let rc = amdgpu_dc_set_crtc(crtc_id as i32, fb_addr, width, height);
if rc < 0 {
return Err(DriverError::Mmio(format!(
"amdgpu_dc_set_crtc failed for CRTC {} with status {}",
crtc_id, rc
)));
}
Ok(())
}
/// Flips the scanout surface of `crtc_id` to `fb_addr` by writing the HUBP
/// surface-address registers directly: high dword, then low dword, then
/// the flip trigger register.
/// NOTE(review): the 0x5800/0x5804/0x5834 offsets and the 0x400 per-pipe
/// stride should be confirmed against the DCN register headers for the
/// target ASIC.
pub fn flip_surface(&self, crtc_id: u32, fb_addr: u64) -> Result<()> {
if !self.initialized {
return Err(DriverError::Initialization(
"display core must be initialized before page flip".to_string(),
));
}
const HUBP_FLIP_ADDR_LOW: usize = 0x5800;
const HUBP_FLIP_ADDR_HIGH: usize = 0x5804;
// Each CRTC/pipe's register block is 0x400 DWORDs apart.
let hubp_base = HUBP_FLIP_ADDR_LOW + (crtc_id as usize) * 0x400;
let hubp_high = HUBP_FLIP_ADDR_HIGH + (crtc_id as usize) * 0x400;
self.write_reg(hubp_high, (fb_addr >> 32) as u32)?;
self.write_reg(hubp_base, fb_addr as u32)?;
// Writing 1 to the flip-control register arms/triggers the flip.
let flip_control = 0x5834 + (crtc_id as usize) * 0x400;
self.write_reg(flip_control, 1)?;
Ok(())
}
/// Reads the base EDID block (offset 0) of a connector over DDC.
/// Returns an empty vector when the core is uninitialized, the I2C
/// transfer fails, or fewer than 128 bytes come back.
pub fn read_edid(&self, connector_index: u32) -> Vec<u8> {
    if !self.initialized {
        return Vec::new();
    }
    if let Ok(edid) = self.read_edid_block(connector_index, 0x00) {
        if edid.len() >= 128 {
            return edid;
        }
    }
    Vec::new()
}
/// Builds the mode list for a connector, falling back from the hardware
/// EDID to a synthetic EDID and finally to a hard-coded 1080p mode, so the
/// list is never empty.
fn modes_for_connector(&self, connector_index: u32) -> Vec<ModeInfo> {
    let mut candidates = ModeInfo::from_edid(&self.read_edid(connector_index));
    if candidates.is_empty() {
        // No usable hardware EDID: try the canned fallback EDID.
        candidates = ModeInfo::from_edid(&synthetic_edid());
    }
    if candidates.is_empty() {
        // Last resort: guarantee at least one usable mode.
        candidates.push(ModeInfo::default_1080p());
    }
    candidates
}
/// Reads one 128-byte EDID block at `offset` from the connector's DDC bus
/// by driving the DC_I2C engine registers directly.
///
/// Sequence: acquire engine arbitration, select the DDC line, program
/// setup/speed, queue a 1-byte offset write plus a 128-byte read, push the
/// address/offset bytes into the data FIFO, kick the engine, poll for a
/// terminal status, then drain the FIFO.
///
/// NOTE(review): register offsets and bit masks follow a DC_I2C-style
/// block; confirm them against the target ASIC's register headers.
fn read_edid_block(&self, connector_index: u32, offset: u8) -> Result<Vec<u8>> {
const MM_DC_I2C_CONTROL: usize = 0x1e98;
const MM_DC_I2C_ARBITRATION: usize = 0x1e99;
const MM_DC_I2C_SW_STATUS: usize = 0x1e9b;
const MM_DC_I2C_DDC1_SPEED: usize = 0x1ea2;
const MM_DC_I2C_DDC1_SETUP: usize = 0x1ea3;
const MM_DC_I2C_TRANSACTION0: usize = 0x1eae;
const MM_DC_I2C_TRANSACTION1: usize = 0x1eaf;
const MM_DC_I2C_DATA: usize = 0x1eb2;
const CONTROL_GO: u32 = 0x0000_0001;
const CONTROL_SOFT_RESET: u32 = 0x0000_0002;
const CONTROL_SW_STATUS_RESET: u32 = 0x0000_0008;
const CONTROL_DDC_SELECT_MASK: u32 = 0x0000_0700;
const CONTROL_DDC_SELECT_SHIFT: u32 = 8;
const CONTROL_TRANSACTION_COUNT_MASK: u32 = 0x0030_0000;
const CONTROL_TRANSACTION_COUNT_SHIFT: u32 = 20;
const ARBITRATION_STATUS_MASK: u32 = 0x0000_000c;
const ARBITRATION_STATUS_SHIFT: u32 = 2;
const ARBITRATION_REQ: u32 = 0x0010_0000;
const ARBITRATION_DONE: u32 = 0x0020_0000;
const SW_STATUS_DONE: u32 = 0x0000_0004;
const SW_STATUS_ABORTED: u32 = 0x0000_0010;
const SW_STATUS_TIMEOUT: u32 = 0x0000_0020;
const SW_STATUS_NACK: u32 = 0x0000_0100;
const SETUP_ENABLE: u32 = 0x0000_0040;
const SETUP_SEND_RESET_LENGTH: u32 = 0x0000_0004;
const SETUP_TIME_LIMIT_SHIFT: u32 = 24;
const SPEED_THRESHOLD: u32 = 0x0000_0002;
const SPEED_PRESCALE_SHIFT: u32 = 16;
const SPEED_START_STOP_TIMING: u32 = 0x0000_0200;
const TX_RW: u32 = 0x0000_0001;
const TX_STOP_ON_NACK: u32 = 0x0000_0100;
const TX_START: u32 = 0x0000_1000;
const TX_STOP: u32 = 0x0000_2000;
const TX_COUNT_SHIFT: u32 = 16;
const DATA_RW: u32 = 0x0000_0001;
const DATA_VALUE_SHIFT: u32 = 8;
const DATA_VALUE_MASK: u32 = 0x0000_ff00;
const DATA_INDEX_SHIFT: u32 = 16;
const DATA_INDEX_WRITE: u32 = 0x8000_0000;
// Standard EDID DDC addresses: 0xa0 write, 0xa1 read.
const EDID_WRITE_ADDR: u8 = 0xa0;
const EDID_READ_ADDR: u8 = 0xa1;
const EDID_BLOCK_SIZE: usize = 128;
const I2C_STATUS_IDLE: u32 = 0;
const I2C_STATUS_USED_BY_SW: u32 = 1;
const I2C_WAIT_RETRIES: usize = 200;
// Bounds-check the two highest register offsets used below, so every
// later access is known to be inside the aperture.
self.ensure_mmio_reg(MM_DC_I2C_DATA)?;
self.ensure_mmio_reg(MM_DC_I2C_TRANSACTION1)?;
let connector_select = connector_index & 0x7;
// Acquire software arbitration of the shared I2C engine.
let arbitration = self.read_reg(MM_DC_I2C_ARBITRATION)?;
let status = (arbitration & ARBITRATION_STATUS_MASK) >> ARBITRATION_STATUS_SHIFT;
if status == I2C_STATUS_IDLE {
self.write_reg(MM_DC_I2C_ARBITRATION, arbitration | ARBITRATION_REQ)?;
} else if status != I2C_STATUS_USED_BY_SW {
return Err(DriverError::Mmio(format!(
"AMD I2C engine unavailable for connector {} (status {})",
connector_index, status
)));
}
// Select the DDC line and reset engine status (without a soft reset).
let control = self.read_reg(MM_DC_I2C_CONTROL)?;
self.write_reg(
MM_DC_I2C_CONTROL,
(control
& !(CONTROL_SOFT_RESET | CONTROL_DDC_SELECT_MASK | CONTROL_TRANSACTION_COUNT_MASK))
| CONTROL_SW_STATUS_RESET
| (connector_select << CONTROL_DDC_SELECT_SHIFT),
)?;
// Engine setup: enable, send-reset length, and a time limit of 3.
self.write_reg(
MM_DC_I2C_DDC1_SETUP,
SETUP_ENABLE | SETUP_SEND_RESET_LENGTH | (3 << SETUP_TIME_LIMIT_SHIFT),
)?;
// Bus timing: threshold, start/stop timing, prescale of 40.
self.write_reg(
MM_DC_I2C_DDC1_SPEED,
SPEED_THRESHOLD | SPEED_START_STOP_TIMING | (40 << SPEED_PRESCALE_SHIFT),
)?;
// Transaction 0: one-byte write (the EDID offset) with START.
self.write_reg(
MM_DC_I2C_TRANSACTION0,
TX_START | TX_STOP_ON_NACK | (1 << TX_COUNT_SHIFT),
)?;
// Transaction 1: 128-byte read with START and STOP.
self.write_reg(
MM_DC_I2C_TRANSACTION1,
TX_RW
| TX_START
| TX_STOP
| TX_STOP_ON_NACK
| ((EDID_BLOCK_SIZE as u32) << TX_COUNT_SHIFT),
)?;
// FIFO: write address (0xa0, with index-write flag), offset, read
// address (0xa1).
self.write_reg(
MM_DC_I2C_DATA,
((EDID_WRITE_ADDR as u32) << DATA_VALUE_SHIFT) | DATA_INDEX_WRITE,
)?;
self.write_reg(MM_DC_I2C_DATA, (offset as u32) << DATA_VALUE_SHIFT)?;
self.write_reg(MM_DC_I2C_DATA, (EDID_READ_ADDR as u32) << DATA_VALUE_SHIFT)?;
// Kick the engine (transaction-count field = 1 — NOTE(review): confirm
// whether the field is count or count-1) and set GO.
let control = self.read_reg(MM_DC_I2C_CONTROL)?;
self.write_reg(
MM_DC_I2C_CONTROL,
(control & !CONTROL_TRANSACTION_COUNT_MASK)
| (1 << CONTROL_TRANSACTION_COUNT_SHIFT)
| CONTROL_GO,
)?;
// Poll up to ~200 ms (200 x 1 ms) for a terminal status bit.
let mut final_status = 0;
for _ in 0..I2C_WAIT_RETRIES {
final_status = self.read_reg(MM_DC_I2C_SW_STATUS)?;
if (final_status
& (SW_STATUS_DONE | SW_STATUS_ABORTED | SW_STATUS_TIMEOUT | SW_STATUS_NACK))
!= 0
{
break;
}
thread::sleep(Duration::from_millis(1));
}
// Release arbitration regardless of transfer outcome.
self.write_reg(MM_DC_I2C_ARBITRATION, ARBITRATION_DONE)?;
if (final_status & SW_STATUS_DONE) == 0 {
return Err(DriverError::Mmio(format!(
"AMD I2C EDID read did not complete for connector {} (status {:#x})",
connector_index, final_status
)));
}
if (final_status & (SW_STATUS_ABORTED | SW_STATUS_TIMEOUT | SW_STATUS_NACK)) != 0 {
return Err(DriverError::Mmio(format!(
"AMD I2C EDID read failed for connector {} (status {:#x})",
connector_index, final_status
)));
}
// Seek the FIFO read pointer to index 2 (past the two bytes queued for
// the write phase) — NOTE(review): confirm index semantics.
self.write_reg(
MM_DC_I2C_DATA,
DATA_RW | DATA_INDEX_WRITE | ((2_u32) << DATA_INDEX_SHIFT),
)?;
// Drain the 128 data bytes; each register read yields one byte in
// bits 8..15.
let mut edid = Vec::with_capacity(EDID_BLOCK_SIZE);
for _ in 0..EDID_BLOCK_SIZE {
let value = self.read_reg(MM_DC_I2C_DATA)?;
edid.push(((value & DATA_VALUE_MASK) >> DATA_VALUE_SHIFT) as u8);
}
Ok(edid)
}
/// Validates that 32-bit register index `reg` lies fully inside the mapped
/// MMIO aperture. `reg` is a DWORD index; the byte offset is `reg * 4`.
///
/// Fix: the end-of-access bound (`offset + 4`) is now computed with
/// `checked_add`. Previously a huge `reg` whose byte offset still fit in
/// `usize` could wrap `offset + 4` to a small value, pass the bounds check,
/// and let `read_reg`/`write_reg` touch memory outside the aperture.
fn ensure_mmio_reg(&self, reg: usize) -> Result<()> {
    let offset = reg.checked_mul(4).ok_or_else(|| {
        DriverError::Mmio(format!("AMD register offset overflow for {reg:#x}"))
    })?;
    let end = offset.checked_add(4).ok_or_else(|| {
        DriverError::Mmio(format!("AMD register offset overflow for {reg:#x}"))
    })?;
    if end > self.mmio_size {
        return Err(DriverError::Mmio(format!(
            "AMD register {reg:#x} outside MMIO aperture {:#x}",
            self.mmio_size
        )));
    }
    Ok(())
}
/// Volatile 32-bit read of DWORD register index `reg` (byte offset reg*4),
/// converted from the device's little-endian layout.
fn read_reg(&self, reg: usize) -> Result<u32> {
self.ensure_mmio_reg(reg)?;
let offset = reg * 4;
let ptr = (self.mmio_base + offset) as *const u32;
// SAFETY: ensure_mmio_reg verified that reg*4 plus 4 bytes fits inside
// the mapped MMIO aperture starting at mmio_base.
let value = unsafe { ptr::read_volatile(ptr) };
Ok(u32::from_le(value))
}
/// Volatile 32-bit write of DWORD register index `reg` (byte offset reg*4),
/// converted to the device's little-endian layout.
fn write_reg(&self, reg: usize, value: u32) -> Result<()> {
self.ensure_mmio_reg(reg)?;
let offset = reg * 4;
let ptr = (self.mmio_base + offset) as *mut u32;
// SAFETY: ensure_mmio_reg verified that reg*4 plus 4 bytes fits inside
// the mapped MMIO aperture starting at mmio_base.
unsafe { ptr::write_volatile(ptr, value.to_le()) };
Ok(())
}
}
impl Drop for DisplayCore {
/// Tears down the C backend, but only if init actually succeeded.
fn drop(&mut self) {
if self.initialized {
amdgpu_dc_cleanup();
}
}
}
fn map_connector_type(value: i32) -> ConnectorType {
match value {
1 => ConnectorType::VGA,
2 => ConnectorType::DVII,
3 => ConnectorType::DVID,
4 => ConnectorType::DVIA,
10 => ConnectorType::DisplayPort,
11 => ConnectorType::HDMIA,
14 => ConnectorType::EDP,
15 => ConnectorType::Virtual,
_ => ConnectorType::Unknown,
}
}
/// Maps the C layer's connection-status code (1 = connected,
/// 2 = disconnected, anything else = unknown) to `ConnectorStatus`.
fn map_connection_status(value: i32) -> ConnectorStatus {
    if value == 1 {
        ConnectorStatus::Connected
    } else if value == 2 {
        ConnectorStatus::Disconnected
    } else {
        ConnectorStatus::Unknown
    }
}
@@ -0,0 +1,318 @@
use std::collections::BTreeMap;
use log::{info, warn};
use redox_driver_sys::dma::DmaBuffer;
use redox_driver_sys::memory::MmioRegion;
use crate::driver::{DriverError, Result};
// ---- GPU VM geometry: 4-level tables, 4 KiB pages, 512 entries per table ----
const GPU_PAGE_SIZE: u64 = 4096;
const PAGE_TABLE_LEVELS: usize = 4;
const PTE_COUNT: usize = 512;
// Byte size of one page table (512 x 8-byte entries).
const PT_BYTES: usize = PTE_COUNT * 8;
// 9 index bits per level; low 12 bits are the page offset.
const PTE_INDEX_MASK: u64 = 0x1ff;
const PAGE_OFFSET_MASK: u64 = GPU_PAGE_SIZE - 1;
// ---- AMD PTE bit layout: low 12 bits are flags, bits 12..51 the address ----
const AMD_PTE_VALID: u64 = 1 << 0;
// Set on every PTE written here: system-memory (GTT) mapping per its name.
const AMD_PTE_SYSTEM: u64 = 1 << 1;
const AMD_PTE_FLAG_MASK: u64 = 0x0fff;
const AMD_PTE_ADDR_MASK: u64 = 0x000f_ffff_ffff_f000;
// VA aperture size carved out by GttManager::initialize (256 MiB).
const GTT_MIN_VA_SIZE: u64 = 256 * 1024 * 1024;
// Busy-poll iterations allowed for a TLB invalidation acknowledge.
const TLB_POLL_LIMIT: usize = 10_000;
// GC 11.0 (RDNA2) VM register offsets (DWORD index * 4 = byte offset)
// NOTE(review): GC 11.0 is usually labeled RDNA3 — confirm the family tag.
const MM_VM_CONTEXT0_CNTL: usize = 0x1688 * 4;
const MM_VM_CONTEXT0_PT_BASE_LO32: usize = 0x16f3 * 4;
const MM_VM_CONTEXT0_PT_BASE_HI32: usize = 0x16f4 * 4;
const MM_VM_CONTEXT0_PT_START_LO32: usize = 0x1713 * 4;
const MM_VM_CONTEXT0_PT_START_HI32: usize = 0x1714 * 4;
const MM_VM_CONTEXT0_PT_END_LO32: usize = 0x1733 * 4;
const MM_VM_CONTEXT0_PT_END_HI32: usize = 0x1734 * 4;
const MMVM_INVALIDATE_ENG0_REQ: usize = 0x16ab * 4;
const MMVM_INVALIDATE_ENG0_ACK: usize = 0x16bd * 4;
/// One GPU page-table node: a DMA-visible array of 512 hardware PTEs plus
/// the Rust-side children backing its non-leaf entries.
struct PageTable {
// DMA buffer holding the 512 PTEs the GPU walks.
dma: DmaBuffer,
// Child tables keyed by the entry index that points at them.
children: BTreeMap<usize, Box<PageTable>>,
}
impl PageTable {
/// Allocates one 4096-aligned DMA buffer for a table.
/// NOTE(review): assumes DmaBuffer::allocate returns zeroed memory —
/// confirm; stale bytes would read as bogus PTEs.
fn allocate() -> Result<Self> {
let dma = DmaBuffer::allocate(PT_BYTES, 4096)
.map_err(|e| DriverError::Buffer(format!("GTT page table alloc failed: {e}")))?;
if !dma.is_physically_contiguous() {
warn!("redox-drm: GTT page table not guaranteed physically contiguous");
}
Ok(Self {
dma,
children: BTreeMap::new(),
})
}
/// Physical address of this table (what parent PTEs point at).
fn phys(&self) -> u64 {
self.dma.physical_address() as u64
}
/// Views the DMA buffer as the 512 PTEs.
fn entries(&self) -> &[u64] {
// SAFETY: the buffer was allocated with PT_BYTES = 512 * 8 bytes and
// 4096-byte alignment, so 512 u64s are in bounds and aligned.
unsafe { std::slice::from_raw_parts(self.dma.as_ptr() as *const u64, PTE_COUNT) }
}
/// Mutable view of the 512 PTEs.
fn entries_mut(&mut self) -> &mut [u64] {
// SAFETY: same bounds/alignment argument as entries(); &mut self
// guarantees exclusive access on the Rust side.
unsafe { std::slice::from_raw_parts_mut(self.dma.as_mut_ptr() as *mut u64, PTE_COUNT) }
}
/// Recursively installs a leaf PTE for `gpu_addr`, allocating
/// intermediate tables on demand (level 0 = root).
fn map_page(&mut self, level: usize, gpu_addr: u64, phys_addr: u64, flags: u64) -> Result<()> {
let idx = pt_index(gpu_addr, level)?;
if level == PAGE_TABLE_LEVELS - 1 {
self.entries_mut()[idx] = encode_pte(phys_addr, flags);
return Ok(());
}
let child = match self.children.get_mut(&idx) {
Some(c) => c,
None => {
let c = Box::new(PageTable::allocate()?);
let c_phys = c.phys();
// Point our PTE at the new child table before descending.
self.entries_mut()[idx] =
(c_phys & AMD_PTE_ADDR_MASK) | AMD_PTE_VALID | AMD_PTE_SYSTEM;
self.children.entry(idx).or_insert(c)
}
};
child.map_page(level + 1, gpu_addr, phys_addr, flags)
}
/// Clears the leaf PTE for `gpu_addr`. Intermediate tables are kept
/// (never freed), so repeated map/unmap cycles reuse them.
fn unmap_page(&mut self, level: usize, gpu_addr: u64) -> Result<()> {
let idx = pt_index(gpu_addr, level)?;
if level == PAGE_TABLE_LEVELS - 1 {
self.entries_mut()[idx] = 0;
return Ok(());
}
if let Some(child) = self.children.get_mut(&idx) {
child.unmap_page(level + 1, gpu_addr)?;
}
Ok(())
}
/// Software page walk: returns the physical address for `gpu_addr`
/// (with the page offset folded back in), or None if unmapped.
fn translate(&self, level: usize, gpu_addr: u64) -> Option<u64> {
let idx = pt_index(gpu_addr, level).ok()?;
let entry = self.entries()[idx];
if entry & AMD_PTE_VALID == 0 {
return None;
}
if level == PAGE_TABLE_LEVELS - 1 {
return Some((entry & AMD_PTE_ADDR_MASK) | (gpu_addr & PAGE_OFFSET_MASK));
}
self.children.get(&idx)?.translate(level + 1, gpu_addr)
}
}
/// Software GTT (graphics translation table): owns the GPU page-table tree
/// and a bump + free-list allocator over the GPU virtual-address aperture.
pub struct GttManager {
// Set by initialize(); gates the mapping/allocation entry points.
initialized: bool,
// Root (level-0) page table; None until initialize().
root: Option<PageTable>,
// Usable GPU VA range bounds.
va_start: u64,
va_end: u64,
// Base offset reserved ahead of the VA range (currently always 0).
fb_offset: u64,
// Bump pointer for fresh allocations.
next_alloc: u64,
// Released (start, size) ranges available for reuse; never coalesced.
free_list: Vec<(u64, u64)>,
}
impl Default for GttManager {
/// Same as [`GttManager::new`]: an uninitialized manager.
fn default() -> Self {
Self::new()
}
}
impl GttManager {
/// Creates an uninitialized manager; `initialize` must run before any
/// mapping/allocation call (they are gated by `ensure_init`).
pub fn new() -> Self {
Self {
initialized: false,
root: None,
va_start: 0,
// Placeholder bound; initialize() recomputes the VA range.
va_end: GTT_MIN_VA_SIZE - 1,
fb_offset: 0,
next_alloc: 0,
free_list: Vec::new(),
}
}
/// (Re)initializes the allocator: allocates the root table if missing and
/// resets the VA window and bump pointer. Keeps an existing root, so
/// calling it twice does not drop established mappings.
/// NOTE(review): here va_end = va_start + GTT_MIN_VA_SIZE, while new()
/// seeds GTT_MIN_VA_SIZE - 1 — confirm whether va_end is meant to be
/// inclusive or exclusive.
pub fn initialize(&mut self) -> Result<()> {
if self.root.is_none() {
self.root = Some(PageTable::allocate()?);
}
self.fb_offset = 0;
self.va_start = self.fb_offset;
self.va_end = self
.va_start
.checked_add(GTT_MIN_VA_SIZE)
.ok_or_else(|| DriverError::Initialization("GTT VA range overflow".into()))?;
self.next_alloc = self.va_start;
self.initialized = true;
info!(
"redox-drm: AMD GTT initialized va={:#x}..{:#x} root_pt={:#x}",
self.va_start,
self.va_end,
self.root.as_ref().map(|r| r.phys()).unwrap_or(0)
);
Ok(())
}
/// True once `initialize` has completed successfully.
pub fn is_initialized(&self) -> bool {
self.initialized
}
/// Reserves `size` bytes (rounded up to GPU page granularity) of GPU
/// virtual address space: first-fit from the free list, else bump-allocate.
/// Returns the range's start address.
///
/// Fix: the page-rounding expression now uses `checked_add`; previously a
/// `size` within one page of `u64::MAX` wrapped to a tiny `aligned_size`
/// and handed out an undersized range instead of failing.
pub fn alloc_gpu_range(&mut self, size: u64) -> Result<u64> {
    self.ensure_init()?;
    let aligned_size = size
        .checked_add(GPU_PAGE_SIZE - 1)
        .ok_or_else(|| DriverError::Buffer("GTT VA allocation overflow".into()))?
        & !(GPU_PAGE_SIZE - 1);
    // First fit from previously released ranges.
    if let Some(idx) = self.free_list.iter().position(|&(_, s)| s >= aligned_size) {
        let (start, free_size) = self.free_list.remove(idx);
        let remainder = free_size - aligned_size;
        if remainder > 0 {
            // Return the unused tail of the hole to the free list.
            self.free_list.push((start + aligned_size, remainder));
        }
        return Ok(start);
    }
    // No suitable hole: bump-allocate from the high-water mark.
    let gpu_addr = self.next_alloc;
    let new_next = gpu_addr
        .checked_add(aligned_size)
        .ok_or_else(|| DriverError::Buffer("GTT VA allocation overflow".into()))?;
    if new_next > self.va_end {
        return Err(DriverError::Buffer(format!(
            "GTT VA space exhausted: need {:#x}..{:#x}, have ..{:#x}",
            gpu_addr, new_next, self.va_end
        )));
    }
    self.next_alloc = new_next;
    Ok(gpu_addr)
}
/// Clears the leaf PTEs for every page in `[gpu_start, gpu_start + size)`
/// (size rounded up to page granularity). Does not return VA space to the
/// allocator — pair with `release_range` for that.
///
/// Fix: the page rounding and per-page address arithmetic now use checked
/// math; previously a huge `size` or high `gpu_start` wrapped silently and
/// unmapped the wrong pages.
pub fn unmap_range(&mut self, gpu_start: u64, size: u64) -> Result<()> {
    self.ensure_init()?;
    let aligned_size = size
        .checked_add(GPU_PAGE_SIZE - 1)
        .ok_or_else(|| DriverError::Buffer("GTT unmap size overflow".into()))?
        & !(GPU_PAGE_SIZE - 1);
    let num_pages = aligned_size / GPU_PAGE_SIZE;
    for i in 0..num_pages {
        let gpu_addr = gpu_start
            .checked_add(i * GPU_PAGE_SIZE)
            .ok_or_else(|| DriverError::Buffer("GTT unmap address overflow".into()))?;
        self.root
            .as_mut()
            .ok_or_else(|| DriverError::Initialization("GTT root missing".into()))?
            .unmap_page(0, gpu_addr)?;
    }
    Ok(())
}
/// Returns a VA range to the free list for reuse by `alloc_gpu_range`.
/// Does not touch the page tables (pair with `unmap_range`), and performs
/// no coalescing of adjacent free ranges.
pub fn release_range(&mut self, gpu_start: u64, size: u64) {
// NOTE(review): `size + GPU_PAGE_SIZE - 1` can wrap for sizes within one
// page of u64::MAX — callers are assumed to pass pre-validated sizes.
let aligned_size = (size + GPU_PAGE_SIZE - 1) & !(GPU_PAGE_SIZE - 1);
self.free_list.push((gpu_start, aligned_size));
}
/// Installs a single page translation `gpu_addr -> phys_addr` with the
/// given PTE flags. Both addresses must be page-aligned and the GPU
/// address must lie inside the configured aperture.
pub fn map_page(&mut self, gpu_addr: u64, phys_addr: u64, flags: u64) -> Result<()> {
self.ensure_init()?;
if gpu_addr & PAGE_OFFSET_MASK != 0 {
return Err(DriverError::InvalidArgument("gpu_addr not page-aligned"));
}
if phys_addr & PAGE_OFFSET_MASK != 0 {
return Err(DriverError::InvalidArgument("phys_addr not page-aligned"));
}
if gpu_addr < self.va_start || gpu_addr > self.va_end {
return Err(DriverError::InvalidArgument(
"gpu_addr outside GTT aperture",
));
}
self.root
.as_mut()
.ok_or_else(|| DriverError::Initialization("GTT root missing".into()))?
.map_page(0, gpu_addr, phys_addr, flags)
}
/// Clears the leaf PTE for a single GPU page address.
pub fn unmap_page(&mut self, gpu_addr: u64) -> Result<()> {
    self.ensure_init()?;
    let root = self
        .root
        .as_mut()
        .ok_or_else(|| DriverError::Initialization("GTT root missing".into()))?;
    root.unmap_page(0, gpu_addr)
}
/// Maps a physically contiguous range page-by-page starting at `gpu_start`;
/// `size` is rounded up to page granularity. Alignment/aperture checks are
/// enforced per page by `map_page`.
pub fn map_range(
&mut self,
gpu_start: u64,
phys_start: u64,
size: u64,
flags: u64,
) -> Result<()> {
self.ensure_init()?;
// NOTE(review): the rounding and per-page additions are unchecked and
// can wrap for inputs near u64::MAX — TODO harden with checked math.
let aligned_size = (size + GPU_PAGE_SIZE - 1) & !(GPU_PAGE_SIZE - 1);
let num_pages = (aligned_size / GPU_PAGE_SIZE) as usize;
for i in 0..num_pages {
let gpu_addr = gpu_start + (i as u64) * GPU_PAGE_SIZE;
let phys_addr = phys_start + (i as u64) * GPU_PAGE_SIZE;
self.map_page(gpu_addr, phys_addr, flags)?;
}
Ok(())
}
/// Requests a TLB invalidation on invalidation engine 0 and busy-polls for
/// the acknowledge bit (no sleeping; bounded by TLB_POLL_LIMIT reads).
pub fn flush_tlb(&self, mmio: &MmioRegion) -> Result<()> {
if !self.initialized {
return Err(DriverError::Initialization("GTT not initialized".into()));
}
// Bit 0 targets VM context 0; bits 19..23 select flush scope —
// NOTE(review): confirm the exact bit meanings against the
// MMVM_INVALIDATE_ENG0_REQ register definition.
let req =
(1u32 << 0) | (1u32 << 19) | (1u32 << 20) | (1u32 << 21) | (1u32 << 22) | (1u32 << 23);
mmio.write32(MMVM_INVALIDATE_ENG0_REQ, req);
for _ in 0..TLB_POLL_LIMIT {
let ack = mmio.read32(MMVM_INVALIDATE_ENG0_ACK);
if ack & (1u32 << 0) != 0 {
return Ok(());
}
}
Err(DriverError::Mmio("GTT TLB flush timeout".into()))
}
/// Software page walk: translates a GPU VA into a physical address.
/// Returns None when uninitialized, outside the aperture, or unmapped.
pub fn translate(&self, gpu_addr: u64) -> Option<u64> {
    if !self.initialized {
        return None;
    }
    let in_range = gpu_addr >= self.va_start && gpu_addr <= self.va_end;
    if !in_range {
        return None;
    }
    self.root.as_ref()?.translate(0, gpu_addr)
}
/// Points hardware VM context 0 at our root page table, programs the
/// page-table start/end range, and flushes the TLB.
pub fn program_vm_context(&self, mmio: &MmioRegion) -> Result<()> {
let root_phys = self
.root
.as_ref()
.map(|r| r.phys())
.ok_or_else(|| DriverError::Initialization("GTT root missing".into()))?;
mmio.write32(MM_VM_CONTEXT0_PT_BASE_LO32, root_phys as u32);
mmio.write32(MM_VM_CONTEXT0_PT_BASE_HI32, (root_phys >> 32) as u32);
// Start/end registers take page-frame numbers, hence the >> 12.
let va_start_pages = self.va_start >> 12;
let va_end_pages = self.va_end >> 12;
mmio.write32(MM_VM_CONTEXT0_PT_START_LO32, va_start_pages as u32);
mmio.write32(MM_VM_CONTEXT0_PT_START_HI32, (va_start_pages >> 32) as u32);
mmio.write32(MM_VM_CONTEXT0_PT_END_LO32, va_end_pages as u32);
mmio.write32(MM_VM_CONTEXT0_PT_END_HI32, (va_end_pages >> 32) as u32);
// Enable VM context 0: depth=0 (4-level), block_size=0 (4KB pages)
mmio.write32(MM_VM_CONTEXT0_CNTL, 1);
self.flush_tlb(mmio)
}
/// Guard shared by the mutating entry points: errors until `initialize`
/// has run.
fn ensure_init(&self) -> Result<()> {
    if self.initialized {
        Ok(())
    } else {
        Err(DriverError::Initialization(
            "GTT manager not initialized".into(),
        ))
    }
}
}
/// Computes the 9-bit page-table index for `gpu_addr` at `level`
/// (level 0 = root, level 3 = leaf). Errors on a level outside the
/// 4-level scheme.
fn pt_index(gpu_addr: u64, level: usize) -> Result<usize> {
    if level >= PAGE_TABLE_LEVELS {
        return Err(DriverError::Initialization(format!(
            "invalid PT level {level}"
        )));
    }
    // Leaf indices sit at bits 12..21; each level above shifts 9 more bits.
    let levels_below_leaf = PAGE_TABLE_LEVELS - 1 - level;
    let shift = 12 + 9 * levels_below_leaf;
    let index = (gpu_addr >> shift) & PTE_INDEX_MASK;
    Ok(index as usize)
}
/// Builds a leaf PTE: the address bits (12..51) of `phys_addr` plus the low
/// flag bits of `flags`, with VALID and SYSTEM always set.
fn encode_pte(phys_addr: u64, flags: u64) -> u64 {
    let addr_bits = phys_addr & AMD_PTE_ADDR_MASK;
    let flag_bits = flags & AMD_PTE_FLAG_MASK;
    addr_bits | flag_bits | AMD_PTE_VALID | AMD_PTE_SYSTEM
}
@@ -0,0 +1,612 @@
pub mod display;
pub mod gtt;
pub mod ring;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Mutex;
use log::{debug, info, warn};
use redox_driver_sys::irq::IrqHandle;
use redox_driver_sys::memory::MmioRegion;
use redox_driver_sys::pci::{PciBarInfo, PciDevice, PciDeviceInfo};
use crate::driver::{DriverError, GpuDriver, Result};
use crate::gem::{GemHandle, GemManager};
use crate::kms::connector::{synthetic_edid, Connector};
use crate::kms::crtc::Crtc;
use crate::kms::encoder::Encoder;
use crate::kms::{ConnectorInfo, ModeInfo};
use self::display::DisplayCore;
use self::gtt::GttManager;
use self::ring::RingManager;
// Interrupt-handler (IH) ring registers (offsets as consumed by
// read_mmio_reg — NOTE(review): confirm DWORD-index vs byte units).
const AMD_IH_RB_CNTL: usize = 0x0080;
const AMD_IH_RB_RPTR: usize = 0x0083;
const AMD_IH_RB_WPTR: usize = 0x0084;
const AMD_IH_CNTL: usize = 0x00c0;
const AMD_IH_STATUS: usize = 0x00c2;
// Per-pipe display interrupt status plus per-connector HPD status/control
// registers (6 instances each).
const AMD_DCN_DISP_INTERRUPT_STATUS: [usize; 6] = [0x012a, 0x012b, 0x012c, 0x012d, 0x012e, 0x012f];
const AMD_DCN_HPD_INT_STATUS: [usize; 6] = [0x1f14, 0x1f1c, 0x1f24, 0x1f2c, 0x1f34, 0x1f3c];
const AMD_DCN_HPD_CONTROL: [usize; 6] = [0x1f16, 0x1f1e, 0x1f26, 0x1f2e, 0x1f36, 0x1f3e];
// Bit masks within the status/control registers above.
const AMD_DISP_INTERRUPT_VBLANK_MASK: u32 = 0x0000_0008;
const AMD_DISP_INTERRUPT_HPD_MASK: u32 = 0x0002_0000;
const AMD_HPD_INT_STATUS_MASK: u32 = 0x0000_0001;
const AMD_HPD_RX_INT_STATUS_MASK: u32 = 0x0000_0100;
const AMD_HPD_INT_ACK_MASK: u32 = 0x0000_0001;
const AMD_HPD_RX_INT_ACK_MASK: u32 = 0x0000_0100;
const AMD_IH_STATUS_INTERRUPT_PENDING_MASK: u32 = 0x0000_0001;
const AMD_IH_STATUS_RING_OVERFLOW_MASK: u32 = 0x0000_0002;
/// Decoded interrupt event produced by IRQ processing.
#[derive(Clone, Debug)]
pub enum IrqEvent {
/// Vertical blank on a CRTC, with the running vblank counter value.
Vblank { crtc_id: u32, count: u64 },
/// Hot-plug/unplug detected on a connector.
Hotplug { connector_id: u32 },
/// Interrupt fired but no recognized source was identified.
Unknown,
}
/// Top-level AMD GPU driver state: PCI/MMIO handles, the display core, and
/// mutex-guarded managers for GEM, KMS objects, GTT, and the command ring.
pub struct AmdDriver {
// PCI identity/location of the managed device.
info: PciDeviceInfo,
// Mapped BAR0 register aperture.
mmio: MmioRegion,
// IRQ handle, when the device has an interrupt line assigned.
irq_handle: Option<IrqHandle>,
display: DisplayCore,
gem: Mutex<GemManager>,
connectors: Mutex<Vec<Connector>>,
crtcs: Mutex<Vec<Crtc>>,
encoders: Mutex<Vec<Encoder>>,
gtt: Mutex<GttManager>,
ring: Mutex<RingManager>,
// Running vblank counter maintained by IRQ processing.
vblank_count: AtomicU64,
// Set while a hotplug-triggered connector rescan is in flight.
hotplug_pending: AtomicBool,
// Firmware blobs keyed by path-style names (e.g. "amdgpu/...").
firmware: HashMap<String, Vec<u8>>,
}
impl AmdDriver {
/// Builds the driver for one AMD PCI device: maps BAR0 (registers),
/// requires BAR2 (VRAM/framebuffer aperture), requests the IRQ when one is
/// assigned, brings up the display core, GTT, and ring, and records the
/// detected connector topology.
pub fn new(info: PciDeviceInfo, firmware: HashMap<String, Vec<u8>>) -> Result<Self> {
let bar0 = find_memory_bar0(&info)?;
let bar2 = info.find_memory_bar(2).copied();
let mut device = PciDevice::open_location(&info.location)
.map_err(|e| DriverError::Pci(format!("failed to re-open PCI device: {e}")))?;
device
.enable_device()
.map_err(|e| DriverError::Pci(format!("enable_device failed: {e}")))?;
let mmio = device
.map_bar(bar0.index, bar0.addr, bar0.size as usize)
.map_err(|e| DriverError::Mmio(format!("map_bar failed: {e}")))?;
// Register-0 read is purely for the debug log below.
let pci_id = mmio.read32(0);
debug!(
"redox-drm: mapped AMD MMIO BAR0 addr={:#x} size={:#x} idreg={:#x}",
bar0.addr, bar0.size, pci_id
);
// BAR2 is mandatory: the display core needs the framebuffer aperture.
let (fb_phys, fb_size) = match &bar2 {
Some(bar) => {
debug!(
"redox-drm: AMD VRAM BAR2 addr={:#x} size={:#x}",
bar.addr, bar.size
);
(bar.addr, bar.size as usize)
}
None => {
return Err(DriverError::Pci(format!(
"AMD device {} has no VRAM BAR2 — cannot initialize display without framebuffer aperture",
info.location
)));
}
};
// A missing IRQ is tolerated (logged); a failed request is fatal.
let irq_handle = match info.irq {
Some(irq) => Some(
IrqHandle::request(irq)
.map_err(|e| DriverError::Io(format!("failed to request IRQ {irq}: {e}")))?,
),
None => {
warn!(
"redox-drm: AMD device {} has no IRQ assigned",
info.location
);
None
}
};
let display = DisplayCore::with_framebuffer(mmio.as_ptr(), mmio.size(), fb_phys, fb_size)?;
let (connectors, encoders) = detect_display_topology(&display)?;
RingManager::bind_mmio(&mmio);
// Bring up the GTT and program hardware VM context 0 with it.
let mut gtt = GttManager::new();
gtt.initialize()?;
gtt.program_vm_context(&mmio)?;
let mut ring = RingManager::new();
ring.initialize()?;
let fw_count = firmware.len();
// DMCUB firmware may live under either key; warn if neither exists.
let dmcub_available = firmware.contains_key("amdgpu/dmcub_dcn31.bin")
|| firmware.contains_key("amdgpu/dcn_3_1_dmcub");
if !dmcub_available {
warn!("redox-drm: DMCUB firmware not found in cache — display core may fail to initialize");
}
info!(
"redox-drm: AMD driver ready for {} with {} connector(s), {} firmware blob(s) loaded",
info.location,
connectors.len(),
fw_count
);
Ok(Self {
info,
mmio,
irq_handle,
display,
gem: Mutex::new(GemManager::new()),
connectors: Mutex::new(connectors),
crtcs: Mutex::new(vec![Crtc::new(1)]),
encoders: Mutex::new(encoders),
gtt: Mutex::new(gtt),
ring: Mutex::new(ring),
vblank_count: AtomicU64::new(0),
hotplug_pending: AtomicBool::new(false),
firmware,
})
}
pub fn process_irq(&self) -> Result<IrqEvent> {
let ih_status = self.read_mmio_reg(AMD_IH_STATUS);
let ih_cntl = self.read_mmio_reg(AMD_IH_CNTL);
let ih_rptr = self.read_mmio_reg(AMD_IH_RB_RPTR);
let ih_wptr = self.read_mmio_reg(AMD_IH_RB_WPTR);
let ring_pending = ih_rptr != ih_wptr;
if ih_status & AMD_IH_STATUS_RING_OVERFLOW_MASK != 0 {
warn!(
"redox-drm: AMD IH overflow status={:#010x} cntl={:#010x}",
ih_status, ih_cntl
);
}
if let Some(connector_id) = self.detect_hotplug_interrupt() {
self.hotplug_pending.store(true, Ordering::SeqCst);
self.refresh_connectors()?;
self.hotplug_pending.store(false, Ordering::SeqCst);
self.acknowledge_ih(ih_wptr);
debug!(
"redox-drm: hotplug interrupt on connector {} status={:#010x} cntl={:#010x} rptr={:#010x} wptr={:#010x}",
connector_id, ih_status, ih_cntl, ih_rptr, ih_wptr
);
return Ok(IrqEvent::Hotplug { connector_id });
}
if ring_pending || (ih_status & AMD_IH_STATUS_INTERRUPT_PENDING_MASK != 0) {
if let Some(crtc_id) = self.detect_vblank_interrupt() {
let count = self.vblank_count.fetch_add(1, Ordering::SeqCst) + 1;
self.acknowledge_ih(ih_wptr);
debug!(
"redox-drm: vblank interrupt on CRTC {} count={} status={:#010x} cntl={:#010x} rptr={:#010x} wptr={:#010x}",
crtc_id, count, ih_status, ih_cntl, ih_rptr, ih_wptr
);
return Ok(IrqEvent::Vblank { crtc_id, count });
}
}
self.acknowledge_ih(ih_wptr);
Ok(IrqEvent::Unknown)
}
fn read_mmio_reg(&self, register_index: usize) -> u32 {
self.mmio.read32(register_index.saturating_mul(4))
}
fn write_mmio_reg(&self, register_index: usize, value: u32) {
self.mmio.write32(register_index.saturating_mul(4), value);
}
fn detect_vblank_interrupt(&self) -> Option<u32> {
let active_crtc_ids = self
.crtcs
.lock()
.map(|crtcs| {
crtcs
.iter()
.filter(|crtc| crtc.mode.is_some())
.map(|crtc| crtc.id)
.collect::<Vec<_>>()
})
.unwrap_or_else(|_| vec![1]);
for (index, register) in AMD_DCN_DISP_INTERRUPT_STATUS.iter().copied().enumerate() {
let status = self.read_mmio_reg(register);
if status & AMD_DISP_INTERRUPT_VBLANK_MASK == 0 {
continue;
}
let crtc_id = index as u32 + 1;
if active_crtc_ids.is_empty() || active_crtc_ids.contains(&crtc_id) {
return Some(crtc_id);
}
}
None
}
fn detect_hotplug_interrupt(&self) -> Option<u32> {
for (index, register) in AMD_DCN_HPD_INT_STATUS.iter().copied().enumerate() {
let status = self.read_mmio_reg(register);
if status & (AMD_HPD_INT_STATUS_MASK | AMD_HPD_RX_INT_STATUS_MASK) != 0 {
self.acknowledge_hotplug(index, status);
return Some(index as u32 + 1);
}
}
for (index, register) in AMD_DCN_DISP_INTERRUPT_STATUS.iter().copied().enumerate() {
let status = self.read_mmio_reg(register);
if status & AMD_DISP_INTERRUPT_HPD_MASK != 0 {
let hpd_status = self.read_mmio_reg(AMD_DCN_HPD_INT_STATUS[index]);
self.acknowledge_hotplug(index, hpd_status);
return Some(index as u32 + 1);
}
}
None
}
fn acknowledge_hotplug(&self, hpd_index: usize, hpd_status: u32) {
let control_register = AMD_DCN_HPD_CONTROL[hpd_index];
let control = self.read_mmio_reg(control_register);
let ack = control
| if hpd_status & AMD_HPD_INT_STATUS_MASK != 0 {
AMD_HPD_INT_ACK_MASK
} else {
0
}
| if hpd_status & AMD_HPD_RX_INT_STATUS_MASK != 0 {
AMD_HPD_RX_INT_ACK_MASK
} else {
0
};
self.write_mmio_reg(control_register, ack);
}
fn acknowledge_ih(&self, ih_wptr: u32) {
self.write_mmio_reg(AMD_IH_RB_RPTR, ih_wptr);
let ih_cntl = self.read_mmio_reg(AMD_IH_CNTL);
self.write_mmio_reg(AMD_IH_CNTL, ih_cntl);
let ih_rb_cntl = self.read_mmio_reg(AMD_IH_RB_CNTL);
self.write_mmio_reg(AMD_IH_RB_CNTL, ih_rb_cntl);
}
fn refresh_connectors(&self) -> Result<()> {
let (connectors, encoders) = detect_display_topology(&self.display)?;
{
let mut connector_state = self
.connectors
.lock()
.map_err(|_| DriverError::Initialization("connector state poisoned".to_string()))?;
*connector_state = connectors;
}
{
let mut encoder_state = self
.encoders
.lock()
.map_err(|_| DriverError::Initialization("encoder state poisoned".to_string()))?;
*encoder_state = encoders;
}
Ok(())
}
fn ensure_gem_gpu_mapping(&self, fb_handle: GemHandle) -> Result<u64> {
{
let gem = self
.gem
.lock()
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
if let Some(addr) = gem.object(fb_handle)?.gpu_addr {
return Ok(addr);
}
}
let (phys_addr, fb_size) = {
let gem = self
.gem
.lock()
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
let obj = gem.object(fb_handle)?;
(obj.phys_addr as u64, obj.size)
};
let gpu_addr = {
let mut gtt = self
.gtt
.lock()
.map_err(|_| DriverError::Initialization("GTT manager poisoned".to_string()))?;
let addr = gtt.alloc_gpu_range(fb_size)?;
if let Err(e) = gtt.map_range(addr, phys_addr, fb_size, 0) {
if gtt.unmap_range(addr, fb_size).is_ok() {
gtt.release_range(addr, fb_size);
}
return Err(e);
}
if let Err(e) = gtt.flush_tlb(&self.mmio) {
if gtt.unmap_range(addr, fb_size).is_ok() {
if gtt.flush_tlb(&self.mmio).is_ok() {
gtt.release_range(addr, fb_size);
}
}
return Err(e);
}
addr
};
if let Err(e) = self
.gem
.lock()
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?
.set_gpu_addr(fb_handle, gpu_addr)
{
let mut gtt = self
.gtt
.lock()
.map_err(|_| DriverError::Initialization("GTT manager poisoned".to_string()))?;
if gtt.flush_tlb(&self.mmio).is_ok() && gtt.unmap_range(gpu_addr, fb_size).is_ok() {
gtt.release_range(gpu_addr, fb_size);
} else {
let _ = gtt.unmap_range(gpu_addr, fb_size);
}
return Err(e);
}
Ok(gpu_addr)
}
}
impl GpuDriver for AmdDriver {
    /// Stable driver identifier reported to DRM clients.
    fn driver_name(&self) -> &str {
        "amdgpu-redox"
    }
    fn driver_desc(&self) -> &str {
        "AMD GPU DRM/KMS backend for Redox"
    }
    fn driver_date(&self) -> &str {
        "2026-04-11"
    }
    /// Snapshot of the cached connector info. A poisoned lock is recovered by
    /// reading the inner state instead of failing the query.
    fn detect_connectors(&self) -> Vec<ConnectorInfo> {
        match self.connectors.lock() {
            Ok(connectors) => connectors
                .iter()
                .map(|connector| connector.info.clone())
                .collect(),
            Err(poisoned) => {
                warn!("redox-drm: connector state poisoned; using inner state");
                poisoned
                    .into_inner()
                    .iter()
                    .map(|connector| connector.info.clone())
                    .collect()
            }
        }
    }
    /// Modes for one connector; empty if the connector id is unknown.
    fn get_modes(&self, connector_id: u32) -> Vec<ModeInfo> {
        self.detect_connectors()
            .into_iter()
            .find(|connector| connector.id == connector_id)
            .map(|connector| connector.modes)
            .unwrap_or_default()
    }
    /// Program a mode on `crtc_id` scanning out from `fb_handle`, then record
    /// the new CRTC configuration. The framebuffer is GTT-mapped on demand.
    fn set_crtc(
        &self,
        crtc_id: u32,
        fb_handle: u32,
        connectors: &[u32],
        mode: &ModeInfo,
    ) -> Result<()> {
        let fb_addr = self.ensure_gem_gpu_mapping(fb_handle)?;
        self.display
            .set_crtc(crtc_id, fb_addr, mode.hdisplay as u32, mode.vdisplay as u32)?;
        let mut crtcs = self
            .crtcs
            .lock()
            .map_err(|_| DriverError::Initialization("CRTC state poisoned".to_string()))?;
        let crtc = crtcs
            .iter_mut()
            .find(|candidate| candidate.id == crtc_id)
            .ok_or_else(|| DriverError::NotFound(format!("unknown CRTC {crtc_id}")))?;
        crtc.program(fb_handle, connectors, mode)
    }
    /// Flip `crtc_id` to scan out `fb_handle`; returns the fence seqno of the
    /// ring submission. `_flags` is currently unused.
    fn page_flip(&self, crtc_id: u32, fb_handle: u32, _flags: u32) -> Result<u64> {
        {
            // Validate the CRTC id before touching GTT or display state.
            let crtcs = self
                .crtcs
                .lock()
                .map_err(|_| DriverError::Initialization("CRTC state poisoned".to_string()))?;
            if !crtcs.iter().any(|crtc| crtc.id == crtc_id) {
                return Err(DriverError::NotFound(format!("unknown CRTC {crtc_id}")));
            }
        }
        let fb_addr = self.ensure_gem_gpu_mapping(fb_handle)?;
        self.display.flip_surface(crtc_id, fb_addr)?;
        let mut ring = self
            .ring
            .lock()
            .map_err(|_| DriverError::Initialization("ring manager poisoned".to_string()))?;
        ring.page_flip()
    }
    /// Current vblank counter. NOTE(review): the counter is global, not
    /// per-CRTC — `crtc_id` is only validated, not used to select a counter.
    fn get_vblank(&self, crtc_id: u32) -> Result<u64> {
        let crtcs = self
            .crtcs
            .lock()
            .map_err(|_| DriverError::Initialization("CRTC state poisoned".to_string()))?;
        if !crtcs.iter().any(|crtc| crtc.id == crtc_id) {
            return Err(DriverError::NotFound(format!("unknown CRTC {crtc_id}")));
        }
        Ok(self.vblank_count.load(Ordering::SeqCst))
    }
    /// Allocate a GEM buffer object of `size` bytes.
    fn gem_create(&self, size: u64) -> Result<GemHandle> {
        let mut gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
        gem.create(size)
    }
    /// Destroy a GEM object, first tearing down its GTT mapping if present.
    /// NOTE(review): the TLB is flushed before the unmap rather than after —
    /// confirm that ordering is what the GTT manager expects.
    fn gem_close(&self, handle: GemHandle) -> Result<()> {
        let gpu_info = {
            let gem = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
            let obj = gem.object(handle)?;
            (obj.gpu_addr, obj.size)
        };
        if let (Some(gpu_addr), fb_size) = gpu_info {
            let mut gtt = self
                .gtt
                .lock()
                .map_err(|_| DriverError::Initialization("GTT manager poisoned".to_string()))?;
            gtt.flush_tlb(&self.mmio)?;
            gtt.unmap_range(gpu_addr, fb_size)?;
            gtt.release_range(gpu_addr, fb_size);
        }
        self.gem
            .lock()
            .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?
            .close(handle)
    }
    /// CPU-map a GEM object; returns the mapped address.
    fn gem_mmap(&self, handle: GemHandle) -> Result<usize> {
        let gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
        gem.mmap(handle)
    }
    /// Size in bytes of a GEM object.
    fn gem_size(&self, handle: GemHandle) -> Result<u64> {
        let gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
        Ok(gem.object(handle)?.size)
    }
    /// Export a GEM object as a DMA-BUF file descriptor.
    fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32> {
        let mut gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
        gem.export_dmafd(handle)
    }
    /// Import a DMA-BUF file descriptor as a GEM handle.
    fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle> {
        let gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
        gem.import_dmafd(fd)
    }
    /// Cached EDID for a connector; empty if unknown. A poisoned lock falls
    /// back to the inner state, matching `detect_connectors`.
    fn get_edid(&self, connector_id: u32) -> Vec<u8> {
        match self.connectors.lock() {
            Ok(connectors) => connectors
                .iter()
                .find(|connector| connector.info.id == connector_id)
                .map(|connector| connector.edid.clone())
                .unwrap_or_default(),
            Err(poisoned) => poisoned
                .into_inner()
                .iter()
                .find(|connector| connector.info.id == connector_id)
                .map(|connector| connector.edid.clone())
                .unwrap_or_default(),
        }
    }
    /// Service one interrupt; only vblank events are surfaced to the caller
    /// as `(crtc_id, count)` — hotplug and unknown events are logged only.
    fn handle_irq(&self) -> Result<Option<(u32, u64)>> {
        match self.process_irq()? {
            IrqEvent::Vblank { crtc_id, count } => {
                debug!(
                    "redox-drm: handled AMD vblank IRQ for {} CRTC {} count={} irq={:?}",
                    self.info.location,
                    crtc_id,
                    count,
                    self.irq_handle.as_ref().map(IrqHandle::irq)
                );
                Ok(Some((crtc_id, count)))
            }
            IrqEvent::Hotplug { connector_id } => {
                info!(
                    "redox-drm: handled AMD hotplug IRQ for {} connector {} irq={:?}",
                    self.info.location,
                    connector_id,
                    self.irq_handle.as_ref().map(IrqHandle::irq)
                );
                Ok(None)
            }
            IrqEvent::Unknown => {
                debug!(
                    "redox-drm: handled AMD IRQ for {} with no decoded source irq={:?}",
                    self.info.location,
                    self.irq_handle.as_ref().map(IrqHandle::irq)
                );
                Ok(None)
            }
        }
    }
}
/// Probe the display core and build matching connector and encoder lists.
///
/// Each detected connector gets one encoder (attached to CRTC mask 1), and a
/// connector whose EDID read comes back empty falls back to a synthetic EDID
/// so mode enumeration always has data to work with.
fn detect_display_topology(display: &DisplayCore) -> Result<(Vec<Connector>, Vec<Encoder>)> {
    let detected = display.detect_connectors()?;
    let mut connectors = Vec::with_capacity(detected.len());
    let mut encoders = Vec::with_capacity(detected.len());
    for (index, info) in detected.into_iter().enumerate() {
        encoders.push(Encoder::new(info.encoder_id, 1));
        let mut edid = display.read_edid(index as u32);
        if edid.is_empty() {
            edid = synthetic_edid();
        }
        connectors.push(Connector { info, edid });
    }
    Ok((connectors, encoders))
}
/// Locate BAR0 (the register aperture) on the given PCI device.
fn find_memory_bar0(info: &PciDeviceInfo) -> Result<PciBarInfo> {
    match info.find_memory_bar(0) {
        Some(bar) => Ok(*bar),
        None => Err(DriverError::Pci(format!(
            "device {} has no MMIO BAR0",
            info.location
        ))),
    }
}
@@ -0,0 +1,404 @@
use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
use log::{info, warn};
use redox_driver_sys::dma::DmaBuffer;
use redox_driver_sys::memory::MmioRegion;
use crate::driver::{DriverError, Result};
// Ring and fence buffer geometry.
const RING_BUFFER_BYTES: usize = 4096;
const RING_BUFFER_DWORDS: usize = RING_BUFFER_BYTES / 4;
const RING_ALIGNMENT_BYTES: usize = 4096;
const FENCE_BUFFER_BYTES: usize = 16;
// One dword advanced per ring write.
const WPTR_STRIDE_DWORDS: usize = 1;
// SDMA packet opcodes (dword 0 of each packet).
const SDMA_OP_NOP: u32 = 0;
const SDMA_OP_FENCE: u32 = 5;
const SDMA_OP_TRAP: u32 = 6;
// SDMA0 GFX ring register byte offsets (dword index * 4).
// NOTE(review): offsets presumably follow the SDMA 5.x register map —
// confirm against the amdgpu register headers for the targeted ASIC.
const SDMA0_GFX_RB_CNTL: usize = 0x0080 * 4;
const SDMA0_GFX_RB_BASE: usize = 0x0081 * 4;
const SDMA0_GFX_RB_BASE_HI: usize = 0x0082 * 4;
const SDMA0_GFX_RB_RPTR: usize = 0x0083 * 4;
const SDMA0_GFX_RB_RPTR_HI: usize = 0x0084 * 4;
const SDMA0_GFX_RB_WPTR: usize = 0x0085 * 4;
const SDMA0_GFX_RB_WPTR_HI: usize = 0x0086 * 4;
const SDMA0_GFX_RB_WPTR_POLL_CNTL: usize = 0x0087 * 4;
const SDMA0_GFX_RB_RPTR_ADDR_HI: usize = 0x0088 * 4;
const SDMA0_GFX_RB_RPTR_ADDR_LO: usize = 0x0089 * 4;
const SDMA0_GFX_IB_CNTL: usize = 0x008a * 4;
const SDMA0_GFX_RB_WPTR_POLL_ADDR_HI: usize = 0x00b2 * 4;
const SDMA0_GFX_RB_WPTR_POLL_ADDR_LO: usize = 0x00b3 * 4;
const SDMA0_GFX_MINOR_PTR_UPDATE: usize = 0x00b5 * 4;
// RB_CNTL fields: enable bit, ring size (log2, 5-bit field), rptr write-back.
const SDMA_RB_CNTL_RB_ENABLE: u32 = 1 << 0;
const SDMA_RB_CNTL_RB_SIZE_SHIFT: u32 = 1;
const SDMA_RB_CNTL_RB_SIZE_MASK: u32 = 0x1f << SDMA_RB_CNTL_RB_SIZE_SHIFT;
const SDMA_RB_CNTL_RPTR_WRITEBACK_ENABLE: u32 = 1 << 12;
const SDMA_IB_CNTL_IB_ENABLE: u32 = 1 << 0;
// Layout inside the fence buffer: seqno at offset 0, wptr shadow at offset 8.
const FENCE_OFFSET_BYTES: usize = 0;
const WPTR_POLL_OFFSET_BYTES: usize = 8;
// Process-wide publication point for the register aperture, written once by
// `RingManager::bind_mmio` and snapshotted by `MmioBinding::try_load`.
static MMIO_BASE: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
static MMIO_SIZE: AtomicUsize = AtomicUsize::new(0);
/// Snapshot of the published MMIO aperture (base address and size in bytes).
#[derive(Clone, Copy, Debug)]
struct MmioBinding {
    base: usize,
    size: usize,
}
// Safety: MmioBinding holds raw address integers, not pointers.
// It is safe to send between threads because register access is volatile.
// NOTE(review): since both fields are plain `usize`, Send + Sync would be
// auto-derived anyway — these impls are redundant but harmless.
unsafe impl Send for MmioBinding {}
unsafe impl Sync for MmioBinding {}
impl MmioBinding {
    /// Snapshot the process-wide MMIO binding, or `None` if none published yet.
    fn try_load() -> Option<Self> {
        let base = MMIO_BASE.load(Ordering::Acquire);
        let size = MMIO_SIZE.load(Ordering::Acquire);
        (!base.is_null()).then(|| Self {
            base: base as usize,
            size,
        })
    }
    /// True when the 4-byte access at `offset` lies inside the aperture.
    fn access_fits(&self, offset: usize) -> bool {
        matches!(offset.checked_add(4), Some(end) if end <= self.size)
    }
    /// Bounds-checked volatile 32-bit register read.
    fn read32(&self, offset: usize) -> Result<u32> {
        if !self.access_fits(offset) {
            return Err(DriverError::Mmio(format!(
                "AMD ring MMIO read out of bounds: offset={offset:#x} size={:#x}",
                self.size
            )));
        }
        // SAFETY: the access was bounds-checked against the published aperture
        // size; device registers require volatile access.
        Ok(unsafe { core::ptr::read_volatile((self.base + offset) as *const u32) })
    }
    /// Bounds-checked volatile 32-bit register write.
    fn write32(&self, offset: usize, value: u32) -> Result<()> {
        if !self.access_fits(offset) {
            return Err(DriverError::Mmio(format!(
                "AMD ring MMIO write out of bounds: offset={offset:#x} size={:#x}",
                self.size
            )));
        }
        // SAFETY: the access was bounds-checked; volatile store to a register.
        unsafe { core::ptr::write_volatile((self.base + offset) as *mut u32, value) };
        Ok(())
    }
}
/// Software-side model of one SDMA GFX ring plus its fence/write-back buffer.
///
/// NOTE(review): `#[derive(Default)]` yields `next_seqno == 0`, whereas
/// `new()` starts at 1. Harmless today because `initialize()` resets the
/// counters and `page_flip()` refuses to run uninitialized, but worth
/// unifying — confirm no caller relies on `RingManager::default()` directly.
#[derive(Default)]
pub struct RingManager {
    /// Set once `initialize` succeeds; gates all submissions.
    initialized: bool,
    ring_buffer: Option<DmaBuffer>,
    /// Holds the fence seqno (offset 0) and the wptr shadow (offset 8).
    fence_buffer: Option<DmaBuffer>,
    /// Register binding captured at initialize time; `None` = software-only.
    mmio: Option<MmioBinding>,
    ring_size_dwords: u32,
    /// Ring pointers, both tracked in dwords.
    read_ptr: u64,
    write_ptr: u64,
    /// Next fence sequence number to hand out (starts at 1).
    next_seqno: u64,
    last_signaled_seqno: u64,
}
impl RingManager {
    /// Construct an unprogrammed manager; `initialize` must run before use.
    pub fn new() -> Self {
        Self {
            initialized: false,
            ring_buffer: None,
            fence_buffer: None,
            mmio: None,
            ring_size_dwords: RING_BUFFER_DWORDS as u32,
            read_ptr: 0,
            write_ptr: 0,
            next_seqno: 1,
            last_signaled_seqno: 0,
        }
    }
    /// Allocate and zero the ring and fence DMA buffers, capture whatever
    /// MMIO binding has been published via `bind_mmio`, and program the SDMA
    /// ring registers (skipped with a warning when no binding exists).
    pub fn initialize(&mut self) -> Result<()> {
        let mut ring_buffer = DmaBuffer::allocate(RING_BUFFER_BYTES, RING_ALIGNMENT_BYTES)
            .map_err(|e| DriverError::Buffer(format!("ring buffer allocation failed: {e}")))?;
        let mut fence_buffer =
            DmaBuffer::allocate(FENCE_BUFFER_BYTES, core::mem::align_of::<u64>())
                .map_err(|e| DriverError::Buffer(format!("fence buffer allocation failed: {e}")))?;
        Self::zero_dma(&mut ring_buffer);
        Self::zero_dma(&mut fence_buffer);
        self.mmio = MmioBinding::try_load();
        self.program_ring(&ring_buffer, &fence_buffer)?;
        self.ring_buffer = Some(ring_buffer);
        self.fence_buffer = Some(fence_buffer);
        // Reset all software tracking regardless of how we were constructed.
        self.read_ptr = 0;
        self.write_ptr = 0;
        self.next_seqno = 1;
        self.last_signaled_seqno = 0;
        self.initialized = true;
        info!(
            "redox-drm: AMD ring manager initialized with {} DW ring buffer{}",
            self.ring_size_dwords,
            if self.mmio.is_some() {
                " and SDMA MMIO programming"
            } else {
                " (MMIO binding unavailable; submissions stay software-tracked)"
            }
        );
        Ok(())
    }
    /// Build and submit a flip marker + fence packet; returns the fence seqno.
    pub fn page_flip(&mut self) -> Result<u64> {
        self.ensure_initialized()?;
        let seqno = self.next_seqno;
        self.next_seqno = self.next_seqno.saturating_add(1);
        let mut packet = Vec::with_capacity(16);
        self.emit_flip(&mut packet, seqno);
        self.emit_fence(&mut packet, seqno)?;
        self.submit(&packet, seqno)
    }
    /// Publish the register aperture for later `RingManager` instances.
    pub(crate) fn bind_mmio(mmio: &MmioRegion) {
        MMIO_BASE.store(mmio.as_ptr() as *mut u8, Ordering::Release);
        MMIO_SIZE.store(mmio.size(), Ordering::Release);
    }
    /// Guard: all submissions require a successful `initialize` first.
    fn ensure_initialized(&self) -> Result<()> {
        if self.initialized {
            Ok(())
        } else {
            Err(DriverError::Initialization(
                "ring manager must be initialized before page flips".to_string(),
            ))
        }
    }
    /// Program the SDMA0 GFX ring: disable the ring, reset pointers, enable
    /// rptr write-back into the fence buffer, set the 256-byte-aligned ring
    /// base, configure the wptr poll/shadow address, then re-enable the ring
    /// and IBs.
    ///
    /// NOTE(review): the sequence presumably mirrors Linux amdgpu's SDMA GFX
    /// ring resume path — confirm against sdma_v5_x before relying on it on
    /// hardware.
    fn program_ring(&self, ring_buffer: &DmaBuffer, fence_buffer: &DmaBuffer) -> Result<()> {
        let Some(mmio) = self.mmio else {
            warn!(
                "redox-drm: AMD ring manager has no MMIO binding; skipping SDMA register programming"
            );
            return Ok(());
        };
        let ring_addr = ring_buffer.physical_address() as u64;
        let fence_addr = fence_buffer.physical_address() as u64 + FENCE_OFFSET_BYTES as u64;
        let wptr_poll_addr = fence_buffer.physical_address() as u64 + WPTR_POLL_OFFSET_BYTES as u64;
        // Disable the ring and encode the ring size (log2 dwords) first.
        let mut rb_cntl = mmio.read32(SDMA0_GFX_RB_CNTL)?;
        rb_cntl &= !(SDMA_RB_CNTL_RB_ENABLE | SDMA_RB_CNTL_RB_SIZE_MASK);
        rb_cntl |=
            (self.ring_size_order() << SDMA_RB_CNTL_RB_SIZE_SHIFT) & SDMA_RB_CNTL_RB_SIZE_MASK;
        mmio.write32(SDMA0_GFX_RB_CNTL, rb_cntl)?;
        mmio.write32(SDMA0_GFX_RB_RPTR, 0)?;
        mmio.write32(SDMA0_GFX_RB_RPTR_HI, 0)?;
        mmio.write32(SDMA0_GFX_RB_WPTR, 0)?;
        mmio.write32(SDMA0_GFX_RB_WPTR_HI, 0)?;
        // rptr write-back target: the fence buffer (dword-aligned address).
        mmio.write32(SDMA0_GFX_RB_RPTR_ADDR_HI, upper_32(fence_addr))?;
        mmio.write32(SDMA0_GFX_RB_RPTR_ADDR_LO, lower_32(fence_addr) & !0x3)?;
        rb_cntl |= SDMA_RB_CNTL_RPTR_WRITEBACK_ENABLE;
        mmio.write32(SDMA0_GFX_RB_CNTL, rb_cntl)?;
        // Ring base is programmed in 256-byte units (address >> 8).
        mmio.write32(SDMA0_GFX_RB_BASE, lower_32(ring_addr >> 8))?;
        mmio.write32(SDMA0_GFX_RB_BASE_HI, lower_32(ring_addr >> 40))?;
        // Bracket pointer resets with MINOR_PTR_UPDATE as the hardware expects.
        mmio.write32(SDMA0_GFX_MINOR_PTR_UPDATE, 1)?;
        mmio.write32(SDMA0_GFX_RB_WPTR, 0)?;
        mmio.write32(SDMA0_GFX_RB_WPTR_HI, 0)?;
        mmio.write32(SDMA0_GFX_MINOR_PTR_UPDATE, 0)?;
        mmio.write32(SDMA0_GFX_RB_WPTR_POLL_ADDR_LO, lower_32(wptr_poll_addr))?;
        mmio.write32(SDMA0_GFX_RB_WPTR_POLL_ADDR_HI, upper_32(wptr_poll_addr))?;
        mmio.write32(SDMA0_GFX_RB_WPTR_POLL_CNTL, 0)?;
        rb_cntl |= SDMA_RB_CNTL_RB_ENABLE;
        mmio.write32(SDMA0_GFX_RB_CNTL, rb_cntl)?;
        let mut ib_cntl = mmio.read32(SDMA0_GFX_IB_CNTL)?;
        ib_cntl |= SDMA_IB_CNTL_IB_ENABLE;
        mmio.write32(SDMA0_GFX_IB_CNTL, ib_cntl)?;
        Ok(())
    }
    /// Copy `commands` into the ring and publish the new write pointer.
    /// Without an MMIO binding there is no hardware to retire the fence, so
    /// the seqno is signaled immediately in software.
    fn submit(&mut self, commands: &[u32], seqno: u64) -> Result<u64> {
        self.refresh_read_ptr();
        self.ensure_space(commands.len())?;
        for &command in commands {
            self.write_ring_dword(command)?;
        }
        // Make the ring contents visible before the wptr is published.
        fence(Ordering::Release);
        self.publish_wptr()?;
        if self.mmio.is_none() {
            self.write_completed_seqno(seqno)?;
        }
        Ok(seqno)
    }
    /// Refresh `read_ptr` from the hardware rptr registers (byte offset,
    /// hence >> 2 to dwords). Without MMIO, everything counts as consumed.
    fn refresh_read_ptr(&mut self) {
        if let Some(mmio) = self.mmio {
            let low = mmio.read32(SDMA0_GFX_RB_RPTR).unwrap_or(0) as u64;
            let high = mmio.read32(SDMA0_GFX_RB_RPTR_HI).unwrap_or(0) as u64;
            self.read_ptr = ((high << 32) | low) >> 2;
        } else {
            self.read_ptr = self.write_ptr;
        }
    }
    /// Verify `required_dwords` fit; one slot is always kept free so a full
    /// ring remains distinguishable from an empty one.
    fn ensure_space(&self, required_dwords: usize) -> Result<()> {
        if required_dwords >= self.ring_capacity() {
            return Err(DriverError::Buffer(format!(
                "ring submission too large: {} DW exceeds capacity {} DW",
                required_dwords,
                self.ring_capacity() - 1
            )));
        }
        let used = self.used_dwords();
        let free = self.ring_capacity().saturating_sub(used).saturating_sub(1);
        if required_dwords <= free {
            Ok(())
        } else {
            Err(DriverError::Buffer(format!(
                "ring buffer full: required {} DW, free {} DW",
                required_dwords, free
            )))
        }
    }
    /// Occupied dwords between read and write pointers, modulo the ring size.
    fn used_dwords(&self) -> usize {
        let size = self.ring_capacity() as u64;
        ((self.write_ptr + size).wrapping_sub(self.read_ptr) % size) as usize
    }
    /// Store one dword at the current write pointer and advance it (wrapping).
    fn write_ring_dword(&mut self, value: u32) -> Result<()> {
        let capacity = self.ring_capacity();
        let ring_buffer = self
            .ring_buffer
            .as_mut()
            .ok_or_else(|| DriverError::Initialization("ring buffer missing".to_string()))?;
        let index = (self.write_ptr as usize) % capacity;
        let ptr = unsafe {
            ring_buffer
                .as_mut_ptr()
                .add(index * core::mem::size_of::<u32>()) as *mut u32
        };
        unsafe { core::ptr::write_volatile(ptr, value) };
        self.write_ptr = (self.write_ptr + WPTR_STRIDE_DWORDS as u64) % capacity as u64;
        Ok(())
    }
    /// Publish the write pointer: first to the shadow/poll location, then to
    /// the WPTR registers (in bytes, hence << 2), bracketed by
    /// MINOR_PTR_UPDATE.
    fn publish_wptr(&mut self) -> Result<()> {
        self.write_wptr_shadow(self.write_ptr)?;
        let Some(mmio) = self.mmio else {
            return Ok(());
        };
        mmio.write32(SDMA0_GFX_MINOR_PTR_UPDATE, 1)?;
        mmio.write32(SDMA0_GFX_RB_WPTR, lower_32(self.write_ptr << 2))?;
        mmio.write32(SDMA0_GFX_RB_WPTR_HI, upper_32(self.write_ptr << 2))?;
        mmio.write32(SDMA0_GFX_MINOR_PTR_UPDATE, 0)?;
        Ok(())
    }
    /// Pad the packet with `count` NOP dwords.
    fn emit_nop(&self, packet: &mut Vec<u32>, count: u32) {
        for _ in 0..count {
            packet.push(SDMA_OP_NOP);
        }
    }
    /// Emit the flip marker: 0x5049_4c46 is ASCII "FLIP" stored little-endian,
    /// followed by the seqno. `seqno` tags the marker for debugging only.
    fn emit_flip(&self, packet: &mut Vec<u32>, seqno: u64) {
        self.emit_nop(packet, 2);
        packet.push(0x5049_4c46);
        packet.push(lower_32(seqno));
        packet.push(upper_32(seqno));
    }
    /// Emit two 32-bit FENCE writes (covering the 64-bit seqno, low word then
    /// high word) followed by a TRAP to raise an interrupt on completion.
    fn emit_fence(&self, packet: &mut Vec<u32>, seqno: u64) -> Result<()> {
        let fence_addr = self.fence_address()?;
        packet.push(SDMA_OP_FENCE);
        packet.push(lower_32(fence_addr));
        packet.push(upper_32(fence_addr));
        packet.push(lower_32(seqno));
        packet.push(SDMA_OP_FENCE);
        packet.push(lower_32(fence_addr + 4));
        packet.push(upper_32(fence_addr + 4));
        packet.push(upper_32(seqno));
        packet.push(SDMA_OP_TRAP);
        packet.push(0);
        Ok(())
    }
    /// Physical address of the fence seqno slot.
    fn fence_address(&self) -> Result<u64> {
        let fence_buffer = self
            .fence_buffer
            .as_ref()
            .ok_or_else(|| DriverError::Initialization("fence buffer missing".to_string()))?;
        Ok(fence_buffer.physical_address() as u64 + FENCE_OFFSET_BYTES as u64)
    }
    /// Software-signal `seqno` by writing it to the fence slot directly
    /// (used only when there is no hardware to do it).
    fn write_completed_seqno(&mut self, seqno: u64) -> Result<()> {
        let fence_buffer = self
            .fence_buffer
            .as_mut()
            .ok_or_else(|| DriverError::Initialization("fence buffer missing".to_string()))?;
        let ptr = unsafe { fence_buffer.as_mut_ptr().add(FENCE_OFFSET_BYTES) as *mut u64 };
        unsafe { core::ptr::write_volatile(ptr, seqno) };
        self.last_signaled_seqno = seqno;
        Ok(())
    }
    /// Mirror the write pointer (in bytes) into the wptr poll/shadow slot.
    fn write_wptr_shadow(&mut self, wptr_dwords: u64) -> Result<()> {
        let fence_buffer = self
            .fence_buffer
            .as_mut()
            .ok_or_else(|| DriverError::Initialization("fence buffer missing".to_string()))?;
        let ptr = unsafe { fence_buffer.as_mut_ptr().add(WPTR_POLL_OFFSET_BYTES) as *mut u64 };
        unsafe { core::ptr::write_volatile(ptr, wptr_dwords << 2) };
        Ok(())
    }
    /// log2 of the ring size in dwords (the RB_SIZE register encoding).
    fn ring_size_order(&self) -> u32 {
        self.ring_size_dwords.ilog2()
    }
    /// Ring capacity in dwords.
    fn ring_capacity(&self) -> usize {
        self.ring_size_dwords as usize
    }
    /// Zero-fill a DMA buffer.
    fn zero_dma(buffer: &mut DmaBuffer) {
        unsafe { core::ptr::write_bytes(buffer.as_mut_ptr(), 0, buffer.len()) };
    }
}
/// Low 32 bits of `value`.
fn lower_32(value: u64) -> u32 {
    (value & u64::from(u32::MAX)) as u32
}
/// High 32 bits of `value`.
fn upper_32(value: u64) -> u32 {
    let bytes = value.to_le_bytes();
    u32::from_le_bytes([bytes[4], bytes[5], bytes[6], bytes[7]])
}
@@ -0,0 +1,392 @@
use std::sync::Mutex;
use log::{debug, info};
use redox_driver_sys::memory::MmioRegion;
use crate::driver::{DriverError, Result};
use crate::kms::connector::synthetic_edid;
use crate::kms::{ConnectorInfo, ConnectorStatus, ConnectorType, ModeInfo};
// Display topology limits for this skeleton.
const PIPE_COUNT: usize = 3;
const PORT_COUNT: usize = 5;
// Register byte offsets into the Intel display MMIO aperture.
// NOTE(review): offsets presumably follow the i915 display register layout
// (panel power, pipe config, plane control/surface, DDI buffer control,
// transcoder timings) — confirm against the Intel PRM for the target gen.
const PP_STATUS: usize = 0xC7200;
const PIPECONF_BASE: usize = 0x70008;
const DSPCNTR_BASE: usize = 0x70180;
const DSPSURF_BASE: usize = 0x7019C;
const DDI_BUF_CTL_BASE: usize = 0x64000;
// Transcoder timing registers (per pipe, strided by PIPE_STRIDE).
const HTOTAL_BASE: usize = 0x60000;
const HBLANK_BASE: usize = 0x60004;
const HSYNC_BASE: usize = 0x60008;
const VTOTAL_BASE: usize = 0x6000C;
const VBLANK_BASE: usize = 0x60010;
const VSYNC_BASE: usize = 0x60014;
const PIPE_SRC_BASE: usize = 0x6001C;
const PLANE_SIZE_BASE: usize = 0x70190;
// Byte strides between consecutive pipe / port register blocks.
const PIPE_STRIDE: usize = 0x1000;
const PORT_STRIDE: usize = 0x100;
// Enable bits (bit 31) of the respective control registers.
const PIPECONF_ENABLE: u32 = 1 << 31;
const DSPCNTR_ENABLE: u32 = 1 << 31;
const DDI_BUF_CTL_ENABLE: u32 = 1 << 31;
/// Snapshot of one display pipe: its index, whether PIPECONF reports it
/// enabled, and the DDI port it is believed to drive (heuristic).
#[derive(Clone, Copy, Debug)]
pub struct DisplayPipe {
    pub index: u8,
    pub enabled: bool,
    pub port: Option<u8>,
}
/// Intel display skeleton: the mapped register aperture plus a cached view
/// of the pipe states (refreshed from hardware on each query).
pub struct IntelDisplay {
    mmio: MmioRegion,
    pipes: Mutex<Vec<DisplayPipe>>,
}
impl IntelDisplay {
    /// Probe the pipes once and cache the result.
    pub fn new(mmio: MmioRegion) -> Result<Self> {
        let pipes = Self::detect_pipes(&mmio)?;
        info!(
            "redox-drm: Intel display initialized with {} pipe(s)",
            pipes.len()
        );
        Ok(Self {
            mmio,
            pipes: Mutex::new(pipes),
        })
    }
    /// Fresh snapshot of all pipes (re-reads hardware, merges cached state).
    pub fn pipes(&self) -> Result<Vec<DisplayPipe>> {
        self.refresh_pipes()
    }
    /// Map a 1-based CRTC id to its pipe (pipe index = crtc_id - 1).
    pub fn pipe_for_crtc(&self, crtc_id: u32) -> Result<DisplayPipe> {
        let index = crtc_id
            .checked_sub(1)
            .ok_or(DriverError::InvalidArgument("invalid Intel CRTC id"))?
            as usize;
        self.refresh_pipes()?
            .get(index)
            .copied()
            .ok_or_else(|| DriverError::NotFound(format!("unknown Intel pipe for CRTC {crtc_id}")))
    }
    /// Read pipe enable state and guess a port assignment per pipe.
    ///
    /// Heuristics, in order: an enabled DDI port at the same index; panel
    /// power active implies port 0 on pipe 0; an enabled pipe defaults to the
    /// same-numbered port; and if nothing matched anywhere, pipe 0 is pinned
    /// to port 0 so there is always at least one usable assignment.
    pub fn detect_pipes(mmio: &MmioRegion) -> Result<Vec<DisplayPipe>> {
        let mut pipes = Vec::with_capacity(PIPE_COUNT);
        let pp_status = read32(mmio, PP_STATUS).unwrap_or(0);
        let connected_ports = connected_ports(mmio);
        for index in 0..PIPE_COUNT {
            let conf = read32(mmio, pipe_offset(PIPECONF_BASE, index))?;
            let enabled = conf & PIPECONF_ENABLE != 0;
            let mut port = connected_ports.get(index).copied();
            if port.is_none() && index == 0 && pp_status != 0 {
                port = Some(0);
            }
            if port.is_none() && enabled {
                port = Some(index as u8);
            }
            pipes.push(DisplayPipe {
                index: index as u8,
                enabled,
                port,
            });
        }
        if pipes.iter().all(|pipe| pipe.port.is_none()) {
            if let Some(pipe) = pipes.first_mut() {
                pipe.port = Some(0);
            }
        }
        Ok(pipes)
    }
    /// Build one connector per DDI port. A port counts as connected when its
    /// DDI buffer is enabled, a pipe claims it, or (port 0) panel power is on.
    pub fn detect_connectors(&self) -> Result<Vec<ConnectorInfo>> {
        let pp_status = self.read32(PP_STATUS).unwrap_or(0);
        let pipes = self.refresh_pipes()?;
        let mut connectors = Vec::with_capacity(PORT_COUNT);
        for port in 0..PORT_COUNT as u8 {
            let status = self.read32(ddi_offset(port)).unwrap_or(0);
            let connected = status & DDI_BUF_CTL_ENABLE != 0
                || pipes
                    .iter()
                    .any(|pipe| pipe.port == Some(port) && pipe.enabled)
                || (port == 0 && pp_status != 0);
            let connector_type = connector_type_for_port(port, pp_status);
            let modes = self.modes_for_port(port, connector_type);
            connectors.push(ConnectorInfo {
                id: port as u32 + 1,
                connector_type,
                connector_type_id: port as u32 + 1,
                connection: if connected {
                    ConnectorStatus::Connected
                } else {
                    ConnectorStatus::Disconnected
                },
                // Placeholder physical size (~27" 16:9 panel) until EDID
                // parsing provides real dimensions.
                mm_width: 600,
                mm_height: 340,
                encoder_id: port as u32 + 1,
                modes,
            });
        }
        Ok(connectors)
    }
    /// Mode list for an existing connector (derives the port from its id).
    pub fn modes_for_connector(&self, connector: &ConnectorInfo) -> Vec<ModeInfo> {
        let port = connector
            .connector_type_id
            .saturating_sub(1)
            .min((PORT_COUNT - 1) as u32) as u8;
        self.modes_for_port(port, connector.connector_type)
    }
    /// EDID read stub: no GMBUS/I2C support yet, always synthetic.
    pub fn read_edid(&self, port: u8) -> Vec<u8> {
        debug!("redox-drm: Intel HDMI/DVI EDID fallback on port {}", port);
        synthetic_edid()
    }
    /// DPCD read stub: empty when the port's DDI buffer is disabled,
    /// otherwise a fixed 4-byte reply.
    /// NOTE(review): the bytes presumably encode DPCD rev 1.2 plus
    /// link-rate/lane-count fields — confirm before clients parse them.
    pub fn read_dpcd(&self, port: u8) -> Vec<u8> {
        let status = self.read32(ddi_offset(port)).unwrap_or(0);
        if status & DDI_BUF_CTL_ENABLE == 0 {
            return Vec::new();
        }
        debug!("redox-drm: Intel AUX/DPCD skeleton read on port {}", port);
        vec![0x12, 0x0A, 0x84, 0x01]
    }
    /// Program timing registers for `mode` on `pipe`, enable the plane, pipe
    /// and (when assigned) the DDI port, then update the cached pipe state.
    /// Timing register pairs are packed via `pack_pair` (value-minus-one).
    pub fn set_mode(&self, pipe: &DisplayPipe, mode: &ModeInfo) -> Result<()> {
        let index = usize::from(pipe.index);
        self.write32(
            pipe_offset(HTOTAL_BASE, index),
            pack_pair(mode.htotal, mode.hdisplay),
        )?;
        // Blank period spans from active end to total (borderless mode).
        self.write32(
            pipe_offset(HBLANK_BASE, index),
            pack_pair(mode.htotal, mode.hdisplay),
        )?;
        self.write32(
            pipe_offset(HSYNC_BASE, index),
            pack_pair(mode.hsync_end, mode.hsync_start),
        )?;
        self.write32(
            pipe_offset(VTOTAL_BASE, index),
            pack_pair(mode.vtotal, mode.vdisplay),
        )?;
        self.write32(
            pipe_offset(VBLANK_BASE, index),
            pack_pair(mode.vtotal, mode.vdisplay),
        )?;
        self.write32(
            pipe_offset(VSYNC_BASE, index),
            pack_pair(mode.vsync_end, mode.vsync_start),
        )?;
        self.write32(
            pipe_offset(PIPE_SRC_BASE, index),
            pack_pair(mode.vdisplay, mode.hdisplay),
        )?;
        self.write32(
            pipe_offset(PLANE_SIZE_BASE, index),
            pack_pair(mode.vdisplay, mode.hdisplay),
        )?;
        let mut dspcntr = self.read32(pipe_offset(DSPCNTR_BASE, index))?;
        dspcntr |= DSPCNTR_ENABLE;
        self.write32(pipe_offset(DSPCNTR_BASE, index), dspcntr)?;
        let mut pipeconf = self.read32(pipe_offset(PIPECONF_BASE, index))?;
        pipeconf |= PIPECONF_ENABLE;
        self.write32(pipe_offset(PIPECONF_BASE, index), pipeconf)?;
        if let Some(port) = pipe.port {
            let mut ddi = self.read32(ddi_offset(port))?;
            ddi |= DDI_BUF_CTL_ENABLE;
            self.write32(ddi_offset(port), ddi)?;
        }
        self.update_pipe(pipe.index, true, pipe.port)?;
        Ok(())
    }
    /// Point the pipe's display surface at `fb_addr` (latched at vblank).
    /// Only 32-bit GGTT offsets are supported by this skeleton.
    pub fn page_flip(&self, pipe: &DisplayPipe, fb_addr: u64) -> Result<()> {
        if fb_addr > u64::from(u32::MAX) {
            return Err(DriverError::Buffer(format!(
                "Intel DSPSURF supports 32-bit GGTT offsets in this skeleton, got {fb_addr:#x}"
            )));
        }
        let index = usize::from(pipe.index);
        self.write32(pipe_offset(DSPSURF_BASE, index), fb_addr as u32)
    }
    /// Re-detect pipes and merge with the cache: a port assignment sticks
    /// once learned, and the enabled flag is sticky (OR of old and new).
    fn refresh_pipes(&self) -> Result<Vec<DisplayPipe>> {
        let detected = Self::detect_pipes(&self.mmio)?;
        let mut cached = self
            .pipes
            .lock()
            .map_err(|_| DriverError::Initialization("Intel display pipe state poisoned".into()))?;
        let previous = cached.clone();
        let mut refreshed = Vec::with_capacity(detected.len());
        for mut pipe in detected {
            if let Some(existing) = previous
                .iter()
                .find(|existing| existing.index == pipe.index)
            {
                if pipe.port.is_none() {
                    pipe.port = existing.port;
                }
                pipe.enabled |= existing.enabled;
            }
            refreshed.push(pipe);
        }
        *cached = refreshed.clone();
        Ok(refreshed)
    }
    /// Update (or insert) one cached pipe entry; an existing port assignment
    /// is only overwritten by a new `Some(..)` value.
    fn update_pipe(&self, index: u8, enabled: bool, port: Option<u8>) -> Result<()> {
        let mut cached = self
            .pipes
            .lock()
            .map_err(|_| DriverError::Initialization("Intel display pipe state poisoned".into()))?;
        if let Some(pipe) = cached.iter_mut().find(|pipe| pipe.index == index) {
            pipe.enabled = enabled;
            pipe.port = port.or(pipe.port);
            return Ok(());
        }
        cached.push(DisplayPipe {
            index,
            enabled,
            port,
        });
        Ok(())
    }
    /// Mode list for one port: DPCD-derived for DP/eDP, EDID-derived for the
    /// rest, with synthetic-EDID and then 1080p fallbacks so the list is
    /// never empty.
    fn modes_for_port(&self, port: u8, connector_type: ConnectorType) -> Vec<ModeInfo> {
        let mut modes = match connector_type {
            ConnectorType::DisplayPort | ConnectorType::EDP => {
                modes_from_dpcd(&self.read_dpcd(port))
            }
            _ => ModeInfo::from_edid(&self.read_edid(port)),
        };
        if modes.is_empty() {
            modes = ModeInfo::from_edid(&synthetic_edid());
        }
        if modes.is_empty() {
            modes.push(ModeInfo::default_1080p());
        }
        modes
    }
    /// Bounds-checked register read on this display's aperture.
    fn read32(&self, offset: usize) -> Result<u32> {
        read32(&self.mmio, offset)
    }
    /// Bounds-checked register write on this display's aperture.
    fn write32(&self, offset: usize, value: u32) -> Result<()> {
        write32(&self.mmio, offset, value)
    }
}
/// Ports whose DDI buffer control register has the enable bit set; register
/// read failures count as disabled.
fn connected_ports(mmio: &MmioRegion) -> Vec<u8> {
    (0..PORT_COUNT as u8)
        .filter(|&port| {
            let ctl = read32(mmio, ddi_offset(port)).unwrap_or(0);
            ctl & DDI_BUF_CTL_ENABLE != 0
        })
        .collect()
}
/// Bounds-checked 32-bit MMIO read.
fn read32(mmio: &MmioRegion, offset: usize) -> Result<u32> {
    let width = core::mem::size_of::<u32>();
    ensure_access(mmio.size(), offset, width, "Intel display read")?;
    Ok(mmio.read32(offset))
}
/// Bounds-checked 32-bit MMIO write.
fn write32(mmio: &MmioRegion, offset: usize, value: u32) -> Result<()> {
    let width = core::mem::size_of::<u32>();
    ensure_access(mmio.size(), offset, width, "Intel display write")?;
    mmio.write32(offset, value);
    Ok(())
}
/// Validate that `[offset, offset + width)` fits inside an aperture of
/// `mmio_size` bytes; `op` labels the error message.
fn ensure_access(mmio_size: usize, offset: usize, width: usize, op: &str) -> Result<()> {
    match offset.checked_add(width) {
        None => Err(DriverError::Mmio(format!(
            "{op} offset overflow at {offset:#x}"
        ))),
        Some(end) if end > mmio_size => Err(DriverError::Mmio(format!(
            "{op} outside MMIO aperture: end={end:#x} size={mmio_size:#x}"
        ))),
        Some(_) => Ok(()),
    }
}
/// Register offset for pipe `index`; pipes are laid out PIPE_STRIDE apart.
fn pipe_offset(base: usize, index: usize) -> usize {
    base + PIPE_STRIDE * index
}
/// DDI buffer control register offset for `port` (PORT_STRIDE apart).
fn ddi_offset(port: u8) -> usize {
    PORT_STRIDE * usize::from(port) + DDI_BUF_CTL_BASE
}
/// Pack two timing values into one register word (`upper` in bits 31:16,
/// `lower` in bits 15:0), each biased by -1 as Intel timing registers store
/// value-minus-one; the bias saturates at 0.
fn pack_pair(upper: u16, lower: u16) -> u32 {
    let hi = u32::from(upper).saturating_sub(1);
    let lo = u32::from(lower).saturating_sub(1);
    (hi << 16) | lo
}
/// Guess the connector type for a DDI port: port 0 is eDP when panel power
/// is active, ports 0-1 are HDMI, 2-3 DisplayPort, anything else VGA.
fn connector_type_for_port(port: u8, pp_status: u32) -> ConnectorType {
    if port == 0 && pp_status != 0 {
        return ConnectorType::EDP;
    }
    match port {
        0 | 1 => ConnectorType::HDMIA,
        2 | 3 => ConnectorType::DisplayPort,
        _ => ConnectorType::VGA,
    }
}
/// Derive a mode list from raw DPCD bytes: an empty read means no
/// sink; otherwise a fixed 1080p/1440p pair is advertised.
/// NOTE(review): the DPCD payload is not actually parsed yet — the
/// mode list is independent of the reported link capabilities.
fn modes_from_dpcd(dpcd: &[u8]) -> Vec<ModeInfo> {
    if dpcd.is_empty() {
        Vec::new()
    } else {
        vec![ModeInfo::default_1080p(), mode_1440p()]
    }
}
/// Canned 2560x1440@60 timing offered to DP-class sinks.
fn mode_1440p() -> ModeInfo {
    ModeInfo {
        name: "2560x1440@60".to_string(),
        clock: 241_500,
        // Horizontal timing (pixels).
        hdisplay: 2560,
        hsync_start: 2608,
        hsync_end: 2640,
        htotal: 2720,
        hskew: 0,
        // Vertical timing (lines).
        vdisplay: 1440,
        vsync_start: 1443,
        vsync_end: 1448,
        vtotal: 1481,
        vscan: 0,
        vrefresh: 60,
        flags: 0,
        type_: 0,
    }
}
@@ -0,0 +1,226 @@
use std::collections::BTreeMap;
use log::{debug, info};
use redox_driver_sys::memory::MmioRegion;
use crate::driver::{DriverError, Result};
/// Byte offset of the first GGTT PTE inside the GTT BAR.
const GTT_BASE: usize = 0x0000;
/// Graphics flush control register, accessed through the control BAR.
const GFX_FLSH_CNTL_REG: usize = 0x101008;
/// Enable/kick bit written to the flush control register.
const GFX_FLSH_CNTL_EN: u32 = 1 << 0;
/// GGTT geometry: 4 KiB pages, one 8-byte PTE per page.
const GTT_PAGE_SIZE: u64 = 4096;
const GTT_PAGE_MASK: u64 = GTT_PAGE_SIZE - 1;
/// PTE flag bits accepted by encode_pte.
const GTT_PTE_PRESENT: u64 = 1 << 0;
const GTT_PTE_WRITE: u64 = 1 << 1;
/// Mask selecting the page-aligned physical address inside a PTE.
const GTT_PTE_ADDR_MASK: u64 = 0xFFFF_FFFF_FFFF_F000;
/// Manager for the Intel Global GTT: owns the PTE aperture, a
/// first-fit/bump address allocator, and the flush register.
pub struct IntelGtt {
    /// BAR exposing the GGTT page-table entries (one u64 PTE per page).
    gtt_mmio: MmioRegion,
    /// Register BAR used only to reach the flush control register.
    control_mmio: MmioRegion,
    /// Number of PTEs (gtt_mmio size / 8).
    page_count: usize,
    /// Mappable GPU address space in bytes (page_count * page size).
    aperture_size: u64,
    /// Bump pointer for address space never handed out before.
    next_allocation: u64,
    /// Released (start, size) ranges available for reuse.
    free_list: Vec<(u64, u64)>,
    /// Live mappings: GPU start address -> aligned size.
    mappings: BTreeMap<u64, u64>,
}
impl IntelGtt {
    /// Take ownership of the GGTT PTE aperture (`gtt_mmio`) and the
    /// register BAR carrying the flush register (`control_mmio`),
    /// derive the aperture geometry from the PTE count, and issue an
    /// initial flush.
    ///
    /// # Errors
    /// Fails when the GTT BAR holds no PTEs, when the aperture size
    /// computation overflows, or when the flush register is out of
    /// reach.
    pub fn init(gtt_mmio: MmioRegion, control_mmio: MmioRegion) -> Result<Self> {
        // One 8-byte PTE per mappable 4 KiB page.
        let page_count = gtt_mmio.size() / core::mem::size_of::<u64>();
        if page_count == 0 {
            return Err(DriverError::Initialization(
                "Intel GGTT BAR exposes no page table entries".to_string(),
            ));
        }
        let aperture_size = (page_count as u64)
            .checked_mul(GTT_PAGE_SIZE)
            .ok_or_else(|| DriverError::Initialization("Intel GGTT aperture overflow".into()))?;
        let gtt = Self {
            gtt_mmio,
            control_mmio,
            page_count,
            aperture_size,
            next_allocation: 0,
            free_list: Vec::new(),
            mappings: BTreeMap::new(),
        };
        // Flush once up front so no stale translation state survives.
        gtt.flush()?;
        info!(
            "redox-drm: Intel GGTT initialized with {} entries ({:#x} aperture)",
            page_count, aperture_size
        );
        Ok(gtt)
    }
    /// Reserve `size` bytes (rounded up to whole pages) of GPU address
    /// space: first-fit from the free list, else bump-allocate.
    pub fn alloc_range(&mut self, size: u64) -> Result<u64> {
        let aligned_size = align_up(size, GTT_PAGE_SIZE)?;
        if let Some(index) = self
            .free_list
            .iter()
            .position(|&(_, free_size)| free_size >= aligned_size)
        {
            // First fit: take the hole, give the unused tail back.
            let (start, free_size) = self.free_list.remove(index);
            let remainder = free_size.saturating_sub(aligned_size);
            if remainder != 0 {
                self.free_list.push((start + aligned_size, remainder));
            }
            return Ok(start);
        }
        // No reusable hole: extend the bump allocator.
        let start = self.next_allocation;
        let end = start
            .checked_add(aligned_size)
            .ok_or_else(|| DriverError::Buffer("Intel GGTT allocation overflow".into()))?;
        if end > self.aperture_size {
            return Err(DriverError::Buffer(format!(
                "Intel GGTT aperture exhausted: need {:#x} bytes, remaining {:#x}",
                aligned_size,
                self.aperture_size.saturating_sub(start)
            )));
        }
        self.next_allocation = end;
        Ok(start)
    }
    /// Return an allocated range to the free list for reuse.
    /// NOTE(review): adjacent holes are never coalesced and the range
    /// is not validated against prior allocations — confirm callers
    /// only release ranges alloc_range handed out.
    pub fn release_range(&mut self, gpu_addr: u64, size: u64) -> Result<()> {
        let aligned_size = align_up(size, GTT_PAGE_SIZE)?;
        self.free_list.push((gpu_addr, aligned_size));
        Ok(())
    }
    /// Point `size` bytes of GPU address space at contiguous memory
    /// starting at `phys_addr`, one PTE per page, then flush once.
    pub fn map_range(
        &mut self,
        gpu_addr: u64,
        phys_addr: u64,
        size: u64,
        flags: u64,
    ) -> Result<()> {
        let aligned_size = align_up(size, GTT_PAGE_SIZE)?;
        let page_count = (aligned_size / GTT_PAGE_SIZE) as usize;
        for page in 0..page_count {
            let page_offset = (page as u64) * GTT_PAGE_SIZE;
            self.insert_page(gpu_addr + page_offset, phys_addr + page_offset, flags)?;
        }
        // Bookkeeping so the live range can be tracked/unmapped later.
        self.mappings.insert(gpu_addr, aligned_size);
        self.flush()
    }
    /// Clear every PTE covering the range, drop the bookkeeping entry,
    /// and flush so the hardware stops translating those addresses.
    pub fn unmap_range(&mut self, gpu_addr: u64, size: u64) -> Result<()> {
        let aligned_size = align_up(size, GTT_PAGE_SIZE)?;
        let page_count = (aligned_size / GTT_PAGE_SIZE) as usize;
        for page in 0..page_count {
            let page_offset = (page as u64) * GTT_PAGE_SIZE;
            self.remove_page(gpu_addr + page_offset)?;
        }
        self.mappings.remove(&gpu_addr);
        self.flush()
    }
    /// Write a single PTE mapping `virtual_addr` -> `physical_addr`.
    /// Both addresses must be page aligned; the written PTE always
    /// carries the present bit (see encode_pte).
    pub fn insert_page(&self, virtual_addr: u64, physical_addr: u64, flags: u64) -> Result<()> {
        ensure_page_alignment(virtual_addr, "virtual_addr")?;
        ensure_page_alignment(physical_addr, "physical_addr")?;
        let entry_index = self.entry_index(virtual_addr)?;
        let entry_offset = gtt_entry_offset(entry_index)?;
        self.ensure_gtt_access(entry_offset, core::mem::size_of::<u64>(), "GGTT PTE write")?;
        let pte = encode_pte(physical_addr, flags);
        self.gtt_mmio.write64(entry_offset, pte);
        debug!(
            "redox-drm: Intel GGTT map va={:#x} -> pa={:#x} flags={:#x}",
            virtual_addr, physical_addr, flags
        );
        Ok(())
    }
    /// Clear the PTE for `virtual_addr` (all-zero, non-present entry).
    pub fn remove_page(&self, virtual_addr: u64) -> Result<()> {
        ensure_page_alignment(virtual_addr, "virtual_addr")?;
        let entry_index = self.entry_index(virtual_addr)?;
        let entry_offset = gtt_entry_offset(entry_index)?;
        self.ensure_gtt_access(entry_offset, core::mem::size_of::<u64>(), "GGTT PTE clear")?;
        self.gtt_mmio.write64(entry_offset, 0);
        debug!("redox-drm: Intel GGTT unmap va={:#x}", virtual_addr);
        Ok(())
    }
    /// Kick the graphics flush register so PTE updates become visible
    /// to the GPU; the read-back flushes the posted write.
    pub fn flush(&self) -> Result<()> {
        self.ensure_control_access(GFX_FLSH_CNTL_REG, core::mem::size_of::<u32>(), "GGTT flush")?;
        self.control_mmio
            .write32(GFX_FLSH_CNTL_REG, GFX_FLSH_CNTL_EN);
        let _ = self.control_mmio.read32(GFX_FLSH_CNTL_REG);
        Ok(())
    }
    /// PTE index for `virtual_addr`, bounds-checked against the
    /// aperture.
    fn entry_index(&self, virtual_addr: u64) -> Result<usize> {
        let entry_index = (virtual_addr / GTT_PAGE_SIZE) as usize;
        if entry_index >= self.page_count {
            return Err(DriverError::Buffer(format!(
                "Intel GGTT entry {entry_index} outside aperture of {} entries",
                self.page_count
            )));
        }
        Ok(entry_index)
    }
    /// Bounds check against the PTE aperture.
    fn ensure_gtt_access(&self, offset: usize, width: usize, op: &str) -> Result<()> {
        ensure_mmio_access(self.gtt_mmio.size(), offset, width, op)
    }
    /// Bounds check against the control-register aperture.
    fn ensure_control_access(&self, offset: usize, width: usize, op: &str) -> Result<()> {
        ensure_mmio_access(self.control_mmio.size(), offset, width, op)
    }
}
/// Round `value` up to the next multiple of `alignment` (a power of
/// two), failing instead of wrapping on u64 overflow.
fn align_up(value: u64, alignment: u64) -> Result<u64> {
    let bump = alignment - 1;
    match value.checked_add(bump) {
        Some(padded) => Ok(padded & !bump),
        None => Err(DriverError::Buffer(
            "Intel GGTT size alignment overflow".into(),
        )),
    }
}
/// Reject addresses that are not page aligned; `name` identifies the
/// offending argument in the error.
fn ensure_page_alignment(value: u64, name: &'static str) -> Result<()> {
    match value & GTT_PAGE_MASK {
        0 => Ok(()),
        _ => Err(DriverError::InvalidArgument(name)),
    }
}
/// Byte offset of PTE `entry_index` inside the GTT BAR, with overflow
/// checks on both the multiply and the base addition.
fn gtt_entry_offset(entry_index: usize) -> Result<usize> {
    let relative = entry_index
        .checked_mul(core::mem::size_of::<u64>())
        .ok_or_else(|| DriverError::Mmio("Intel GGTT entry offset overflow".into()))?;
    GTT_BASE
        .checked_add(relative)
        .ok_or_else(|| DriverError::Mmio("Intel GGTT base offset overflow".into()))
}
/// Validate that `width` bytes at `offset` fit within an aperture of
/// `mmio_size` bytes; `op` labels any resulting error.
fn ensure_mmio_access(mmio_size: usize, offset: usize, width: usize, op: &str) -> Result<()> {
    let end = match offset.checked_add(width) {
        Some(end) => end,
        None => {
            return Err(DriverError::Mmio(format!(
                "{op} offset overflow at {offset:#x}"
            )))
        }
    };
    if end <= mmio_size {
        Ok(())
    } else {
        Err(DriverError::Mmio(format!(
            "{op} outside MMIO aperture: end={end:#x} size={mmio_size:#x}"
        )))
    }
}
/// Build a GGTT PTE: the masked physical address plus the caller's
/// present/write flags, with the present bit always forced on.
fn encode_pte(physical_addr: u64, flags: u64) -> u64 {
    let addr = physical_addr & GTT_PTE_ADDR_MASK;
    let permitted = flags & (GTT_PTE_PRESENT | GTT_PTE_WRITE);
    addr | permitted | GTT_PTE_PRESENT
}
@@ -0,0 +1,667 @@
pub mod display;
pub mod gtt;
pub mod ring;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Mutex;
use log::{debug, info, warn};
use redox_driver_sys::irq::IrqHandle;
use redox_driver_sys::memory::MmioRegion;
use redox_driver_sys::pci::{PciBarInfo, PciDevice, PciDeviceInfo};
use crate::driver::{DriverError, GpuDriver, Result};
use crate::gem::{GemHandle, GemManager};
use crate::kms::connector::{synthetic_edid, Connector};
use crate::kms::crtc::Crtc;
use crate::kms::encoder::Encoder;
use crate::kms::{ConnectorInfo, ConnectorType, ModeInfo};
use self::display::{DisplayPipe, IntelDisplay};
use self::gtt::IntelGtt;
use self::ring::{IntelRing, RingType};
/// Forcewake request register (see enable_forcewake).
const FORCEWAKE: usize = 0xA18C;
/// Panel-power status; a non-zero value marks port 0 as eDP.
const PP_STATUS: usize = 0xC7200;
/// Per-pipe configuration registers, one block every PIPE_STRIDE bytes.
const PIPECONF_BASE: usize = 0x70008;
const PIPE_STRIDE: usize = 0x1000;
/// Per-port DDI buffer-control registers.
const DDI_BUF_CTL_BASE: usize = 0x64000;
const DDI_PORT_STRIDE: usize = 0x100;
/// GGTT flush control register (also used by gtt.rs).
const GFX_FLSH_CNTL_REG: usize = 0x101008;
/// Render ring register block plus the tail/head offsets the BAR-size
/// validation must account for.
const RENDER_RING_BASE: usize = 0x02000;
const RING_TAIL_OFFSET: usize = 0x30;
const RING_HEAD_OFFSET: usize = 0x34;
/// DRM/KMS driver instance for one Intel display-class PCI device.
/// Mutable state sits behind Mutexes so the trait methods can take
/// `&self`.
pub struct IntelDriver {
    /// PCI identity of the managed device.
    info: PciDeviceInfo,
    /// BAR2 register aperture used for raw register reads.
    mmio: MmioRegion,
    /// IRQ handle when the device has an interrupt line assigned.
    irq_handle: Mutex<Option<IrqHandle>>,
    /// Display/mode-setting engine (owns its own BAR2 mapping).
    display: IntelDisplay,
    /// Buffer-object manager.
    gem: Mutex<GemManager>,
    /// Cached connector snapshot from the last probe.
    connectors: Mutex<Vec<Connector>>,
    /// CRTC state, one entry per hardware pipe.
    crtcs: Mutex<Vec<Crtc>>,
    /// Encoder state paired with the connectors.
    encoders: Mutex<Vec<Encoder>>,
    /// Global GTT allocator/mapper.
    gtt: Mutex<IntelGtt>,
    /// Render command ring.
    ring: Mutex<IntelRing>,
    /// Monotonic vblank counter advanced by IRQ processing.
    vblank_count: AtomicU64,
}
impl IntelDriver {
    /// Bring up the driver for an Intel display-class device: validate
    /// and map BARs, enable forcewake, initialize display/GGTT/ring,
    /// probe the connector topology, and claim the IRQ if one exists.
    ///
    /// # Errors
    /// Any PCI, MMIO, or subsystem initialization failure aborts
    /// construction.
    pub fn new(info: PciDeviceInfo, firmware: HashMap<String, Vec<u8>>) -> Result<Self> {
        if !info.is_intel_gpu() {
            return Err(DriverError::Pci(format!(
                "device {} is not an Intel display-class GPU",
                info.location
            )));
        }
        let gtt_bar = find_memory_bar(&info, 0, "GGTT BAR0")?;
        let mmio_bar = find_memory_bar(&info, 2, "MMIO BAR2")?;
        validate_intel_bars(&info, &gtt_bar, &mmio_bar)?;
        let mut device = PciDevice::open_location(&info.location)
            .map_err(|e| DriverError::Pci(format!("failed to re-open PCI device: {e}")))?;
        device
            .enable_device()
            .map_err(|e| DriverError::Pci(format!("enable_device failed: {e}")))?;
        // BAR2 is mapped once per subcomponent so each owns an
        // independent MmioRegion.
        let mmio = map_bar(&mut device, &mmio_bar, "Intel MMIO BAR2")?;
        let display_mmio = map_bar(&mut device, &mmio_bar, "Intel display MMIO")?;
        let ring_mmio = map_bar(&mut device, &mmio_bar, "Intel ring MMIO")?;
        let gtt_control_mmio = map_bar(&mut device, &mmio_bar, "Intel GGTT control MMIO")?;
        let gtt_mmio = map_bar(&mut device, &gtt_bar, "Intel GGTT BAR0")?;
        enable_forcewake(&mmio)?;
        let display = IntelDisplay::new(display_mmio)?;
        let mut gtt = IntelGtt::init(gtt_mmio, gtt_control_mmio)?;
        let mut ring = IntelRing::create(ring_mmio, RingType::Render)?;
        ring.bind_gtt(&mut gtt)?;
        let (connectors, encoders) = detect_display_topology(&display)?;
        let crtcs = build_crtcs(&display)?;
        // A missing IRQ is tolerated (polling-only operation).
        let irq_handle = match info.irq {
            Some(irq) => Some(
                IrqHandle::request(irq)
                    .map_err(|e| DriverError::Io(format!("failed to request IRQ {irq}: {e}")))?,
            ),
            None => {
                warn!(
                    "redox-drm: Intel device {} has no IRQ assigned",
                    info.location
                );
                None
            }
        };
        if !firmware.is_empty() {
            warn!(
                "redox-drm: Intel driver ignores {} firmware blob(s); i915-class GPUs usually boot without scheme:firmware blobs",
                firmware.len()
            );
        }
        info!(
            "redox-drm: Intel driver ready for {} with {} connector(s)",
            info.location,
            connectors.len()
        );
        Ok(Self {
            info,
            mmio,
            irq_handle: Mutex::new(irq_handle),
            display,
            gem: Mutex::new(GemManager::new()),
            connectors: Mutex::new(connectors),
            crtcs: Mutex::new(crtcs),
            encoders: Mutex::new(encoders),
            gtt: Mutex::new(gtt),
            ring: Mutex::new(ring),
            vblank_count: AtomicU64::new(0),
        })
    }
    /// Re-probe the hardware, replace the cached connector and encoder
    /// snapshots, and return the fresh connector infos.
    fn refresh_connectors(&self) -> Result<Vec<ConnectorInfo>> {
        let (connectors, encoders) = detect_display_topology(&self.display)?;
        let infos = connectors
            .iter()
            .map(|connector| connector.info.clone())
            .collect();
        // Each cache is swapped inside its own scope so the two locks
        // are never held at the same time.
        {
            let mut connector_state = self.connectors.lock().map_err(|_| {
                DriverError::Initialization("Intel connector state poisoned".into())
            })?;
            *connector_state = connectors;
        }
        {
            let mut encoder_state = self
                .encoders
                .lock()
                .map_err(|_| DriverError::Initialization("Intel encoder state poisoned".into()))?;
            *encoder_state = encoders;
        }
        Ok(infos)
    }
    /// Snapshot the cached connector infos without touching hardware;
    /// a poisoned lock degrades to the inner value instead of failing.
    fn cached_connectors(&self) -> Vec<ConnectorInfo> {
        match self.connectors.lock() {
            Ok(connectors) => connectors
                .iter()
                .map(|connector| connector.info.clone())
                .collect(),
            Err(poisoned) => {
                warn!("redox-drm: Intel connector state poisoned; using inner state");
                poisoned
                    .into_inner()
                    .iter()
                    .map(|connector| connector.info.clone())
                    .collect()
            }
        }
    }
    /// Map a connector id to its zero-based DDI port
    /// (connector_type_id is 1-based).
    fn connector_port(&self, connector_id: u32) -> Result<u8> {
        let connectors = self
            .connectors
            .lock()
            .map_err(|_| DriverError::Initialization("Intel connector state poisoned".into()))?;
        let connector = connectors
            .iter()
            .find(|connector| connector.info.id == connector_id)
            .ok_or_else(|| DriverError::NotFound(format!("unknown connector {connector_id}")))?;
        Ok(connector.info.connector_type_id.saturating_sub(1) as u8)
    }
    /// Decode an acknowledged IRQ: detect hotplug by diffing connector
    /// snapshots, sample ring activity, and account one vblank when a
    /// CRTC is active. Returns `(crtc_id, vblank_count)` for display
    /// events, `None` otherwise.
    fn process_irq(&self) -> Result<Option<(u32, u64)>> {
        let previous = self.cached_connectors();
        let current = self.refresh_connectors()?;
        if connector_status_changed(&previous, &current) {
            info!(
                "redox-drm: Intel hotplug event detected on {}",
                self.info.location
            );
        }
        let ring_busy = self
            .ring
            .lock()
            .map_err(|_| DriverError::Initialization("Intel ring state poisoned".into()))?
            .has_activity()?;
        if let Some(crtc_id) = self.active_crtc_id()? {
            // Any IRQ with an active CRTC is counted as one vblank.
            let count = self.vblank_count.fetch_add(1, Ordering::SeqCst) + 1;
            debug!(
                "redox-drm: Intel IRQ decoded as display event crtc={} ring_busy={}",
                crtc_id, ring_busy
            );
            return Ok(Some((crtc_id, count)));
        }
        if ring_busy {
            debug!("redox-drm: Intel IRQ signaled command stream activity without active CRTC");
        }
        Ok(None)
    }
    /// Id of the active CRTC: a software-programmed CRTC wins, else the
    /// first enabled hardware pipe (1-based id).
    fn active_crtc_id(&self) -> Result<Option<u32>> {
        let crtcs = self
            .crtcs
            .lock()
            .map_err(|_| DriverError::Initialization("Intel CRTC state poisoned".into()))?;
        if let Some(active) = crtcs.iter().find(|crtc| crtc.mode.is_some()) {
            return Ok(Some(active.id));
        }
        Ok(self
            .display
            .pipes()?
            .into_iter()
            .find(|pipe| pipe.enabled)
            .map(|pipe| u32::from(pipe.index) + 1))
    }
    /// Idempotently bind a GEM object into the GGTT and return its GPU
    /// address. Partial failures roll back the GGTT reservation so
    /// nothing leaks.
    fn ensure_gem_gpu_mapping(&self, handle: GemHandle) -> Result<u64> {
        // Fast path: already bound.
        {
            let gem = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
            if let Some(gpu_addr) = gem.gpu_addr(handle)? {
                return Ok(gpu_addr);
            }
        }
        let (phys_addr, size) = {
            let gem = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
            let object = gem.object(handle)?;
            (object.phys_addr as u64, object.size)
        };
        let gpu_addr = {
            let mut gtt = self
                .gtt
                .lock()
                .map_err(|_| DriverError::Initialization("Intel GGTT state poisoned".into()))?;
            let gpu_addr = gtt.alloc_range(size)?;
            // 1 << 1 is the writable PTE flag (gtt::GTT_PTE_WRITE).
            if let Err(error) = gtt.map_range(gpu_addr, phys_addr, size, 1 << 1) {
                let _ = gtt.release_range(gpu_addr, size);
                return Err(error);
            }
            gpu_addr
        };
        if let Err(error) = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?
            .set_gpu_addr(handle, gpu_addr)
        {
            // Recording failed: undo the GGTT work before bailing.
            let mut gtt = self
                .gtt
                .lock()
                .map_err(|_| DriverError::Initialization("Intel GGTT state poisoned".into()))?;
            let _ = gtt.unmap_range(gpu_addr, size);
            let _ = gtt.release_range(gpu_addr, size);
            return Err(error);
        }
        Ok(gpu_addr)
    }
    /// Bounds-checked 32-bit read from the BAR2 register aperture.
    fn read_mmio(&self, offset: usize) -> Result<u32> {
        let end = offset
            .checked_add(core::mem::size_of::<u32>())
            .ok_or_else(|| {
                DriverError::Mmio(format!("Intel MMIO offset overflow at {offset:#x}"))
            })?;
        if end > self.mmio.size() {
            return Err(DriverError::Mmio(format!(
                "Intel MMIO read outside BAR2 aperture: end={end:#x} size={:#x}",
                self.mmio.size()
            )));
        }
        Ok(self.mmio.read32(offset))
    }
}
impl GpuDriver for IntelDriver {
    /// Stable driver identifier reported to userspace.
    fn driver_name(&self) -> &str {
        "i915-redox"
    }
    /// Human-readable driver description.
    fn driver_desc(&self) -> &str {
        "Intel i915-class DRM/KMS backend for Redox"
    }
    /// Driver date string.
    fn driver_date(&self) -> &str {
        "2026-04-12"
    }
    /// Probe connectors, falling back to the cached snapshot when the
    /// hardware probe fails.
    fn detect_connectors(&self) -> Vec<ConnectorInfo> {
        match self.refresh_connectors() {
            Ok(connectors) => connectors,
            Err(error) => {
                warn!("redox-drm: Intel connector refresh failed: {}", error);
                self.cached_connectors()
            }
        }
    }
    /// Mode list for one connector; empty when the id is unknown.
    fn get_modes(&self, connector_id: u32) -> Vec<ModeInfo> {
        self.detect_connectors()
            .into_iter()
            .find(|connector| connector.id == connector_id)
            .map(|connector| connector.modes)
            .unwrap_or_default()
    }
    /// Full modeset: bind the framebuffer into the GGTT, program the
    /// pipe for the first connector, flip to the framebuffer, then
    /// record the configuration on the CRTC.
    fn set_crtc(
        &self,
        crtc_id: u32,
        fb_handle: u32,
        connectors: &[u32],
        mode: &ModeInfo,
    ) -> Result<()> {
        if connectors.is_empty() {
            return Err(DriverError::InvalidArgument(
                "set_crtc requires at least one connector",
            ));
        }
        let fb_addr = self.ensure_gem_gpu_mapping(fb_handle)?;
        let mut pipe = self.display.pipe_for_crtc(crtc_id)?;
        // Only the first connector drives the pipe; the full list is
        // recorded on the CRTC below.
        pipe.port = Some(self.connector_port(connectors[0])?);
        self.display.set_mode(&pipe, mode)?;
        self.display.page_flip(&pipe, fb_addr)?;
        let mut crtcs = self
            .crtcs
            .lock()
            .map_err(|_| DriverError::Initialization("Intel CRTC state poisoned".into()))?;
        let crtc = crtcs
            .iter_mut()
            .find(|crtc| crtc.id == crtc_id)
            .ok_or_else(|| DriverError::NotFound(format!("unknown CRTC {crtc_id}")))?;
        crtc.program(fb_handle, connectors, mode)
    }
    /// Flip scanout to `fb_handle` and push a flush through the render
    /// ring; returns the ring's sequence number for fencing.
    fn page_flip(&self, crtc_id: u32, fb_handle: u32, _flags: u32) -> Result<u64> {
        let fb_addr = self.ensure_gem_gpu_mapping(fb_handle)?;
        let pipe = self.display.pipe_for_crtc(crtc_id)?;
        self.display.page_flip(&pipe, fb_addr)?;
        let mut ring = self
            .ring
            .lock()
            .map_err(|_| DriverError::Initialization("Intel ring state poisoned".into()))?;
        ring.flush()?;
        Ok(ring.last_seqno())
    }
    /// Current vblank counter for a known CRTC.
    /// NOTE(review): the counter is global, so every CRTC reports the
    /// same count.
    fn get_vblank(&self, crtc_id: u32) -> Result<u64> {
        let crtcs = self
            .crtcs
            .lock()
            .map_err(|_| DriverError::Initialization("Intel CRTC state poisoned".into()))?;
        if !crtcs.iter().any(|crtc| crtc.id == crtc_id) {
            return Err(DriverError::NotFound(format!("unknown CRTC {crtc_id}")));
        }
        Ok(self.vblank_count.load(Ordering::SeqCst))
    }
    /// Allocate a GEM object and eagerly bind it into the GGTT; the
    /// object is destroyed again if binding fails.
    fn gem_create(&self, size: u64) -> Result<GemHandle> {
        let handle = {
            let mut gem = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
            gem.create(size)?
        };
        if let Err(error) = self.ensure_gem_gpu_mapping(handle) {
            let _ = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?
                .close(handle);
            return Err(error);
        }
        Ok(handle)
    }
    /// Unbind a GEM object from the GGTT (if bound) and destroy it.
    fn gem_close(&self, handle: GemHandle) -> Result<()> {
        let (gpu_addr, size) = {
            let gem = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
            let object = gem.object(handle)?;
            (object.gpu_addr, object.size)
        };
        if let Some(gpu_addr) = gpu_addr {
            let mut gtt = self
                .gtt
                .lock()
                .map_err(|_| DriverError::Initialization("Intel GGTT state poisoned".into()))?;
            gtt.unmap_range(gpu_addr, size)?;
            gtt.release_range(gpu_addr, size)?;
        }
        self.gem
            .lock()
            .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?
            .close(handle)
    }
    /// CPU-visible address of a GEM object's backing memory.
    fn gem_mmap(&self, handle: GemHandle) -> Result<usize> {
        let gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
        gem.mmap(handle)
    }
    /// Size in bytes of a GEM object.
    fn gem_size(&self, handle: GemHandle) -> Result<u64> {
        let gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
        Ok(gem.object(handle)?.size)
    }
    /// Export a GEM object as a DMA-BUF file descriptor.
    fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32> {
        let mut gem = self
            .gem
            .lock()
            .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
        gem.export_dmafd(handle)
    }
    /// Import a DMA-BUF fd as a GEM handle and bind it into the GGTT.
    fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle> {
        let handle = {
            let gem = self
                .gem
                .lock()
                .map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
            gem.import_dmafd(fd)?
        };
        let _ = self.ensure_gem_gpu_mapping(handle)?;
        Ok(handle)
    }
    /// Cached EDID bytes for a connector; empty when the id is unknown.
    /// A poisoned lock degrades to the inner state.
    fn get_edid(&self, connector_id: u32) -> Vec<u8> {
        match self.connectors.lock() {
            Ok(connectors) => connectors
                .iter()
                .find(|connector| connector.info.id == connector_id)
                .map(|connector| connector.edid.clone())
                .unwrap_or_default(),
            Err(poisoned) => poisoned
                .into_inner()
                .iter()
                .find(|connector| connector.info.id == connector_id)
                .map(|connector| connector.edid.clone())
                .unwrap_or_default(),
        }
    }
    /// Poll the IRQ handle; when an interrupt fired, decode it via
    /// process_irq. Returns None when no IRQ line exists or nothing is
    /// pending.
    fn handle_irq(&self) -> Result<Option<(u32, u64)>> {
        let irq_event = {
            let mut irq_handle = self
                .irq_handle
                .lock()
                .map_err(|_| DriverError::Initialization("Intel IRQ state poisoned".into()))?;
            match irq_handle.as_mut() {
                Some(handle) => handle
                    .try_wait()
                    .map_err(|e| DriverError::Io(format!("Intel IRQ poll failed: {e}")))?,
                None => return Ok(None),
            }
        };
        if irq_event.is_none() {
            return Ok(None);
        }
        self.process_irq()
    }
}
/// Probe the display hardware and build matching connector/encoder
/// lists.
///
/// Each detected connector gets an encoder tied to the pipe currently
/// driving its port, an EDID (synthetic when the sink returned none),
/// and a mode list from `modes_for_connector`.
///
/// # Errors
/// Propagates any failure from the hardware connector probe.
fn detect_display_topology(display: &IntelDisplay) -> Result<(Vec<Connector>, Vec<Encoder>)> {
    let detected = display.detect_connectors()?;
    let mut connectors = Vec::with_capacity(detected.len());
    let mut encoders = Vec::with_capacity(detected.len());
    for connector in detected {
        // connector_type_id is 1-based; map it to a zero-based port.
        let port = connector.connector_type_id.saturating_sub(1) as u8;
        // The original match on connector_type had byte-identical arms
        // (every type read EDID the same way), so it collapses to a
        // single call; mode probing is delegated to
        // modes_for_connector below.
        let edid = display.read_edid(port);
        encoders.push(Encoder::new(
            connector.encoder_id,
            pipe_id_for_port(display, port),
        ));
        connectors.push(Connector {
            edid: if edid.is_empty() {
                synthetic_edid()
            } else {
                edid
            },
            info: ConnectorInfo {
                modes: display.modes_for_connector(&connector),
                ..connector
            },
        });
    }
    Ok((connectors, encoders))
}
/// Create one CRTC per hardware pipe (ids are 1-based pipe indices),
/// guaranteeing at least one CRTC even when no pipes are reported.
fn build_crtcs(display: &IntelDisplay) -> Result<Vec<Crtc>> {
    let pipes = display.pipes()?;
    let mut crtcs = Vec::with_capacity(pipes.len().max(1));
    for pipe in pipes {
        crtcs.push(Crtc::new(u32::from(pipe.index) + 1));
    }
    if crtcs.is_empty() {
        crtcs.push(Crtc::new(1));
    }
    Ok(crtcs)
}
/// CRTC id (1-based pipe index) of the pipe currently driving `port`.
/// Defaults to 1 when pipe state cannot be read or no pipe owns the
/// port.
fn pipe_id_for_port(display: &IntelDisplay, port: u8) -> u32 {
    let pipes = match display.pipes() {
        Ok(pipes) => pipes,
        Err(_) => return 1,
    };
    for pipe in pipes {
        if pipe.port == Some(port) {
            return u32::from(pipe.index) + 1;
        }
    }
    1
}
/// True when a hotplug-relevant difference exists between two
/// connector snapshots: a different count, or any positional pair
/// differing in id, connection state, or connector type.
fn connector_status_changed(previous: &[ConnectorInfo], current: &[ConnectorInfo]) -> bool {
    if previous.len() != current.len() {
        return true;
    }
    for (old, new) in previous.iter().zip(current.iter()) {
        if old.id != new.id
            || old.connection != new.connection
            || old.connector_type != new.connector_type
        {
            return true;
        }
    }
    false
}
/// Write the forcewake request bit and read it back to flush the
/// posted write, after checking the register lies inside the aperture.
fn enable_forcewake(mmio: &MmioRegion) -> Result<()> {
    match FORCEWAKE.checked_add(core::mem::size_of::<u32>()) {
        None => {
            return Err(DriverError::Mmio(
                "Intel FORCEWAKE offset overflow".into(),
            ))
        }
        Some(end) if end > mmio.size() => {
            return Err(DriverError::Mmio(format!(
                "Intel FORCEWAKE register outside MMIO aperture: end={end:#x} size={:#x}",
                mmio.size()
            )))
        }
        Some(_) => {}
    }
    mmio.write32(FORCEWAKE, 1);
    // Read-back flushes the posted write; the value itself is unused.
    let _ = mmio.read32(FORCEWAKE);
    Ok(())
}
/// Sanity-check the two Intel BARs before mapping: both must be memory
/// BARs, the GGTT BAR must hold a whole, non-zero number of 8-byte
/// PTEs, and the MMIO BAR must cover every register the driver
/// touches.
fn validate_intel_bars(
    info: &PciDeviceInfo,
    gtt_bar: &PciBarInfo,
    mmio_bar: &PciBarInfo,
) -> Result<()> {
    if !gtt_bar.is_memory() {
        return Err(DriverError::Pci(format!(
            "device {} GGTT BAR{} is not a memory BAR",
            info.location, gtt_bar.index
        )));
    }
    if !mmio_bar.is_memory() {
        return Err(DriverError::Pci(format!(
            "device {} MMIO BAR{} is not a memory BAR",
            info.location, mmio_bar.index
        )));
    }
    let pte_bytes = core::mem::size_of::<u64>() as u64;
    if gtt_bar.size < pte_bytes {
        return Err(DriverError::Pci(format!(
            "device {} GGTT BAR{} is too small ({:#x})",
            info.location, gtt_bar.index, gtt_bar.size
        )));
    }
    if gtt_bar.size % pte_bytes != 0 {
        return Err(DriverError::Pci(format!(
            "device {} GGTT BAR{} size {:#x} is not 8-byte aligned",
            info.location, gtt_bar.index, gtt_bar.size
        )));
    }
    // Highest register end the driver will ever touch through BAR2.
    let reg_width = core::mem::size_of::<u32>();
    let mut required_mmio_end = 0usize;
    for end in [
        FORCEWAKE + reg_width,
        PP_STATUS + reg_width,
        GFX_FLSH_CNTL_REG + reg_width,
        RENDER_RING_BASE + RING_TAIL_OFFSET + reg_width,
        RENDER_RING_BASE + RING_HEAD_OFFSET + reg_width,
    ] {
        required_mmio_end = required_mmio_end.max(end);
    }
    if mmio_bar.size < required_mmio_end as u64 {
        return Err(DriverError::Pci(format!(
            "device {} MMIO BAR{} is too small ({:#x}) for required register window ending at {:#x}",
            info.location, mmio_bar.index, mmio_bar.size, required_mmio_end
        )));
    }
    Ok(())
}
/// Look up memory BAR `index` on `info`, converting absence into a
/// descriptive PCI error using `name`.
fn find_memory_bar(info: &PciDeviceInfo, index: usize, name: &str) -> Result<PciBarInfo> {
    match info.find_memory_bar(index) {
        Some(bar) => Ok(*bar),
        None => Err(DriverError::Pci(format!(
            "device {} has no {}",
            info.location, name
        ))),
    }
}
/// Map `bar` into this process, labeling mapping failures with `name`.
fn map_bar(device: &mut PciDevice, bar: &PciBarInfo, name: &str) -> Result<MmioRegion> {
    let mapped = device.map_bar(bar.index, bar.addr, bar.size as usize);
    mapped.map_err(|e| DriverError::Mmio(format!("failed to map {name}: {e}")))
}
/// DDI buffer-control register offset for `port` (currently unused).
#[allow(dead_code)]
fn ddi_buf_ctl(port: u8) -> usize {
    usize::from(port) * DDI_PORT_STRIDE + DDI_BUF_CTL_BASE
}
/// PIPECONF register offset for `pipe` (currently unused).
#[allow(dead_code)]
fn pipeconf(pipe: &DisplayPipe) -> usize {
    let index = usize::from(pipe.index);
    PIPECONF_BASE + index * PIPE_STRIDE
}
@@ -0,0 +1,267 @@
use std::thread;
use std::time::Duration;
use log::{debug, info};
use redox_driver_sys::dma::DmaBuffer;
use redox_driver_sys::memory::MmioRegion;
use crate::driver::{DriverError, Result};
use super::gtt::IntelGtt;
/// Ring backing store: one 4 KiB page, page-aligned.
const RING_BUFFER_BYTES: usize = 4096;
const RING_ALIGNMENT: usize = 4096;
/// Busy-wait budget for ring space: 2000 * 50 us = up to ~100 ms.
const RING_WAIT_ATTEMPTS: usize = 2000;
const RING_WAIT_DELAY: Duration = Duration::from_micros(50);
// Ring register offsets, relative to each engine's ring_base().
const RBBASE: usize = 0x04;
const RBBASE_HI: usize = 0x08;
const RBTAIL: usize = 0x30;
const RBHEAD: usize = 0x34;
const RBSTART: usize = 0x38;
const RBCTL: usize = 0x3C;
/// RBCTL bit 0 enables the ring; bits above the low 12 hold the size
/// field (see program_ring_registers).
const RING_CTL_ENABLE: u32 = 1 << 0;
const RING_CTL_SIZE_MASK: u32 = !0x0FFF;
// Command-stream opcodes emitted by flush().
const MI_NOOP: u32 = 0x0000_0000;
const MI_FLUSH_DW: u32 = 0x0200_0000;
/// Hardware engine selector; each engine has its own ring-register
/// block (see ring_base).
#[derive(Clone, Copy, Debug)]
pub enum RingType {
    /// Render engine (register base 0x02000).
    Render,
    /// Blit/copy engine (register base 0x22000).
    Blitter,
    /// Video-enhance engine (register base 0x1A000).
    VideoEnhance,
}
/// Software state for one hardware command ring buffer.
pub struct IntelRing {
    /// Register BAR used to program the ring registers.
    mmio: MmioRegion,
    /// Base offset of this engine's ring-register block.
    base: usize,
    /// Cached consumer offset in bytes, masked to the ring size.
    head: u32,
    /// Cached producer offset in bytes, masked to the ring size.
    tail: u32,
    /// Ring size in bytes (a power of two; sync_from_hw masks with
    /// size - 1).
    size: u32,
    /// Which engine this ring drives.
    ring_type: RingType,
    /// CPU-visible DMA backing store for the ring contents.
    buffer: DmaBuffer,
    /// GGTT address once bound via bind_gtt, else None.
    gpu_addr: Option<u64>,
    /// Monotonic submission counter reported as a sequence number.
    last_seqno: u64,
}
impl IntelRing {
    /// Allocate a zeroed, page-aligned DMA buffer for the ring and
    /// reset the head/tail/start registers. The ring cannot accept
    /// submissions until bind_gtt is called.
    pub fn create(mmio: MmioRegion, ring_type: RingType) -> Result<Self> {
        let mut buffer = DmaBuffer::allocate(RING_BUFFER_BYTES, RING_ALIGNMENT)
            .map_err(|e| DriverError::Buffer(format!("Intel ring allocation failed: {e}")))?;
        zero_dma(&mut buffer);
        let ring = Self {
            mmio,
            base: ring_base(ring_type),
            head: 0,
            tail: 0,
            size: RING_BUFFER_BYTES as u32,
            ring_type,
            buffer,
            gpu_addr: None,
            last_seqno: 0,
        };
        // NOTE(review): this checks offset RBCTL relative to the BAR
        // start, not base + RBCTL as write_reg does — the check looks
        // narrower than intended, though write_reg re-validates the
        // real offsets below. Confirm.
        ring.ensure_reg_access(RBCTL, core::mem::size_of::<u32>(), "ring control")?;
        ring.write_reg(RBHEAD, 0)?;
        ring.write_reg(RBTAIL, 0)?;
        ring.write_reg(RBSTART, 0)?;
        info!(
            "redox-drm: Intel {:?} ring allocated ({} bytes)",
            ring.ring_type, ring.size
        );
        Ok(ring)
    }
    /// Bind the ring buffer into the GGTT (idempotent) and program the
    /// ring registers with the resulting GPU address; rolls back the
    /// GGTT reservation if mapping fails.
    pub fn bind_gtt(&mut self, gtt: &mut IntelGtt) -> Result<()> {
        if self.gpu_addr.is_some() {
            return Ok(());
        }
        let gpu_addr = gtt.alloc_range(self.size as u64)?;
        // 1 << 1 is the writable PTE flag (gtt::GTT_PTE_WRITE).
        if let Err(error) = gtt.map_range(
            gpu_addr,
            self.buffer.physical_address() as u64,
            self.size as u64,
            1 << 1,
        ) {
            let _ = gtt.release_range(gpu_addr, self.size as u64);
            return Err(error);
        }
        self.gpu_addr = Some(gpu_addr);
        self.program_ring_registers(gpu_addr)?;
        Ok(())
    }
    /// Copy `buffer` into the ring, publish the new tail to hardware,
    /// and advance the sequence number. Waits (bounded) for ring space.
    pub fn submit_batch(&mut self, buffer: &[u32]) -> Result<()> {
        if buffer.is_empty() {
            return Ok(());
        }
        if self.gpu_addr.is_none() {
            return Err(DriverError::Initialization(
                "Intel ring must be bound into GGTT before submission".into(),
            ));
        }
        self.wait_for_space(buffer.len())?;
        for &dword in buffer {
            self.write_dword(dword)?;
        }
        self.publish_tail()?;
        self.last_seqno = self.last_seqno.saturating_add(1);
        debug!(
            "redox-drm: Intel {:?} ring submitted {} DWORDs seqno={}",
            self.ring_type,
            buffer.len(),
            self.last_seqno
        );
        Ok(())
    }
    /// Poll (up to RING_WAIT_ATTEMPTS * RING_WAIT_DELAY) until the
    /// ring has room for `count` DWORDs; a request that can never fit
    /// is rejected immediately.
    pub fn wait_for_space(&mut self, count: usize) -> Result<()> {
        let required = (count * core::mem::size_of::<u32>()) as u32;
        if required >= self.size {
            return Err(DriverError::Buffer(format!(
                "Intel ring submission too large: {required} bytes >= ring size {}",
                self.size
            )));
        }
        for _ in 0..RING_WAIT_ATTEMPTS {
            self.sync_from_hw()?;
            if required <= self.free_bytes() {
                return Ok(());
            }
            thread::sleep(RING_WAIT_DELAY);
        }
        Err(DriverError::Buffer(format!(
            "Intel {:?} ring did not free {} bytes in time",
            self.ring_type, required
        )))
    }
    /// Emit a minimal flush batch (MI_FLUSH_DW padded with MI_NOOP).
    pub fn flush(&mut self) -> Result<()> {
        self.submit_batch(&[MI_FLUSH_DW, MI_NOOP])
    }
    /// True while the hardware has not consumed everything submitted
    /// (head != tail after a register re-sync).
    pub fn has_activity(&mut self) -> Result<bool> {
        self.sync_from_hw()?;
        Ok(self.head != self.tail)
    }
    /// Refresh cached head/tail from the hardware registers, masked to
    /// the ring size (a power of two).
    pub fn sync_from_hw(&mut self) -> Result<()> {
        self.head = self.read_reg(RBHEAD)? & (self.size - 1);
        self.tail = self.read_reg(RBTAIL)? & (self.size - 1);
        Ok(())
    }
    /// Sequence number of the most recent submission.
    pub fn last_seqno(&self) -> u64 {
        self.last_seqno
    }
    /// Program base/start/control registers and enable the ring.
    /// NOTE(review): the size field written as (size - 0x1000) assumes
    /// the hardware encodes (pages - 1) in bits 12+ — confirm against
    /// the PRM for the targeted generations.
    fn program_ring_registers(&mut self, gpu_addr: u64) -> Result<()> {
        self.write_reg(RBHEAD, 0)?;
        self.write_reg(RBTAIL, 0)?;
        self.write_reg(RBSTART, lower_32(gpu_addr))?;
        self.write_reg(RBBASE, lower_32(gpu_addr))?;
        self.write_reg(RBBASE_HI, upper_32(gpu_addr))?;
        let mut ctl = self.read_reg(RBCTL)?;
        ctl &= !RING_CTL_SIZE_MASK;
        ctl |= (self.size - 0x1000) & RING_CTL_SIZE_MASK;
        ctl |= RING_CTL_ENABLE;
        self.write_reg(RBCTL, ctl)?;
        Ok(())
    }
    /// Free space in bytes; 4 bytes of slack are held back (presumably
    /// so a completely full ring stays distinguishable from an empty
    /// one — confirm).
    fn free_bytes(&self) -> u32 {
        let used = if self.tail >= self.head {
            self.tail - self.head
        } else {
            self.size - (self.head - self.tail)
        };
        self.size.saturating_sub(used).saturating_sub(4)
    }
    /// Volatile-write one DWORD at the current tail and advance it,
    /// wrapping at the ring size.
    fn write_dword(&mut self, value: u32) -> Result<()> {
        let write_offset = self.tail as usize;
        let width = core::mem::size_of::<u32>();
        let end = write_offset
            .checked_add(width)
            .ok_or_else(|| DriverError::Buffer("Intel ring write offset overflow".into()))?;
        if end > self.buffer.len() {
            return Err(DriverError::Buffer(format!(
                "Intel ring write out of bounds: end={end:#x} size={:#x}",
                self.buffer.len()
            )));
        }
        // SAFETY: the bounds check above keeps offset + 4 bytes inside
        // the DMA buffer; write_volatile keeps the store from being
        // elided even though no CPU code reads it back.
        let ptr = unsafe { self.buffer.as_mut_ptr().add(write_offset) as *mut u32 };
        unsafe { core::ptr::write_volatile(ptr, value) };
        self.tail = (self.tail + width as u32) % self.size;
        Ok(())
    }
    /// Make the new tail visible to hardware.
    fn publish_tail(&self) -> Result<()> {
        self.write_reg(RBTAIL, self.tail)
    }
    /// Read a ring register (offset relative to this engine's base),
    /// with overflow and aperture checks.
    fn read_reg(&self, reg: usize) -> Result<u32> {
        let offset = self
            .base
            .checked_add(reg)
            .ok_or_else(|| DriverError::Mmio("Intel ring register offset overflow".into()))?;
        self.ensure_reg_access(offset, core::mem::size_of::<u32>(), "ring read")?;
        Ok(self.mmio.read32(offset))
    }
    /// Write a ring register (offset relative to this engine's base),
    /// with overflow and aperture checks.
    fn write_reg(&self, reg: usize, value: u32) -> Result<()> {
        let offset = self
            .base
            .checked_add(reg)
            .ok_or_else(|| DriverError::Mmio("Intel ring register offset overflow".into()))?;
        self.ensure_reg_access(offset, core::mem::size_of::<u32>(), "ring write")?;
        self.mmio.write32(offset, value);
        Ok(())
    }
    /// Validate that `width` bytes at absolute `offset` fit inside the
    /// register aperture.
    fn ensure_reg_access(&self, offset: usize, width: usize, op: &str) -> Result<()> {
        let end = offset.checked_add(width).ok_or_else(|| {
            DriverError::Mmio(format!("Intel {op} offset overflow at {offset:#x}"))
        })?;
        if end > self.mmio.size() {
            return Err(DriverError::Mmio(format!(
                "Intel {op} outside MMIO aperture: end={end:#x} size={:#x}",
                self.mmio.size()
            )));
        }
        Ok(())
    }
}
/// MMIO base offset of each engine's ring-register block.
fn ring_base(ring_type: RingType) -> usize {
    match ring_type {
        RingType::Render => 0x02000,
        RingType::VideoEnhance => 0x1A000,
        RingType::Blitter => 0x22000,
    }
}
/// Zero-fill the entire DMA buffer so the device never sees stale
/// bytes from a previous allocation.
fn zero_dma(buffer: &mut DmaBuffer) {
    // SAFETY: pointer and length both come from the same DmaBuffer, so
    // the write stays inside its allocation; zero bytes are always a
    // valid bit pattern for u8.
    unsafe { core::ptr::write_bytes(buffer.as_mut_ptr(), 0, buffer.len()) };
}
/// Low 32 bits of a 64-bit GPU address.
fn lower_32(value: u64) -> u32 {
    (value & 0xFFFF_FFFF) as u32
}
/// High 32 bits of a 64-bit GPU address.
fn upper_32(value: u64) -> u32 {
    let hi = value >> 32;
    hi as u32
}
@@ -0,0 +1,43 @@
pub mod amd;
pub mod intel;
use std::collections::HashMap;
use std::sync::Arc;
use redox_driver_sys::pci::{PciDevice, PciDeviceInfo, PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL};
use crate::driver::{DriverError, GpuDriver, Result};
/// Stateless entry point that matches a PCI GPU to the vendor driver
/// able to drive it.
pub struct DriverRegistry;
impl DriverRegistry {
    /// Build the vendor-specific driver for `info`.
    ///
    /// A skeletal `PciDeviceInfo` (no BARs) triggers a re-open of the
    /// device so the full configuration can be read first. Firmware
    /// blobs are forwarded to the vendor driver untouched.
    ///
    /// # Errors
    /// PCI access failures, vendor-driver construction failures, or an
    /// unsupported vendor id.
    pub fn probe(
        info: PciDeviceInfo,
        firmware: HashMap<String, Vec<u8>>,
    ) -> Result<Arc<dyn GpuDriver>> {
        let full = if info.bars.is_empty() {
            let mut device = PciDevice::open_location(&info.location)
                .map_err(|e| DriverError::Pci(format!("open PCI device failed: {e}")))?;
            device
                .full_info()
                .map_err(|e| DriverError::Pci(format!("read PCI device info failed: {e}")))?
        } else {
            info
        };
        let driver: Arc<dyn GpuDriver> = match full.vendor_id {
            PCI_VENDOR_ID_AMD => Arc::new(amd::AmdDriver::new(full, firmware)?),
            PCI_VENDOR_ID_INTEL => Arc::new(intel::IntelDriver::new(full, firmware)?),
            other => {
                return Err(DriverError::Pci(format!(
                    "unsupported GPU vendor {:#06x} at {}",
                    other, full.location
                )))
            }
        };
        Ok(driver)
    }
}
@@ -0,0 +1,146 @@
use std::collections::BTreeMap;
use log::{debug, warn};
use redox_driver_sys::dma::DmaBuffer;
use crate::dmabuf::DmabufManager;
use crate::driver::{DriverError, Result};
/// Opaque userspace identifier for a GEM buffer object.
pub type GemHandle = u32;
/// Metadata for one GEM buffer object; the DMA backing itself lives in
/// GemAllocation.
#[derive(Clone, Debug)]
pub struct GemObject {
    /// Handle this object is registered under.
    #[allow(dead_code)]
    pub handle: GemHandle,
    /// Requested size in bytes.
    #[allow(dead_code)]
    pub size: u64,
    /// Physical address of the backing memory.
    pub phys_addr: usize,
    /// CPU-visible virtual address of the backing memory.
    pub virt_addr: usize,
    /// GGTT address when bound by a driver, else None.
    pub gpu_addr: Option<u64>,
}
/// Pairs the object metadata with the DMA buffer that owns its backing
/// memory; removing the entry drops the buffer.
struct GemAllocation {
    object: GemObject,
    // Held for ownership of the backing allocation, not read directly.
    #[allow(dead_code)]
    dma: DmaBuffer,
}
/// Allocator and registry for GEM buffer objects, plus the DMA-BUF
/// export/import bookkeeping.
pub struct GemManager {
    /// Next handle to hand out; handles start at 1.
    next_handle: GemHandle,
    /// Live objects keyed by handle.
    objects: BTreeMap<GemHandle, GemAllocation>,
    /// DMA-BUF export/import state.
    dmabuf: DmabufManager,
}
impl GemManager {
    /// Create an empty manager; handles start at 1 (0 is treated as
    /// "no handle" by callers).
    pub fn new() -> Self {
        Self {
            next_handle: 1,
            objects: BTreeMap::new(),
            dmabuf: DmabufManager::new(),
        }
    }
    /// Allocate a DMA-backed buffer of `size` bytes and register it
    /// under a fresh handle.
    ///
    /// # Errors
    /// Fails on a zero size, DMA allocation failure, or handle-space
    /// exhaustion.
    pub fn create(&mut self, size: u64) -> Result<GemHandle> {
        if size == 0 {
            return Err(DriverError::InvalidArgument(
                "GEM create size must be non-zero",
            ));
        }
        // Allocate backing memory before touching the handle counter so
        // a failed allocation does not burn a handle number.
        let dma = DmaBuffer::allocate(size as usize, 4096)
            .map_err(|e| DriverError::Buffer(format!("DMA allocation failed: {e}")))?;
        let handle = self.next_handle;
        // The previous saturating counter would silently reuse
        // u32::MAX forever and overwrite that object on the next
        // create; fail loudly instead.
        self.next_handle = handle
            .checked_add(1)
            .ok_or_else(|| DriverError::Buffer("GEM handle space exhausted".into()))?;
        if !dma.is_physically_contiguous() {
            warn!(
                "redox-drm: GEM handle {} allocated without physically contiguous backing",
                handle
            );
        }
        let object = GemObject {
            handle,
            size,
            phys_addr: dma.physical_address(),
            virt_addr: dma.as_ptr() as usize,
            gpu_addr: None,
        };
        debug!(
            "redox-drm: created GEM handle {} size={} phys={:#x} virt={:#x}",
            handle, size, object.phys_addr, object.virt_addr
        );
        self.objects.insert(handle, GemAllocation { object, dma });
        Ok(handle)
    }
    /// Destroy a GEM object; dropping the entry releases its DMA
    /// backing.
    pub fn close(&mut self, handle: GemHandle) -> Result<()> {
        if self.objects.remove(&handle).is_none() {
            return Err(DriverError::NotFound(format!(
                "unknown GEM handle {handle}"
            )));
        }
        Ok(())
    }
    /// CPU-visible address of the object's backing memory.
    pub fn mmap(&self, handle: GemHandle) -> Result<usize> {
        let allocation = self
            .objects
            .get(&handle)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;
        Ok(allocation.object.virt_addr)
    }
    /// Export `handle` as a DMA-BUF fd carrying its physical address
    /// and size.
    #[allow(dead_code)]
    pub fn export_dmafd(&mut self, handle: GemHandle) -> Result<i32> {
        let allocation = self
            .objects
            .get(&handle)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;
        self.dmabuf
            .export_with_info(handle, allocation.object.phys_addr, allocation.object.size)
    }
    /// Resolve a DMA-BUF fd back to the GEM handle that exported it,
    /// verifying the object still exists.
    #[allow(dead_code)]
    pub fn import_dmafd(&self, fd: i32) -> Result<GemHandle> {
        let handle = self.dmabuf.import(fd)?;
        let _ = self.object(handle)?;
        Ok(handle)
    }
    /// Borrow the metadata for `handle`.
    pub fn object(&self, handle: GemHandle) -> Result<&GemObject> {
        self.objects
            .get(&handle)
            .map(|allocation| &allocation.object)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))
    }
    /// Physical address of the object's backing memory.
    pub fn phys_addr(&self, handle: GemHandle) -> Result<usize> {
        Ok(self.object(handle)?.phys_addr)
    }
    /// Record the GGTT address a driver bound the object at.
    pub fn set_gpu_addr(&mut self, handle: GemHandle, gpu_addr: u64) -> Result<()> {
        let allocation = self
            .objects
            .get_mut(&handle)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;
        allocation.object.gpu_addr = Some(gpu_addr);
        Ok(())
    }
    /// GGTT address if the object is currently bound.
    pub fn gpu_addr(&self, handle: GemHandle) -> Result<Option<u64>> {
        Ok(self.object(handle)?.gpu_addr)
    }
    /// Mutable CPU pointer (as usize) into the object's DMA buffer.
    #[allow(dead_code)]
    pub fn object_mut_ptr(&mut self, handle: GemHandle) -> Result<usize> {
        let allocation = self
            .objects
            .get_mut(&handle)
            .ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;
        Ok(allocation.dma.as_mut_ptr() as usize)
    }
}
@@ -0,0 +1,46 @@
use crate::kms::{ConnectorInfo, ConnectorStatus, ConnectorType, ModeInfo};
/// A display connector together with the raw EDID backing its mode list.
#[derive(Clone, Debug)]
pub struct Connector {
    /// KMS-facing description (type, connection status, modes, encoder).
    pub info: ConnectorInfo,
    /// Raw EDID bytes the modes in `info` were derived from.
    #[allow(dead_code)]
    pub edid: Vec<u8>,
}
impl Connector {
    /// Builds a synthetic, always-connected DisplayPort connector whose modes
    /// come from the canned EDID; falls back to a single 1080p mode when the
    /// EDID yields nothing.
    pub fn synthetic_displayport(id: u32, encoder_id: u32) -> Self {
        let edid = synthetic_edid();
        let mut modes = ModeInfo::from_edid(&edid);
        if modes.is_empty() {
            modes.push(ModeInfo::default_1080p());
        }
        let info = ConnectorInfo {
            id,
            connector_type: ConnectorType::DisplayPort,
            connector_type_id: 1,
            connection: ConnectorStatus::Connected,
            mm_width: 600,
            mm_height: 340,
            encoder_id,
            modes,
        };
        Self { info, edid }
    }
}
/// Returns a complete 128-byte EDID base block describing a synthetic
/// 1920x1080 DisplayPort panel named "Synthetic DP".
///
/// The previous version was truncated to 112 bytes. `ModeInfo::from_edid`
/// rejects any blob shorter than 128 bytes, so the synthetic EDID never
/// produced a mode and every consumer saw an invalid EDID. The block now
/// carries all four 18-byte descriptors (detailed timing, range limits,
/// display name, and an unused dummy descriptor), a zero extension count,
/// and a checksum byte (0x59) that makes the 128 bytes sum to 0 mod 256 as
/// the EDID specification requires.
pub fn synthetic_edid() -> Vec<u8> {
    vec![
        // Fixed EDID header.
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
        // Vendor/product ID, serial number, week/year, EDID version 1.4.
        0x4c, 0x2d, 0xfa, 0x12, 0x01, 0x00, 0x00, 0x00, 0x01, 0x1e, 0x01, 0x04,
        // Basic display parameters and chromaticity coordinates.
        0xa5, 0x3c, 0x22, 0x78, 0x3a, 0xee, 0x95, 0xa3, 0x54, 0x4c, 0x99, 0x26,
        0x0f, 0x50, 0x54,
        // Established and standard timings.
        0xbf, 0xef, 0x80, 0x71, 0x4f, 0x81, 0x80, 0x81, 0x40, 0x81, 0xc0, 0x95,
        0x00, 0xa9, 0xc0, 0xb3, 0x00, 0xd1, 0xc0,
        // Descriptor 1: detailed timing, 1920x1080 @ 60 Hz (148.5 MHz clock).
        0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00,
        0x55, 0x50, 0x21, 0x00, 0x00, 0x1e,
        // Descriptor 2: monitor range limits.
        0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
        0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
        // Descriptor 3: display product name "Synthetic DP".
        0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74,
        0x69, 0x63, 0x20, 0x44, 0x50, 0x0a,
        // Descriptor 4: unused (dummy descriptor, tag 0x10).
        0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        // No extension blocks; checksum brings the block sum to 0 mod 256.
        0x00, 0x59,
    ]
}
@@ -0,0 +1,43 @@
use crate::driver::{DriverError, Result};
use crate::kms::ModeInfo;
/// State of one CRTC (display pipe): the framebuffer being scanned out, the
/// connectors it drives, and the programmed mode.
#[derive(Clone, Debug)]
pub struct Crtc {
    /// CRTC identifier.
    pub id: u32,
    /// Handle of the framebuffer currently programmed; 0 when idle.
    pub current_fb: u32,
    /// Connector ids driven by this CRTC.
    pub connectors: Vec<u32>,
    /// Programmed display mode; `None` until `program` succeeds.
    pub mode: Option<ModeInfo>,
    /// Scanout origin (always 0 in `new`).
    #[allow(dead_code)]
    pub x: u32,
    #[allow(dead_code)]
    pub y: u32,
    /// Gamma LUT size (256 entries by default).
    #[allow(dead_code)]
    pub gamma_size: u32,
}
impl Crtc {
    /// Returns an idle CRTC: no framebuffer, no connectors, no mode, origin at
    /// (0, 0), and a 256-entry gamma table.
    pub fn new(id: u32) -> Self {
        Self {
            id,
            current_fb: 0,
            connectors: Vec::new(),
            mode: None,
            x: 0,
            y: 0,
            gamma_size: 256,
        }
    }

    /// Programs this CRTC with a framebuffer, connector set, and mode.
    ///
    /// Rejects an empty connector list with `InvalidArgument`; on success the
    /// CRTC records all three pieces of state.
    pub fn program(&mut self, fb_handle: u32, connectors: &[u32], mode: &ModeInfo) -> Result<()> {
        if connectors.is_empty() {
            return Err(DriverError::InvalidArgument(
                "set_crtc requires at least one connector",
            ));
        }
        self.mode = Some(mode.clone());
        self.connectors = connectors.to_vec();
        self.current_fb = fb_handle;
        Ok(())
    }
}
@@ -0,0 +1,21 @@
use crate::kms::EncoderInfo;
/// Thin wrapper around the KMS encoder description.
#[derive(Clone, Debug)]
pub struct Encoder {
    /// Static encoder metadata exposed to clients.
    #[allow(dead_code)]
    pub info: EncoderInfo,
}
impl Encoder {
    /// Creates an encoder tied to a single CRTC (`possible_crtcs` mask = 1,
    /// no clone support, generic encoder type 0).
    pub fn new(id: u32, crtc_id: u32) -> Self {
        let info = EncoderInfo {
            id,
            encoder_type: 0,
            crtc_id,
            possible_crtcs: 1,
            possible_clones: 0,
        };
        Self { info }
    }
}
@@ -0,0 +1,182 @@
pub mod connector;
pub mod crtc;
pub mod encoder;
pub mod plane;
/// A display timing mode, mirroring the fields of a DRM mode line.
#[derive(Clone, Debug)]
pub struct ModeInfo {
    /// Pixel clock in kHz.
    pub clock: u32,
    // Horizontal timing (pixels): active, sync start/end, total, skew.
    pub hdisplay: u16,
    pub hsync_start: u16,
    pub hsync_end: u16,
    pub htotal: u16,
    pub hskew: u16,
    // Vertical timing (lines): active, sync start/end, total, scan.
    pub vdisplay: u16,
    pub vsync_start: u16,
    pub vsync_end: u16,
    pub vtotal: u16,
    pub vscan: u16,
    /// Nominal refresh rate in Hz.
    pub vrefresh: u32,
    /// Mode flags; `from_edid` sets bit 0 for interlaced modes.
    pub flags: u32,
    /// Mode type bits (unused here; always 0).
    pub type_: u32,
    /// Human-readable name, e.g. "1920x1080@60".
    pub name: String,
}
impl ModeInfo {
    /// Canonical CEA 1920x1080@60 timing (148.5 MHz pixel clock), used as a
    /// fallback when no EDID mode can be parsed.
    pub fn default_1080p() -> Self {
        Self {
            clock: 148_500,
            hdisplay: 1920,
            hsync_start: 2008,
            hsync_end: 2052,
            htotal: 2200,
            hskew: 0,
            vdisplay: 1080,
            vsync_start: 1084,
            vsync_end: 1089,
            vtotal: 1125,
            vscan: 0,
            vrefresh: 60,
            flags: 0,
            type_: 0,
            name: "1920x1080@60".to_string(),
        }
    }
    /// Parses the detailed timing descriptors of an EDID base block into modes.
    ///
    /// Returns an empty vec for blobs shorter than 128 bytes or without the
    /// standard 8-byte EDID header. Only the four 18-byte descriptors at
    /// offsets 54..126 are examined; descriptors whose first two bytes
    /// (pixel clock) are zero are display descriptors, not timings, and are
    /// skipped.
    pub fn from_edid(edid: &[u8]) -> Vec<Self> {
        const EDID_HEADER: [u8; 8] = [0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00];
        if edid.len() < 128 || edid.get(0..8) != Some(&EDID_HEADER) {
            return Vec::new();
        }
        let mut modes = Vec::new();
        for descriptor in edid[54..126].chunks_exact(18) {
            // Pixel clock is stored little-endian in units of 10 kHz.
            let pixel_clock = u16::from_le_bytes([descriptor[0], descriptor[1]]) as u32;
            if pixel_clock == 0 {
                continue;
            }
            // 12-bit horizontal/vertical fields: low 8 bits in their own byte,
            // upper 4 bits packed into the shared nibble bytes 4 and 7.
            let hdisplay = descriptor[2] as u16 | (((descriptor[4] >> 4) as u16) << 8);
            let hblank = descriptor[3] as u16 | (((descriptor[4] & 0x0f) as u16) << 8);
            let vdisplay = descriptor[5] as u16 | (((descriptor[7] >> 4) as u16) << 8);
            let vblank = descriptor[6] as u16 | (((descriptor[7] & 0x0f) as u16) << 8);
            // Sync offsets/widths: byte 11 holds the upper 2 bits of each of
            // the four fields; vertical low nibbles share byte 10.
            let hsync_offset =
                descriptor[8] as u16 | ((((descriptor[11] >> 6) & 0x03) as u16) << 8);
            let hsync_width = descriptor[9] as u16 | ((((descriptor[11] >> 4) & 0x03) as u16) << 8);
            let vsync_offset =
                ((descriptor[10] >> 4) as u16) | ((((descriptor[11] >> 2) & 0x03) as u16) << 4);
            let vsync_width =
                (descriptor[10] & 0x0f) as u16 | (((descriptor[11] & 0x03) as u16) << 4);
            if hdisplay == 0 || vdisplay == 0 {
                continue;
            }
            let htotal = hdisplay.saturating_add(hblank);
            let vtotal = vdisplay.saturating_add(vblank);
            // Convert 10 kHz units to kHz.
            let clock = pixel_clock.saturating_mul(10);
            // Refresh rate = pixel clock / (htotal * vtotal), in Hz.
            let vrefresh = if htotal != 0 && vtotal != 0 {
                clock.saturating_mul(1000) / (htotal as u32).saturating_mul(vtotal as u32)
            } else {
                0
            };
            modes.push(Self {
                clock,
                hdisplay,
                hsync_start: hdisplay.saturating_add(hsync_offset),
                hsync_end: hdisplay
                    .saturating_add(hsync_offset)
                    .saturating_add(hsync_width),
                htotal,
                hskew: 0,
                vdisplay,
                vsync_start: vdisplay.saturating_add(vsync_offset),
                vsync_end: vdisplay
                    .saturating_add(vsync_offset)
                    .saturating_add(vsync_width),
                vtotal,
                vscan: 0,
                vrefresh,
                // Byte 17 bit 7 marks an interlaced timing; encoded here as flag bit 0.
                flags: if (descriptor[17] & 0x80) != 0 { 1 } else { 0 },
                type_: 0,
                name: format!("{}x{}@{}", hdisplay, vdisplay, vrefresh),
            });
        }
        modes
    }
}
/// Client-visible description of one connector.
#[derive(Clone, Debug)]
pub struct ConnectorInfo {
    pub id: u32,
    pub connector_type: ConnectorType,
    /// Per-type index (e.g. the "1" in DP-1).
    #[allow(dead_code)]
    pub connector_type_id: u32,
    pub connection: ConnectorStatus,
    // Physical panel size in millimeters.
    pub mm_width: u32,
    pub mm_height: u32,
    /// Encoder currently associated with this connector.
    pub encoder_id: u32,
    /// Modes advertised to clients (EDID-derived or synthetic).
    pub modes: Vec<ModeInfo>,
}
/// Physical connector kinds, mirroring the DRM connector type list.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ConnectorType {
    Unknown,
    VGA,
    DVII,
    DVID,
    DVIA,
    #[allow(dead_code)]
    Composite,
    #[allow(dead_code)]
    SVideo,
    #[allow(dead_code)]
    LVDS,
    #[allow(dead_code)]
    Component,
    #[allow(dead_code)]
    NinePinDIN,
    DisplayPort,
    HDMIA,
    #[allow(dead_code)]
    HDMIB,
    #[allow(dead_code)]
    TV,
    EDP,
    Virtual,
}
/// Hotplug/detection state of a connector.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ConnectorStatus {
    Connected,
    Disconnected,
    Unknown,
}
/// Snapshot of a CRTC's programmed state.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct CrtcInfo {
    pub id: u32,
    /// Framebuffer currently scanned out (0 if none).
    pub fb_id: u32,
    // Scanout origin within the framebuffer.
    pub x: u32,
    pub y: u32,
    /// Number of gamma LUT entries.
    pub gamma_size: u32,
    /// Active mode, if the CRTC is programmed.
    pub mode: Option<ModeInfo>,
}
/// Static description of an encoder and its CRTC routing masks.
#[derive(Clone, Debug)]
pub struct EncoderInfo {
    #[allow(dead_code)]
    pub id: u32,
    #[allow(dead_code)]
    pub encoder_type: u32,
    /// CRTC this encoder is wired to.
    #[allow(dead_code)]
    pub crtc_id: u32,
    // Bitmasks of CRTCs this encoder can use / clone with.
    #[allow(dead_code)]
    pub possible_crtcs: u32,
    #[allow(dead_code)]
    pub possible_clones: u32,
}
@@ -0,0 +1,42 @@
use crate::driver::{DriverError, Result};
/// Role of a hardware plane in the display pipeline.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PlaneKind {
    Primary,
    Cursor,
}
/// One scanout plane and its current CRTC/framebuffer binding.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct Plane {
    pub id: u32,
    pub kind: PlaneKind,
    /// Framebuffer bound via `attach`; `None` when detached.
    pub fb_handle: Option<u32>,
    /// CRTC bound via `attach`; `None` when detached.
    pub crtc_id: Option<u32>,
}
impl Plane {
    /// Creates a detached plane of the given kind.
    #[allow(dead_code)]
    pub fn new(id: u32, kind: PlaneKind) -> Self {
        Self {
            id,
            kind,
            fb_handle: None,
            crtc_id: None,
        }
    }

    /// Binds this plane to a CRTC and framebuffer.
    ///
    /// A zero framebuffer handle is invalid (0 is the "no FB" sentinel) and
    /// yields `InvalidArgument`.
    #[allow(dead_code)]
    pub fn attach(&mut self, crtc_id: u32, fb_handle: u32) -> Result<()> {
        if fb_handle == 0 {
            return Err(DriverError::InvalidArgument(
                "plane attach requires a framebuffer handle",
            ));
        }
        self.fb_handle = Some(fb_handle);
        self.crtc_id = Some(crtc_id);
        Ok(())
    }
}
@@ -0,0 +1,312 @@
#![allow(dead_code)]
mod dmabuf;
mod driver;
mod drivers;
mod gem;
mod kms;
mod scheme;
use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io::Read;
use std::process;
use std::sync::mpsc;
use std::sync::{Arc, Mutex};
use log::{error, info, LevelFilter, Metadata, Record};
use redox_driver_sys::pci::{
enumerate_pci_class, PciDevice, PciDeviceInfo, PciLocation, PCI_CLASS_DISPLAY,
PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL,
};
use redox_scheme::{SignalBehavior, Socket};
use crate::driver::{DriverError, GpuDriver, Result};
use crate::drivers::DriverRegistry;
use crate::scheme::DrmScheme;
/// Minimal `log` backend that writes every enabled record to stderr.
struct StderrLogger {
    /// Maximum level that will be emitted.
    level: LevelFilter,
}
impl log::Log for StderrLogger {
    /// A record is enabled when its level does not exceed the configured filter.
    fn enabled(&self, metadata: &Metadata) -> bool {
        self.level >= metadata.level()
    }
    /// Writes "[LEVEL] message" to stderr for enabled records.
    fn log(&self, record: &Record) {
        if !self.enabled(record.metadata()) {
            return;
        }
        eprintln!("[{}] {}", record.level(), record.args());
    }
    /// stderr is unbuffered per line; nothing to flush.
    fn flush(&self) {}
}
fn init_logging(level: LevelFilter) {
let logger = Box::leak(Box::new(StderrLogger { level }));
if log::set_logger(logger).is_err() {
return;
}
log::set_max_level(level);
}
/// Main daemon loop: selects a GPU, initializes its driver, registers the
/// `drm` scheme, spawns the IRQ-poll and vblank-retire threads, then serves
/// scheme requests until the scheme is unmounted.
fn run() -> Result<()> {
    let info = select_gpu_from_args()?;
    verify_supported_gpu(&info)?;
    let firmware = FirmwareCache::load_for_device(&info)?;
    let driver = DriverRegistry::probe(info.clone(), firmware.into_blobs())?;
    info!(
        "redox-drm: initialized driver {} ({}) for {}",
        driver.driver_name(),
        driver.driver_desc(),
        info.location
    );
    let socket = Socket::create("drm")
        .map_err(|e| DriverError::Initialization(format!("failed to register drm scheme: {e}")))?;
    info!("redox-drm: registered scheme:drm");
    // Bounded channel (8 entries) carrying (crtc_id, vblank_count) events.
    let (vblank_tx, vblank_rx) = mpsc::sync_channel::<(u32, u64)>(8);
    let irq_driver: Arc<dyn GpuDriver> = driver.clone();
    // IRQ thread: polls the driver roughly every 16 ms (~60 Hz).
    // try_send deliberately drops events when the channel is full rather than
    // blocking the poll loop.
    std::thread::spawn(move || loop {
        match irq_driver.handle_irq() {
            Ok(Some((crtc_id, count))) => {
                let _ = vblank_tx.try_send((crtc_id, count));
            }
            Ok(None) => {}
            Err(e) => {
                error!("redox-drm: IRQ handler error: {}", e);
            }
        }
        std::thread::sleep(std::time::Duration::from_millis(16));
    });
    // The scheme state is shared between the request loop and the vblank
    // retire thread via a mutex.
    let drm_scheme = Arc::new(Mutex::new(DrmScheme::new(driver)));
    let vblank_scheme = drm_scheme.clone();
    // Vblank thread: retires pending page flips as vblank events arrive.
    std::thread::spawn(move || loop {
        if let Ok((crtc_id, vblank_count)) = vblank_rx.recv() {
            if let Ok(mut scheme) = vblank_scheme.lock() {
                scheme.retire_vblank(crtc_id, vblank_count);
            }
        }
    });
    // Request loop: block for the next scheme request, dispatch it under the
    // lock, and write the response back.
    loop {
        let request = match socket.next_request(SignalBehavior::Restart) {
            Ok(Some(request)) => request,
            Ok(None) => {
                // next_request returning None means the scheme was unmounted.
                info!("redox-drm: scheme unmounted, exiting");
                break;
            }
            Err(e) => {
                error!("redox-drm: failed to receive scheme request: {}", e);
                continue;
            }
        };
        let response = {
            let mut scheme = match drm_scheme.lock() {
                Ok(scheme) => scheme,
                Err(_) => {
                    // A panic while holding the lock poisons it; skip the request.
                    error!("redox-drm: DRM scheme state poisoned");
                    continue;
                }
            };
            // Lock is held only for the duration of one request.
            request.handle_scheme_block_mut(&mut *scheme)
        };
        let response = match response {
            Ok(response) => response,
            Err(_request) => {
                error!("redox-drm: failed to handle request");
                continue;
            }
        };
        if let Err(e) = socket.write_response(response, SignalBehavior::Restart) {
            error!("redox-drm: failed to write scheme response: {}", e);
        }
    }
    Ok(())
}
/// Resolves the GPU to drive.
///
/// When three CLI arguments (bus, device, function) are present they pin an
/// explicit PCI location; otherwise the PCI display class is scanned and the
/// first AMD or Intel device wins.
fn select_gpu_from_args() -> Result<PciDeviceInfo> {
    let argv: Vec<String> = env::args().skip(1).collect();
    if argv.len() >= 3 {
        let location = parse_location(&argv[0], &argv[1], &argv[2])?;
        let mut pci = PciDevice::open_location(&location).map_err(|e| {
            DriverError::Pci(format!("failed to open PCI device {}: {e}", location))
        })?;
        return pci.full_info().map_err(|e| {
            DriverError::Pci(format!("failed to read PCI info for {}: {e}", location))
        });
    }
    // No explicit location: autodetect the first supported display adapter.
    let candidate = enumerate_pci_class(PCI_CLASS_DISPLAY)
        .map_err(|e| DriverError::Pci(format!("PCI scan failed: {e}")))?
        .into_iter()
        .find(|d| [PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL].contains(&d.vendor_id))
        .ok_or_else(|| {
            DriverError::NotFound("no AMD or Intel GPU found via scheme:pci".to_string())
        })?;
    let mut pci = PciDevice::open_location(&candidate.location)
        .map_err(|e| DriverError::Pci(format!("failed to open GPU {}: {e}", candidate.location)))?;
    pci.full_info()
        .map_err(|e| DriverError::Pci(format!("failed to read GPU {}: {e}", candidate.location)))
}
/// Builds a segment-0 PCI location from textual bus/device/function
/// coordinates (see `parse_u8` for the accepted number formats).
fn parse_location(bus: &str, device: &str, function: &str) -> Result<PciLocation> {
    Ok(PciLocation {
        segment: 0,
        bus: parse_u8(bus)?,
        device: parse_u8(device)?,
        function: parse_u8(function)?,
    })
}
/// Parses one PCI coordinate (bus/device/function) into a `u8`.
///
/// A single `0x`/`0X` prefix is stripped. Unprefixed values keep the
/// historical behavior: hexadecimal is tried first, with a decimal fallback
/// for values that only fit in base 10 (e.g. "200" -> 200, since 0x200
/// overflows u8).
///
/// Fixes over the previous version: `trim_start_matches("0x")` stripped the
/// prefix repeatedly (so "0x0x1a" was accepted), and an uppercase "0X"
/// prefix was rejected outright because the 'X' survived into the hex parse.
fn parse_u8(value: &str) -> Result<u8> {
    let digits = value
        .strip_prefix("0x")
        .or_else(|| value.strip_prefix("0X"))
        .unwrap_or(value);
    u8::from_str_radix(digits, 16)
        .or_else(|_| digits.parse::<u8>())
        .map_err(|_| DriverError::InvalidArgument("invalid PCI coordinate"))
}
/// Rejects devices that are not display-class or not from a supported vendor
/// (AMD or Intel), returning a descriptive `Pci` error in either case.
fn verify_supported_gpu(info: &PciDeviceInfo) -> Result<()> {
    if info.class_code != PCI_CLASS_DISPLAY {
        return Err(DriverError::Pci(format!(
            "device {} is class {:#04x}, expected display class {:#04x}",
            info.location, info.class_code, PCI_CLASS_DISPLAY
        )));
    }
    let vendor_supported = [PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL].contains(&info.vendor_id);
    if !vendor_supported {
        return Err(DriverError::Pci(format!(
            "device {} is vendor {:#06x}, expected AMD {:#06x} or Intel {:#06x}",
            info.location, info.vendor_id, PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL
        )));
    }
    Ok(())
}
/// In-memory cache of firmware blobs read from `scheme:firmware`,
/// keyed by the amdgpu-style firmware name.
struct FirmwareCache {
    blobs: HashMap<String, Vec<u8>>,
}
impl FirmwareCache {
    /// Loads the firmware blobs an AMD GPU needs from `scheme:firmware`.
    ///
    /// Non-AMD (i.e. Intel) devices need no external firmware here, so an
    /// empty cache is returned for them. For AMD devices every known blob is
    /// tried best-effort; individual failures are logged at info level, but
    /// loading *none* of them is fatal because the driver cannot bring the
    /// GPU up without any microcode.
    fn load_for_device(info: &PciDeviceInfo) -> Result<Self> {
        // Only AMD parts consume firmware from scheme:firmware.
        if info.vendor_id != PCI_VENDOR_ID_AMD {
            info!(
                "redox-drm: skipping firmware load for Intel GPU {}",
                info.location
            );
            return Ok(Self {
                blobs: HashMap::new(),
            });
        }
        // The device is known to be AMD past the early return, so the key
        // list is unconditional (the previous vendor re-checks were dead code).
        const FIRMWARE_KEYS: &[&str] = &[
            "amdgpu/psp_13_0_0_sos",
            "amdgpu/psp_13_0_0_ta",
            "amdgpu/gc_11_0_0_pfp",
            "amdgpu/gc_11_0_0_me",
            "amdgpu/gc_11_0_0_ce",
            "amdgpu/gc_11_0_0_rlc",
            "amdgpu/gc_11_0_0_mec",
            "amdgpu/gc_11_0_0_mec2",
            "amdgpu/dcn_3_1_dmcub",
            "amdgpu/dmcub_dcn20.bin",
            "amdgpu/dmcub_dcn31.bin",
            "amdgpu/sdma_5_0",
            "amdgpu/sdma_5_2",
            "amdgpu/vcn_3_0_0",
            "amdgpu/vcn_3_1_0",
        ];
        let mut blobs = HashMap::new();
        for &key in FIRMWARE_KEYS {
            let path = format!("/scheme/firmware/{}", key);
            match File::open(&path) {
                Ok(mut file) => {
                    // Pre-size the buffer from file metadata when available;
                    // fall back to 1 MiB when the size is unknown.
                    let estimated_size = file
                        .metadata()
                        .map(|m| m.len() as usize)
                        .unwrap_or(1024 * 1024);
                    let mut buf = Vec::with_capacity(estimated_size);
                    match file.read_to_end(&mut buf) {
                        Ok(bytes_read) => {
                            info!("redox-drm: loaded firmware {} ({} bytes)", key, bytes_read);
                            blobs.insert(key.to_string(), buf);
                        }
                        Err(e) => {
                            info!("redox-drm: failed to read firmware {}: {}", key, e);
                        }
                    }
                }
                Err(e) => {
                    info!("redox-drm: firmware {} not available: {}", key, e);
                }
            }
        }
        // `blobs` is non-empty exactly when at least one blob loaded, which
        // replaces the old separate `loaded_any` flag.
        if blobs.is_empty() {
            return Err(DriverError::NotFound(
                "no AMD firmware blobs available from scheme:firmware".to_string(),
            ));
        }
        info!(
            "redox-drm: firmware cache populated with {} blob(s)",
            blobs.len()
        );
        Ok(Self { blobs })
    }
    /// Borrows a blob by key, if it was loaded.
    #[allow(dead_code)]
    fn get(&self, key: &str) -> Option<&[u8]> {
        self.blobs.get(key).map(|v| v.as_slice())
    }
    /// Consumes the cache, yielding the blob map for the driver registry.
    fn into_blobs(self) -> HashMap<String, Vec<u8>> {
        self.blobs
    }
}
fn main() {
let log_level = match env::var("REDOX_DRM_LOG").as_deref() {
Ok("trace") => LevelFilter::Trace,
Ok("debug") => LevelFilter::Debug,
Ok("warn") => LevelFilter::Warn,
Ok("error") => LevelFilter::Error,
_ => LevelFilter::Info,
};
init_logging(log_level);
if let Err(error) = run() {
error!("redox-drm: fatal error: {}", error);
process::exit(1);
}
}
@@ -0,0 +1,975 @@
use std::collections::{BTreeMap, HashSet};
use std::mem::size_of;
use std::sync::Arc;
use log::{debug, warn};
use redox_scheme::SchemeBlockMut;
use syscall04::data::Stat;
use syscall04::error::{Error, Result, EBADF, EBUSY, EINVAL, ENOENT, EOPNOTSUPP};
use syscall04::flag::{EventFlags, MapFlags, MunmapFlags, MODE_FILE};
use crate::driver::GpuDriver;
use crate::gem::GemHandle;
use crate::kms::ModeInfo;
/// Geometry of a registered framebuffer and the GEM object backing it.
#[derive(Clone, Debug)]
struct FbInfo {
    gem_handle: GemHandle,
    width: u32,
    height: u32,
    /// Bytes per scanline.
    pitch: u32,
    /// Bits per pixel.
    bpp: u32,
}
// ---- DRM ioctl request codes ----
// NOTE(review): the offsets loosely follow Linux's DRM_MODE_* ioctl ordinals
// but this is a scheme-local protocol — confirm clients use the same table.
const DRM_IOCTL_BASE: usize = 0x00A0;
const DRM_IOCTL_MODE_GETRESOURCES: usize = DRM_IOCTL_BASE;
const DRM_IOCTL_MODE_GETCONNECTOR: usize = DRM_IOCTL_BASE + 7;
const DRM_IOCTL_MODE_GETMODES: usize = DRM_IOCTL_BASE + 8;
const DRM_IOCTL_MODE_SETCRTC: usize = DRM_IOCTL_BASE + 2;
const DRM_IOCTL_MODE_GETCRTC: usize = DRM_IOCTL_BASE + 3;
const DRM_IOCTL_MODE_GETENCODER: usize = DRM_IOCTL_BASE + 6;
const DRM_IOCTL_MODE_PAGE_FLIP: usize = DRM_IOCTL_BASE + 16;
const DRM_IOCTL_MODE_CREATE_DUMB: usize = DRM_IOCTL_BASE + 18;
const DRM_IOCTL_MODE_MAP_DUMB: usize = DRM_IOCTL_BASE + 19;
const DRM_IOCTL_MODE_DESTROY_DUMB: usize = DRM_IOCTL_BASE + 20;
const DRM_IOCTL_MODE_ADDFB: usize = DRM_IOCTL_BASE + 21;
const DRM_IOCTL_MODE_RMFB: usize = DRM_IOCTL_BASE + 22;
const DRM_IOCTL_GET_CAP: usize = DRM_IOCTL_BASE + 23;
const DRM_IOCTL_SET_CLIENT_CAP: usize = DRM_IOCTL_BASE + 24;
const DRM_IOCTL_VERSION: usize = DRM_IOCTL_BASE + 25;
// ---- Wire types for DRM ioctls ----
// All wire structs are #[repr(C)] and copied to/from the ioctl payload as raw
// bytes (via the bytes_of/decode_wire helpers), so field order is ABI.
/// GETRESOURCES reply: object counts available to clients.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmResourcesWire {
    connector_count: u32,
    crtc_count: u32,
    encoder_count: u32,
}
/// GETCONNECTOR reply header; followed on the wire by `mode_count` entries of
/// `DrmModeWire`, each trailed by a NUL-terminated mode name.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmConnectorWire {
    connector_id: u32,
    /// 1 = connected, 2 = disconnected, 0 = unknown.
    connection: u32,
    connector_type: u32,
    mm_width: u32,
    mm_height: u32,
    encoder_id: u32,
    mode_count: u32,
}
/// Fixed-size mode timing record (ModeInfo minus the name string).
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmModeWire {
    clock: u32,
    hdisplay: u16,
    hsync_start: u16,
    hsync_end: u16,
    htotal: u16,
    hskew: u16,
    vdisplay: u16,
    vsync_start: u16,
    vsync_end: u16,
    vtotal: u16,
    vscan: u16,
    vrefresh: u32,
    flags: u32,
    type_: u32,
}
/// SETCRTC request; `fb_handle == 0 && connector_count == 0` disables the CRTC.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmSetCrtcWire {
    crtc_id: u32,
    fb_handle: u32,
    connector_count: u32,
    /// At most 8 connectors per CRTC on this protocol.
    connectors: [u32; 8],
    mode: DrmModeWire,
}
/// PAGE_FLIP request.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmPageFlipWire {
    crtc_id: u32,
    fb_handle: u32,
    flags: u32,
}
/// CREATE_DUMB request/reply; `pitch`, `size`, and `handle` are filled by the
/// server.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmCreateDumbWire {
    width: u32,
    height: u32,
    bpp: u32,
    flags: u32,
    pitch: u32,
    size: u64,
    handle: u32,
}
/// MAP_DUMB request/reply; `offset` is filled by the server.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmMapDumbWire {
    handle: u32,
    offset: u64,
}
/// DESTROY_DUMB request.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmDestroyDumbWire {
    handle: u32,
}
/// GETENCODER request/reply.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGetEncoderWire {
    encoder_id: u32,
    encoder_type: u32,
    crtc_id: u32,
    possible_crtcs: u32,
    possible_clones: u32,
}
/// ADDFB request/reply; `fb_id` is filled by the server.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmAddFbWire {
    width: u32,
    height: u32,
    pitch: u32,
    bpp: u32,
    depth: u32,
    handle: u32,
    fb_id: u32,
}
/// RMFB request.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmRmFbWire {
    fb_id: u32,
}
/// GETCRTC request/reply; `mode` is valid only when `mode_valid != 0`.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGetCrtcWire {
    crtc_id: u32,
    fb_id: u32,
    x: u32,
    y: u32,
    mode_valid: u32,
    mode: DrmModeWire,
}
/// VERSION reply.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmVersionWire {
    major: i32,
    minor: i32,
    patch: i32,
}
/// GET_CAP request/reply; `value` is filled by the server.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGetCapWire {
    capability: u64,
    value: u64,
}
/// SET_CLIENT_CAP request (currently accepted and ignored).
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmSetClientCapWire {
    capability: u64,
    value: u64,
}
// ---- Internal handle types ----
/// What a scheme path resolved to at open time.
#[derive(Clone, Debug)]
enum NodeKind {
    Card,
    Connector(u32),
}
/// Per-open-fd state within the scheme.
struct Handle {
    node: NodeKind,
    /// Response bytes of the last ioctl, served by subsequent read()s.
    response: Vec<u8>,
    /// GEM object last mapped via MAP_DUMB on this fd, if any.
    mapped_gem: Option<GemHandle>,
    // Framebuffers and GEM objects created through this fd; used for
    // ownership checks and cleanup/reaping decisions.
    owned_fbs: Vec<u32>,
    owned_gems: Vec<GemHandle>,
}
/// The `drm` scheme: ioctl dispatch plus framebuffer/GEM lifetime tracking.
pub struct DrmScheme {
    driver: Arc<dyn GpuDriver>,
    /// Next open-fd id.
    next_id: usize,
    /// Next framebuffer id; starts at 1 so 0 stays "no FB".
    next_fb_id: u32,
    handles: BTreeMap<usize, Handle>,
    /// Framebuffer currently scanned out, per CRTC.
    active_crtc_fb: BTreeMap<u32, u32>,
    /// Mode currently programmed, per CRTC.
    active_crtc_mode: BTreeMap<u32, ModeInfo>,
    /// Per CRTC: (vblank count at which the flip completes, the FB being
    /// replaced) for an in-flight page flip.
    pending_flip_fb: BTreeMap<u32, (u64, u32)>,
    /// All registered framebuffers by id.
    fb_registry: BTreeMap<u32, FbInfo>,
}
impl DrmScheme {
    /// Creates empty scheme state around an initialized GPU driver.
    pub fn new(driver: Arc<dyn GpuDriver>) -> Self {
        Self {
            driver,
            next_id: 0,
            next_fb_id: 1,
            handles: BTreeMap::new(),
            active_crtc_fb: BTreeMap::new(),
            active_crtc_mode: BTreeMap::new(),
            pending_flip_fb: BTreeMap::new(),
            fb_registry: BTreeMap::new(),
        }
    }
    /// Drops all per-fd state for a closed handle.
    // NOTE(review): this does not reap framebuffers or GEM objects the fd
    // owned — confirm whether the scheme's close path performs that cleanup.
    #[allow(dead_code)]
    pub fn on_close(&mut self, id: usize) {
        self.handles.remove(&id);
    }
    /// True when `fb_id` is being scanned out or is the outgoing FB of a
    /// pending page flip.
    fn is_fb_active(&self, fb_id: u32) -> bool {
        self.active_crtc_fb.values().any(|&id| id == fb_id)
            || self.pending_flip_fb.values().any(|&(_, id)| id == fb_id)
    }
    /// Called from the vblank thread: once the vblank counter reaches the
    /// flip's target, the flip is complete and the replaced FB may be reaped.
    pub fn retire_vblank(&mut self, crtc_id: u32, vblank_count: u64) {
        if let Some((expected, fb_id)) = self.pending_flip_fb.get(&crtc_id).copied() {
            if expected <= vblank_count {
                self.pending_flip_fb.remove(&crtc_id);
                self.try_reap_fb(fb_id);
            }
        }
    }
    /// Frees a framebuffer (and possibly its backing GEM object) once nothing
    /// references it anymore: no fd owns the FB, and the GEM object is neither
    /// backing another FB nor owned by any fd.
    fn try_reap_fb(&mut self, fb_id: u32) {
        let gem_handle = match self.fb_registry.get(&fb_id) {
            Some(info) => info.gem_handle,
            None => return,
        };
        // An fd still owns the FB: leave it alone (RMFB will handle it).
        let still_owned = self.handles.values().any(|h| h.owned_fbs.contains(&fb_id));
        if still_owned {
            return;
        }
        self.fb_registry.remove(&fb_id);
        // Close the GEM object only when no other FB and no fd reference it.
        let still_referenced = self
            .fb_registry
            .values()
            .any(|i| i.gem_handle == gem_handle);
        let gem_owned = self
            .handles
            .values()
            .any(|h| h.owned_gems.contains(&gem_handle));
        if !still_referenced && !gem_owned {
            if let Err(e) = self.driver.gem_close(gem_handle) {
                warn!(
                    "redox-drm: try_reap_fb gem_close({}) failed: {}",
                    gem_handle, e
                );
            }
        }
    }
    // ---- Encode helpers ----
    /// Serializes the GETRESOURCES reply. One CRTC is reported, with one
    /// encoder per detected connector.
    fn encode_resources(&self) -> Vec<u8> {
        let connectors = self.driver.detect_connectors();
        let payload = DrmResourcesWire {
            connector_count: connectors.len() as u32,
            crtc_count: 1,
            encoder_count: connectors.len() as u32,
        };
        bytes_of(&payload)
    }
    /// Serializes the GETCONNECTOR reply: the header followed by each mode's
    /// wire record and NUL-terminated name.
    fn encode_connector(&self, connector_id: u32) -> Result<Vec<u8>> {
        let connector = self
            .driver
            .detect_connectors()
            .into_iter()
            .find(|c| c.id == connector_id)
            .ok_or_else(|| Error::new(ENOENT))?;
        let header = DrmConnectorWire {
            connector_id: connector.id,
            connection: match connector.connection {
                crate::kms::ConnectorStatus::Connected => 1,
                crate::kms::ConnectorStatus::Disconnected => 2,
                crate::kms::ConnectorStatus::Unknown => 0,
            },
            connector_type: connector_type_to_u32(connector.connector_type),
            mm_width: connector.mm_width,
            mm_height: connector.mm_height,
            encoder_id: connector.encoder_id,
            mode_count: connector.modes.len() as u32,
        };
        let mut out = bytes_of(&header);
        for mode in &connector.modes {
            out.extend_from_slice(&bytes_of(&mode_to_wire(mode)));
            out.extend_from_slice(mode.name.as_bytes());
            out.push(0);
        }
        Ok(out)
    }
    // ---- ioctl dispatch ----
    /// Handles one ioctl written to fd `id`: decodes `payload`, performs the
    /// operation, stores the encoded reply in the handle's response buffer
    /// (read back by a subsequent read()), and returns the reply length.
    fn handle_ioctl(&mut self, id: usize, request: usize, payload: &[u8]) -> Result<usize> {
        let response = match request {
            DRM_IOCTL_MODE_GETRESOURCES => self.encode_resources(),
            DRM_IOCTL_MODE_GETCONNECTOR => {
                // Connector id comes from the payload when present, otherwise
                // from the fd's own node (a connector path).
                let connector_id = if payload.len() >= size_of::<u32>() {
                    read_u32(payload, 0)?
                } else {
                    match self.handles.get(&id).map(|h| &h.node) {
                        Some(NodeKind::Connector(cid)) => *cid,
                        _ => return Err(Error::new(EINVAL)),
                    }
                };
                self.encode_connector(connector_id)?
            }
            DRM_IOCTL_MODE_GETMODES => {
                let connector_id = read_u32(payload, 0)?;
                let modes = self.driver.get_modes(connector_id);
                encode_modes(&modes)
            }
            DRM_IOCTL_MODE_SETCRTC => {
                let req = decode_wire::<DrmSetCrtcWire>(payload)?;
                // fb 0 with no connectors disables the CRTC: drop tracked
                // state and reap the outgoing framebuffers.
                if req.fb_handle == 0 && req.connector_count == 0 {
                    let completed_flip = self.pending_flip_fb.remove(&req.crtc_id);
                    let prev_fb_id = self.active_crtc_fb.remove(&req.crtc_id);
                    self.active_crtc_mode.remove(&req.crtc_id);
                    if let Some((_, fb_id)) = completed_flip {
                        self.try_reap_fb(fb_id);
                    }
                    if let Some(fb_id) = prev_fb_id {
                        self.try_reap_fb(fb_id);
                    }
                    // NOTE(review): this early return reports 1 byte written
                    // without updating the handle's response buffer — a
                    // following read() sees the previous reply. Confirm
                    // clients never read after a disable.
                    return Ok(1);
                }
                let count = req.connector_count as usize;
                if count > req.connectors.len() {
                    return Err(Error::new(EINVAL));
                }
                let conns = req.connectors[..count].to_vec();
                let fb_info = self.fb_registry.get(&req.fb_handle).ok_or_else(|| {
                    warn!("redox-drm: SETCRTC with unknown fb_id {}", req.fb_handle);
                    Error::new(ENOENT)
                })?;
                let mode = wire_to_mode(&req.mode);
                // Validate that the FB is large enough for the requested mode.
                let fb_pitch = fb_info.pitch as u64;
                let required_fb_lines = mode.vdisplay as u64;
                let fb_height = fb_info.height as u64;
                let fb_width = fb_info.width as u64;
                let mode_width = mode.hdisplay as u64;
                if fb_pitch.checked_mul(required_fb_lines).is_none() {
                    warn!("redox-drm: SETCRTC FB pitch * mode_height overflows");
                    return Err(Error::new(EINVAL));
                }
                if fb_pitch == 0 || fb_height < required_fb_lines || fb_width < mode_width {
                    warn!(
                        "redox-drm: SETCRTC FB {}x{} pitch={} too small for mode {}x{}",
                        fb_info.width, fb_info.height, fb_info.pitch, mode.hdisplay, mode.vdisplay
                    );
                    return Err(Error::new(EINVAL));
                }
                let gem_handle = fb_info.gem_handle;
                self.driver
                    .set_crtc(req.crtc_id, gem_handle, &conns, &mode)
                    .map_err(driver_to_syscall)?;
                // The new FB replaces whatever was active or mid-flip; those
                // may now be reapable.
                let completed_flip = self.pending_flip_fb.remove(&req.crtc_id);
                let prev_fb = self.active_crtc_fb.insert(req.crtc_id, req.fb_handle);
                self.active_crtc_mode.insert(req.crtc_id, mode);
                if let Some((_, fb_id)) = completed_flip {
                    self.try_reap_fb(fb_id);
                }
                if let Some(prev) = prev_fb {
                    if prev != req.fb_handle {
                        self.try_reap_fb(prev);
                    }
                }
                Vec::new()
            }
            DRM_IOCTL_MODE_PAGE_FLIP => {
                let req = decode_wire::<DrmPageFlipWire>(payload)?;
                // Only one flip may be in flight per CRTC.
                if self.pending_flip_fb.contains_key(&req.crtc_id) {
                    warn!(
                        "redox-drm: PAGE_FLIP rejected — flip already pending on CRTC {}",
                        req.crtc_id
                    );
                    return Err(Error::new(EBUSY));
                }
                let fb_info = self.fb_registry.get(&req.fb_handle).ok_or_else(|| {
                    warn!("redox-drm: PAGE_FLIP with unknown fb_id {}", req.fb_handle);
                    Error::new(ENOENT)
                })?;
                // Validate against the mode currently active on the CRTC, if any.
                if let Some(active_mode) = self.active_crtc_mode.get(&req.crtc_id) {
                    let fb_pitch = fb_info.pitch as u64;
                    let required_lines = active_mode.vdisplay as u64;
                    let required_width = active_mode.hdisplay as u64;
                    if fb_pitch == 0
                        || (fb_info.height as u64) < required_lines
                        || (fb_info.width as u64) < required_width
                    {
                        warn!(
                            "redox-drm: PAGE_FLIP FB {}x{} pitch={} too small for active mode {}x{}",
                            fb_info.width, fb_info.height, fb_info.pitch,
                            active_mode.hdisplay, active_mode.vdisplay
                        );
                        return Err(Error::new(EINVAL));
                    }
                }
                let gem_handle = fb_info.gem_handle;
                let seqno = self
                    .driver
                    .page_flip(req.crtc_id, gem_handle, req.flags)
                    .map_err(driver_to_syscall)?;
                let current_vblank = self.driver.get_vblank(req.crtc_id).unwrap_or(0);
                // Record the outgoing FB; it is retired at the next vblank.
                let prev = self.active_crtc_fb.insert(req.crtc_id, req.fb_handle);
                if let Some(old_fb) = prev {
                    if old_fb != req.fb_handle {
                        self.pending_flip_fb
                            .insert(req.crtc_id, (current_vblank.saturating_add(1), old_fb));
                    }
                }
                seqno.to_le_bytes().to_vec()
            }
            DRM_IOCTL_MODE_CREATE_DUMB => {
                let mut req = decode_wire::<DrmCreateDumbWire>(payload)?;
                // Pitch is the byte stride rounded up to whole bytes.
                let pitch = (req.width.saturating_mul(req.bpp).saturating_add(7)) / 8;
                req.pitch = pitch;
                req.size = (pitch as u64).saturating_mul(req.height as u64);
                req.handle = self
                    .driver
                    .gem_create(req.size)
                    .map_err(driver_to_syscall)?;
                // Track ownership so MAP/DESTROY/ADDFB can enforce it.
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.owned_gems.push(req.handle);
                }
                bytes_of(&req)
            }
            DRM_IOCTL_MODE_MAP_DUMB => {
                let mut req = decode_wire::<DrmMapDumbWire>(payload)?;
                // Only the creating fd may map the object.
                let owned = self
                    .handles
                    .get(&id)
                    .map(|h| h.owned_gems.contains(&req.handle))
                    .unwrap_or(false);
                if !owned {
                    warn!(
                        "redox-drm: MAP_DUMB handle {} not owned by this fd",
                        req.handle
                    );
                    return Err(Error::new(EBADF));
                }
                req.offset = self
                    .driver
                    .gem_mmap(req.handle)
                    .map_err(driver_to_syscall)? as u64;
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.mapped_gem = Some(req.handle);
                }
                bytes_of(&req)
            }
            DRM_IOCTL_MODE_DESTROY_DUMB => {
                let req = decode_wire::<DrmDestroyDumbWire>(payload)?;
                // Only the creating fd may destroy the object.
                let owned = self
                    .handles
                    .get(&id)
                    .map(|h| h.owned_gems.contains(&req.handle))
                    .unwrap_or(false);
                if !owned {
                    warn!(
                        "redox-drm: DESTROY_DUMB handle {} not owned by this fd",
                        req.handle
                    );
                    return Err(Error::new(EBADF));
                }
                // Refuse to destroy memory that still backs a framebuffer.
                let backs_fb = self
                    .fb_registry
                    .values()
                    .any(|info| info.gem_handle == req.handle);
                if backs_fb {
                    warn!(
                        "redox-drm: DESTROY_DUMB handle {} rejected — backs an active framebuffer",
                        req.handle
                    );
                    return Err(Error::new(EBUSY));
                }
                self.driver
                    .gem_close(req.handle)
                    .map_err(driver_to_syscall)?;
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.owned_gems.retain(|&h| h != req.handle);
                }
                Vec::new()
            }
            DRM_IOCTL_MODE_GETENCODER => {
                // Static reply: every encoder reports CRTC 1 and mask 1.
                let _req = decode_wire::<DrmGetEncoderWire>(payload)?;
                let resp = DrmGetEncoderWire {
                    encoder_id: _req.encoder_id,
                    encoder_type: 0,
                    crtc_id: 1,
                    possible_crtcs: 1,
                    possible_clones: 0,
                };
                bytes_of(&resp)
            }
            DRM_IOCTL_MODE_GETCRTC => {
                let req = decode_wire::<DrmGetCrtcWire>(payload)?;
                // Report the active FB/mode only when the FB is still registered.
                let (fb_id, mode_valid, mode) = match (
                    self.active_crtc_fb.get(&req.crtc_id),
                    self.active_crtc_mode.get(&req.crtc_id),
                ) {
                    (Some(&fb), Some(m)) if self.fb_registry.contains_key(&fb) => {
                        (fb, 1u32, mode_to_wire(m))
                    }
                    _ => (0u32, 0u32, DrmModeWire::default()),
                };
                let resp = DrmGetCrtcWire {
                    crtc_id: req.crtc_id,
                    fb_id,
                    x: 0,
                    y: 0,
                    mode_valid,
                    mode,
                };
                bytes_of(&resp)
            }
            DRM_IOCTL_MODE_ADDFB => {
                let req = decode_wire::<DrmAddFbWire>(payload)?;
                if req.handle == 0 {
                    return Err(Error::new(EINVAL));
                }
                if req.width == 0 || req.height == 0 || req.bpp == 0 {
                    warn!(
                        "redox-drm: ADDFB zero dimension width={} height={} bpp={}",
                        req.width, req.height, req.bpp
                    );
                    return Err(Error::new(EINVAL));
                }
                // Client-supplied pitch must cover at least width*bpp bits.
                let min_stride = (req.width.saturating_mul(req.bpp).saturating_add(7)) / 8;
                let pitch = if req.pitch != 0 {
                    req.pitch
                } else {
                    min_stride
                };
                if pitch == 0 || pitch < min_stride {
                    warn!(
                        "redox-drm: ADDFB pitch {} below minimum stride {} ({}x{})",
                        pitch, min_stride, req.width, req.bpp
                    );
                    return Err(Error::new(EINVAL));
                }
                let required_size = (pitch as u64).checked_mul(req.height as u64);
                if required_size.is_none() {
                    warn!(
                        "redox-drm: ADDFB pitch * height overflows pitch={} height={}",
                        pitch, req.height
                    );
                    return Err(Error::new(EINVAL));
                }
                // Only the fd that created the GEM object may wrap it in an FB.
                let owned = self
                    .handles
                    .get(&id)
                    .map(|h| h.owned_gems.contains(&req.handle))
                    .unwrap_or(false);
                if !owned {
                    warn!(
                        "redox-drm: ADDFB handle {} not owned by this fd",
                        req.handle
                    );
                    return Err(Error::new(EBADF));
                }
                // The backing object must actually be large enough.
                let actual_size = self.driver.gem_size(req.handle).map_err(|e| {
                    warn!("redox-drm: ADDFB handle {} not found: {}", req.handle, e);
                    Error::new(ENOENT)
                })?;
                if required_size.unwrap() > actual_size {
                    warn!(
                        "redox-drm: ADDFB requires {} bytes but GEM {} is {} bytes",
                        required_size.unwrap(),
                        req.handle,
                        actual_size
                    );
                    return Err(Error::new(EINVAL));
                }
                let fb_id = self.next_fb_id;
                self.next_fb_id = self.next_fb_id.saturating_add(1);
                self.fb_registry.insert(
                    fb_id,
                    FbInfo {
                        gem_handle: req.handle,
                        width: req.width,
                        height: req.height,
                        pitch,
                        bpp: req.bpp,
                    },
                );
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.owned_fbs.push(fb_id);
                }
                let mut resp = req;
                resp.fb_id = fb_id;
                bytes_of(&resp)
            }
            DRM_IOCTL_MODE_RMFB => {
                let req = decode_wire::<DrmRmFbWire>(payload)?;
                // Only the owning fd may remove the framebuffer.
                let owned = self
                    .handles
                    .get(&id)
                    .map(|h| h.owned_fbs.contains(&req.fb_id))
                    .unwrap_or(false);
                if !owned {
                    warn!("redox-drm: RMFB {} not owned by this fd", req.fb_id);
                    return Err(Error::new(EBADF));
                }
                // An FB that is scanned out or mid-flip cannot be removed.
                let in_use = self.is_fb_active(req.fb_id);
                if in_use {
                    warn!(
                        "redox-drm: RMFB {} rejected — still active on a CRTC",
                        req.fb_id
                    );
                    return Err(Error::new(EBUSY));
                }
                if let Some(fb_info) = self.fb_registry.remove(&req.fb_id) {
                    // Close the backing GEM object only when nothing else uses it.
                    let still_referenced = self
                        .fb_registry
                        .values()
                        .any(|i| i.gem_handle == fb_info.gem_handle);
                    let still_owned = self
                        .handles
                        .values()
                        .any(|h| h.owned_gems.contains(&fb_info.gem_handle));
                    if !still_referenced && !still_owned {
                        if let Err(e) = self.driver.gem_close(fb_info.gem_handle) {
                            warn!(
                                "redox-drm: RMFB gem_close({}) failed: {}",
                                fb_info.gem_handle, e
                            );
                        }
                    }
                }
                if let Some(handle) = self.handles.get_mut(&id) {
                    handle.owned_fbs.retain(|&fb| fb != req.fb_id);
                }
                Vec::new()
            }
            DRM_IOCTL_GET_CAP => {
                let mut req = decode_wire::<DrmGetCapWire>(payload)?;
                // Capabilities 0 and 1 are supported; everything else reports 0.
                req.value = match req.capability {
                    0 => 1,
                    1 => 1,
                    _ => 0,
                };
                bytes_of(&req)
            }
            // Accepted but currently a no-op.
            DRM_IOCTL_SET_CLIENT_CAP => Vec::new(),
            DRM_IOCTL_VERSION => {
                let resp = DrmVersionWire {
                    major: 1,
                    minor: 0,
                    patch: 0,
                };
                bytes_of(&resp)
            }
            _ => {
                warn!("redox-drm: unsupported ioctl {:#x}", request);
                return Err(Error::new(EOPNOTSUPP));
            }
        };
        // Empty replies become a single zero byte so a read() always has
        // something to return on success.
        let response = if response.is_empty() {
            vec![0]
        } else {
            response
        };
        let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?;
        let len = response.len();
        handle.response = response;
        Ok(len)
    }
}
// ---- SchemeBlockMut implementation ----
impl SchemeBlockMut for DrmScheme {
    /// Open a DRM node. Valid paths are the card node (`card0`) and
    /// per-connector nodes (`card0Connector/<id>`); everything else fails
    /// with ENOENT. Each open gets a fresh handle with empty state.
    fn open(&mut self, path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result<Option<usize>> {
        let node = match path.trim_matches('/') {
            "card0" => NodeKind::Card,
            p if p.starts_with("card0Connector/") => {
                let tail = p.trim_start_matches("card0Connector/");
                let connector_id = tail.parse::<u32>().map_err(|_| Error::new(ENOENT))?;
                NodeKind::Connector(connector_id)
            }
            _ => return Err(Error::new(ENOENT)),
        };
        // Monotonic handle ids; saturating_add avoids wrapping back onto a
        // live id if the counter is ever exhausted.
        let id = self.next_id;
        self.next_id = self.next_id.saturating_add(1);
        self.handles.insert(
            id,
            Handle {
                node,
                response: Vec::new(),
                mapped_gem: None,
                owned_fbs: Vec::new(),
                owned_gems: Vec::new(),
            },
        );
        Ok(Some(id))
    }
    /// Copy out the response produced by the most recent ioctl `write`.
    /// NOTE(review): the response is neither consumed nor offset-advanced,
    /// so repeated reads return the same prefix and a short read cannot be
    /// resumed — presumably clients size `buf` from `fstat`; confirm.
    fn read(&mut self, id: usize, buf: &mut [u8]) -> Result<Option<usize>> {
        let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?;
        let len = handle.response.len().min(buf.len());
        buf[..len].copy_from_slice(&handle.response[..len]);
        Ok(Some(len))
    }
    /// Ioctl entry point: the first 8 bytes of `buf` are the little-endian
    /// request code, the rest is the request payload.
    /// NOTE(review): a write shorter than 8 bytes is treated as a no-op
    /// (Ok(0)) rather than an error, and the 8-byte decode assumes a
    /// 64-bit `usize` target — confirm both are intended.
    fn write(&mut self, id: usize, buf: &[u8]) -> Result<Option<usize>> {
        let (request_bytes, payload) = match buf.split_first_chunk::<8>() {
            Some(pair) => pair,
            None => {
                // Still validate the handle so a bad fd reports EBADF.
                let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
                return Ok(Some(0));
            }
        };
        let request = usize::from_le_bytes(*request_bytes);
        let written = self.handle_ioctl(id, request, payload)?;
        Ok(Some(written))
    }
    /// Report the canonical scheme path for the node behind this handle.
    fn fpath(&mut self, id: usize, buf: &mut [u8]) -> Result<Option<usize>> {
        let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        let path = match handle.node {
            NodeKind::Card => "drm:card0".to_string(),
            NodeKind::Connector(cid) => format!("drm:card0Connector/{cid}"),
        };
        let bytes = path.as_bytes();
        // Truncates silently if the caller's buffer is too small.
        let len = bytes.len().min(buf.len());
        buf[..len].copy_from_slice(&bytes[..len]);
        Ok(Some(len))
    }
    /// Minimal stat: a world-read/writable "file" whose size is the length
    /// of the pending ioctl response, letting clients size their read buffer.
    fn fstat(&mut self, id: usize, stat: &mut Stat) -> Result<Option<usize>> {
        let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        stat.st_mode = MODE_FILE | 0o666;
        stat.st_size = handle.response.len() as u64;
        stat.st_blksize = 4096;
        Ok(Some(0))
    }
    /// No buffered state to flush; only validates the handle.
    fn fsync(&mut self, id: usize) -> Result<Option<usize>> {
        let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        Ok(Some(0))
    }
    /// Event registration stub: no event flags are currently produced.
    fn fevent(&mut self, id: usize, _flags: EventFlags) -> Result<Option<EventFlags>> {
        let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        Ok(Some(EventFlags::empty()))
    }
    /// Tear down a client handle, releasing the framebuffers it created and
    /// the GEM objects it owns — but never freeing a GEM that is still
    /// backing another registered framebuffer or owned by another open
    /// handle, and leaving FBs that are still active on a CRTC untouched.
    fn close(&mut self, id: usize) -> Result<Option<usize>> {
        if let Some(handle) = self.handles.remove(&id) {
            // GEMs freed during FB cleanup are remembered here so phase two
            // does not double-close them.
            let mut auto_closed_gems = HashSet::new();
            // Phase one: drop this handle's framebuffers.
            for fb_id in &handle.owned_fbs {
                // A framebuffer still scanned out stays registered (and so
                // does its backing GEM).
                let in_use = self.is_fb_active(*fb_id);
                if in_use {
                    continue;
                }
                if let Some(fb_info) = self.fb_registry.remove(fb_id) {
                    // Only free the backing GEM when no surviving FB uses it
                    // and no other handle owns it.
                    let still_referenced = self
                        .fb_registry
                        .values()
                        .any(|i| i.gem_handle == fb_info.gem_handle);
                    let still_owned = self
                        .handles
                        .values()
                        .any(|h| h.owned_gems.contains(&fb_info.gem_handle));
                    if !still_referenced && !still_owned {
                        match self.driver.gem_close(fb_info.gem_handle) {
                            Ok(()) => {
                                auto_closed_gems.insert(fb_info.gem_handle);
                            }
                            Err(e) => {
                                // Best-effort cleanup: log and keep going.
                                warn!(
                                    "redox-drm: close gem_close({}) failed: {}",
                                    fb_info.gem_handle, e
                                );
                            }
                        }
                    }
                }
            }
            // Phase two: drop GEMs this handle owned directly.
            for gem_handle in handle.owned_gems {
                if auto_closed_gems.contains(&gem_handle) {
                    continue;
                }
                // A GEM still backing any registered FB must survive.
                let backs_fb = self
                    .fb_registry
                    .values()
                    .any(|info| info.gem_handle == gem_handle);
                if !backs_fb {
                    if let Err(e) = self.driver.gem_close(gem_handle) {
                        warn!(
                            "redox-drm: close gem GEM {} cleanup failed: {}",
                            gem_handle, e
                        );
                    }
                }
            }
        }
        Ok(Some(0))
    }
    /// Resolve the address backing a client mmap of this handle's GEM.
    /// Fails with EINVAL unless a prior operation selected a GEM to map
    /// (`mapped_gem`). Offset, size and flags are currently ignored.
    fn mmap_prep(
        &mut self,
        id: usize,
        _offset: u64,
        _size: usize,
        _flags: MapFlags,
    ) -> Result<Option<usize>> {
        let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        let gem_handle = handle.mapped_gem.ok_or_else(|| Error::new(EINVAL))?;
        let addr = self
            .driver
            .gem_mmap(gem_handle)
            .map_err(driver_to_syscall)?;
        debug!(
            "redox-drm: mmap_prep GEM handle {} at addr={:#x}",
            gem_handle, addr
        );
        Ok(Some(addr))
    }
    /// Unmap is a no-op beyond handle validation; GEM lifetime is managed
    /// by `close` and the RMFB path, not by unmapping.
    fn munmap(
        &mut self,
        id: usize,
        _offset: u64,
        _size: usize,
        _flags: MunmapFlags,
    ) -> Result<Option<usize>> {
        let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        Ok(Some(0))
    }
}
// ---- Conversion helpers ----
fn connector_type_to_u32(ct: crate::kms::ConnectorType) -> u32 {
match ct {
crate::kms::ConnectorType::Unknown => 0,
crate::kms::ConnectorType::VGA => 1,
crate::kms::ConnectorType::DVII => 2,
crate::kms::ConnectorType::DVID => 3,
crate::kms::ConnectorType::DVIA => 4,
crate::kms::ConnectorType::Composite => 5,
crate::kms::ConnectorType::SVideo => 6,
crate::kms::ConnectorType::LVDS => 7,
crate::kms::ConnectorType::Component => 8,
crate::kms::ConnectorType::NinePinDIN => 9,
crate::kms::ConnectorType::DisplayPort => 10,
crate::kms::ConnectorType::HDMIA => 11,
crate::kms::ConnectorType::HDMIB => 12,
crate::kms::ConnectorType::TV => 13,
crate::kms::ConnectorType::EDP => 14,
crate::kms::ConnectorType::Virtual => 15,
}
}
/// Convert an internal `ModeInfo` into its fixed-size wire representation.
/// The mode's human-readable `name` is not part of the wire struct; it is
/// serialized separately (see `encode_modes`).
fn mode_to_wire(mode: &ModeInfo) -> DrmModeWire {
    DrmModeWire {
        clock: mode.clock,
        hdisplay: mode.hdisplay,
        hsync_start: mode.hsync_start,
        hsync_end: mode.hsync_end,
        htotal: mode.htotal,
        hskew: mode.hskew,
        vdisplay: mode.vdisplay,
        vsync_start: mode.vsync_start,
        vsync_end: mode.vsync_end,
        vtotal: mode.vtotal,
        vscan: mode.vscan,
        vrefresh: mode.vrefresh,
        flags: mode.flags,
        type_: mode.type_,
    }
}
/// Reconstruct an internal `ModeInfo` from its wire form, synthesizing the
/// conventional "WIDTHxHEIGHT@REFRESH" display name from the timing fields.
fn wire_to_mode(w: &DrmModeWire) -> ModeInfo {
    // The wire struct carries no name, so derive one deterministically.
    let name = format!("{}x{}@{}", w.hdisplay, w.vdisplay, w.vrefresh);
    ModeInfo {
        name,
        clock: w.clock,
        // Horizontal timings.
        hdisplay: w.hdisplay,
        hsync_start: w.hsync_start,
        hsync_end: w.hsync_end,
        htotal: w.htotal,
        hskew: w.hskew,
        // Vertical timings.
        vdisplay: w.vdisplay,
        vsync_start: w.vsync_start,
        vsync_end: w.vsync_end,
        vtotal: w.vtotal,
        vscan: w.vscan,
        vrefresh: w.vrefresh,
        flags: w.flags,
        type_: w.type_,
    }
}
/// Serialize a mode list: each mode is its fixed-size wire struct followed
/// by the NUL-terminated mode name. An empty list encodes as a single NUL
/// byte so the caller always receives at least one byte.
fn encode_modes(modes: &[ModeInfo]) -> Vec<u8> {
    let mut encoded: Vec<u8> = modes
        .iter()
        .flat_map(|mode| {
            let mut record = bytes_of(&mode_to_wire(mode));
            record.extend_from_slice(mode.name.as_bytes());
            record.push(0);
            record
        })
        .collect();
    if encoded.is_empty() {
        encoded.push(0);
    }
    encoded
}
/// Serialize `value` into its raw in-memory byte representation.
///
/// NOTE(review): for a `T` containing padding this views uninitialized
/// bytes through the slice, which is undefined behavior — wire structs
/// passed here must be padding-free `#[repr(C)]` types; confirm at call
/// sites.
fn bytes_of<T>(value: &T) -> Vec<u8> {
    let len = size_of::<T>();
    let mut out = Vec::with_capacity(len);
    // SAFETY: `value` is a live reference, so `len` bytes starting at its
    // address are readable and remain valid for the duration of this call.
    out.extend_from_slice(unsafe {
        std::slice::from_raw_parts((value as *const T).cast::<u8>(), len)
    });
    out
}
fn read_u32(buf: &[u8], offset: usize) -> Result<u32> {
let end = offset.saturating_add(size_of::<u32>());
let bytes = buf.get(offset..end).ok_or_else(|| Error::new(EINVAL))?;
let array: [u8; 4] = bytes.try_into().map_err(|_| Error::new(EINVAL))?;
Ok(u32::from_le_bytes(array))
}
/// Reinterpret the front of `buf` as the wire struct `T`; trailing bytes
/// are ignored. Returns `EINVAL` when the buffer is shorter than `T`.
///
/// NOTE(review): bytes come straight from an untrusted client, so callers
/// must only instantiate this with plain-old-data wire structs for which
/// every bit pattern is valid (no `bool`s or enums) — confirm at call
/// sites.
fn decode_wire<T: Copy>(buf: &[u8]) -> Result<T> {
    if size_of::<T>() > buf.len() {
        return Err(Error::new(EINVAL));
    }
    // SAFETY: the length check above guarantees at least `size_of::<T>()`
    // readable bytes at `buf`, and `read_unaligned` tolerates any
    // alignment of the source pointer.
    Ok(unsafe { buf.as_ptr().cast::<T>().read_unaligned() })
}
/// Map any driver-layer error onto the generic `EINVAL` expected by the
/// scheme interface, logging the original error first so the detail is
/// not lost.
fn driver_to_syscall(error: crate::driver::DriverError) -> Error {
    warn!("redox-drm: driver error: {}", error);
    Error::new(EINVAL)
}
</driver_to_syscall>