Refresh redox-drm and AMD GPU driver

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-openagent)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
This commit is contained in:
2026-04-17 00:03:28 +01:00
parent 6b887e9166
commit 00cda85edd
13 changed files with 737 additions and 399 deletions
@@ -116,12 +116,30 @@ static inline u32 amdgpu_dc_hpd_status(void)
return readl((u8 __iomem *)g_mmio_base + AMDGPU_DC_HPD_STATUS_REG);
}
static void amdgpu_redox_log_irq_expectation(u64 quirk_flags)
{
const char *policy = "MSI-X first, then MSI, then legacy IRQ fallback";
if ((quirk_flags & PCI_QUIRK_FORCE_LEGACY) != 0 ||
((quirk_flags & PCI_QUIRK_NO_MSIX) != 0 &&
(quirk_flags & PCI_QUIRK_NO_MSI) != 0)) {
policy = "legacy IRQ only";
} else if ((quirk_flags & PCI_QUIRK_NO_MSIX) != 0) {
policy = "avoid MSI-X, prefer MSI with legacy fallback";
} else if ((quirk_flags & PCI_QUIRK_NO_MSI) != 0) {
policy = "avoid MSI, prefer MSI-X with legacy fallback";
}
printk("amdgpu_redox: quirk-aware IRQ expectation: %s\n", policy);
}
/* Initialize AMD Display Core */
int amdgpu_dc_init(void *mmio_base, size_t mmio_size)
{
int ret = 0;
u32 gpu_id = 0;
const char *firmware_name = NULL;
u64 quirk_flags = 0;
printk("amdgpu_redox: initializing AMD Display Core\n");
@@ -133,6 +151,28 @@ int amdgpu_dc_init(void *mmio_base, size_t mmio_size)
gpu_id = readl(mmio_base);
printk("amdgpu_redox: GPU ID = %#010x\n", gpu_id);
if (g_pci_dev) {
quirk_flags = pci_get_quirk_flags(g_pci_dev);
printk("amdgpu_redox: PCI %02x:%02x.%u quirk flags = %#llx\n",
g_pci_dev->bus_number,
g_pci_dev->dev_number,
g_pci_dev->func_number,
(unsigned long long)quirk_flags);
if (pci_has_quirk(g_pci_dev, PCI_QUIRK_NO_ASPM)) {
pr_warn("amdgpu_redox: NO_ASPM quirk active; skipping any future ASPM-dependent assumptions\n");
}
if (pci_has_quirk(g_pci_dev, PCI_QUIRK_NEED_IOMMU)) {
pr_warn("amdgpu_redox: NEED_IOMMU quirk active; runtime must provide a functional IOMMU path\n");
}
if (pci_has_quirk(g_pci_dev, PCI_QUIRK_NO_MSIX)) {
pr_warn("amdgpu_redox: NO_MSIX quirk active; IRQ setup must avoid MSI-X\n");
}
if (pci_has_quirk(g_pci_dev, PCI_QUIRK_NO_MSI)) {
pr_warn("amdgpu_redox: NO_MSI quirk active; IRQ setup must avoid MSI\n");
}
amdgpu_redox_log_irq_expectation(quirk_flags);
}
switch (gpu_id) {
case ASIC_FAMILY_NAVI10:
g_asic_family = ASIC_FAMILY_NAVI10;
@@ -182,11 +222,29 @@ int amdgpu_dc_init(void *mmio_base, size_t mmio_size)
{
const struct firmware *fw = NULL;
int fw_ret = request_firmware(&fw, firmware_name, NULL);
bool firmware_required =
g_pci_dev && pci_has_quirk(g_pci_dev, PCI_QUIRK_NEED_FIRMWARE);
if (fw_ret != 0 || !fw) {
pr_warn("amdgpu_redox: firmware %s not available (err=%d), continuing without\n",
firmware_name, fw_ret);
if (firmware_required) {
pr_err("amdgpu_redox: firmware %s is required by quirk policy (flags=%#llx, err=%d)\n",
firmware_name,
(unsigned long long)quirk_flags,
fw_ret);
return fw_ret != 0 ? fw_ret : -ENOENT;
}
pr_warn("amdgpu_redox: firmware %s not available (err=%d), continuing without (quirks=%#llx)\n",
firmware_name,
fw_ret,
(unsigned long long)quirk_flags);
} else {
printk("amdgpu_redox: firmware %s loaded (%zu bytes)\n", firmware_name, fw->size);
if (firmware_required) {
printk("amdgpu_redox: firmware %s loaded (%zu bytes) to satisfy NEED_FIRMWARE quirk\n",
firmware_name,
fw->size);
} else {
printk("amdgpu_redox: firmware %s loaded (%zu bytes)\n", firmware_name, fw->size);
}
release_firmware(fw);
}
}
+28 -11
View File
@@ -204,22 +204,45 @@ extern void redox_dma_free_coherent(size_t size, void *vaddr, dma_addr_t dma_han
#define dma_mapping_error(dev, addr) 0
/* ---- PCI — maps to redox-driver-sys PCI ---- */
struct pci_dev;
struct device_driver {
const char *name;
void *owner;
};
struct device {
struct device_driver *driver;
void *driver_data;
void *platform_data;
void *of_node;
u64 dma_mask;
struct pci_dev *pci_dev;
};
struct pci_dev {
u16 vendor;
u16 device;
u8 bus_number;
u8 dev_number;
u8 func_number;
u8 revision;
u8 irq;
phys_addr_t resource_start[6];
u32 irq;
u64 resource_start[6];
u64 resource_len[6];
u32 resource_flags[6];
void *driver_data;
struct device device_obj;
bool enabled;
u32 resource_flags[6];
void __iomem *mmio_base;
int is_amdgpu;
};
extern struct pci_dev *redox_pci_find_amd_gpu(void);
extern void redox_pci_set_device_info(u16 vendor, u16 device, u8 revision,
u8 irq, u64 bar0_addr, u64 bar0_size,
extern void redox_pci_set_device_info(u16 vendor, u16 device,
u8 bus_number, u8 dev_number,
u8 func_number, u8 revision, u32 irq,
u64 bar0_addr, u64 bar0_size,
u64 bar2_addr, u64 bar2_size);
extern void redox_pci_dev_put(struct pci_dev *pdev);
extern int redox_pci_enable_device(struct pci_dev *pdev);
@@ -255,12 +278,6 @@ extern void redox_release_firmware(const struct firmware *fw);
#define request_firmware(fw, name, dev) redox_request_firmware((fw), (name), (dev))
#define release_firmware(fw) redox_release_firmware((fw))
/* ---- Device model ---- */
struct device {
void *driver_data;
struct pci_dev *pci_dev;
};
#define dev_get_drvdata(dev) ((dev)->driver_data)
#define dev_set_drvdata(dev, data) ((dev)->driver_data = (data))
+11 -3
View File
@@ -222,13 +222,18 @@ void redox_dma_free_coherent(size_t size, void *vaddr, dma_addr_t dma_handle)
static struct pci_dev g_pci_dev;
static int g_pci_dev_populated;
void redox_pci_set_device_info(u16 vendor, u16 device, u8 revision,
u8 irq, u64 bar0_addr, u64 bar0_size,
void redox_pci_set_device_info(u16 vendor, u16 device,
u8 bus_number, u8 dev_number,
u8 func_number, u8 revision, u32 irq,
u64 bar0_addr, u64 bar0_size,
u64 bar2_addr, u64 bar2_size)
{
memset(&g_pci_dev, 0, sizeof(g_pci_dev));
g_pci_dev.vendor = vendor;
g_pci_dev.device = device;
g_pci_dev.bus_number = bus_number;
g_pci_dev.dev_number = dev_number;
g_pci_dev.func_number = func_number;
g_pci_dev.revision = revision;
g_pci_dev.irq = irq;
g_pci_dev.resource_start[0] = (phys_addr_t)bar0_addr;
@@ -238,12 +243,15 @@ void redox_pci_set_device_info(u16 vendor, u16 device, u8 revision,
g_pci_dev.resource_len[2] = bar2_size;
g_pci_dev.resource_flags[2] = IORESOURCE_MEM;
g_pci_dev.driver_data = NULL;
memset(&g_pci_dev.device_obj, 0, sizeof(g_pci_dev.device_obj));
g_pci_dev.enabled = false;
g_pci_dev.mmio_base = NULL;
g_pci_dev.is_amdgpu = 1;
g_pci_dev_populated = 1;
printk("PCI device info set: vendor=%#06x device=%#06x rev=%#04x irq=%u "
printk("PCI device info set: %02x:%02x.%u vendor=%#06x device=%#06x rev=%#04x irq=%u "
"bar0=%#llx+%#llx bar2=%#llx+%#llx\n",
bus_number, dev_number, func_number,
vendor, device, revision, irq,
(unsigned long long)bar0_addr, (unsigned long long)bar0_size,
(unsigned long long)bar2_addr, (unsigned long long)bar2_size);
@@ -1,201 +0,0 @@
use std::collections::BTreeMap;
use log::{debug, warn};
use crate::driver::{DriverError, Result};
use crate::gem::GemHandle;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DmabufInfo {
pub phys_addr: usize,
pub size: u64,
pub gem_handle: GemHandle,
}
#[derive(Clone, Debug)]
struct DmabufEntry {
#[allow(dead_code)]
info: DmabufInfo,
#[allow(dead_code)]
scheme_path: String,
#[allow(dead_code)]
refcount: usize,
}
pub struct DmabufManager {
#[allow(dead_code)]
next_fd: i32,
#[allow(dead_code)]
exported: BTreeMap<i32, GemHandle>,
#[allow(dead_code)]
entries: BTreeMap<GemHandle, DmabufEntry>,
}
impl DmabufManager {
pub fn new() -> Self {
Self {
next_fd: 10_000,
exported: BTreeMap::new(),
entries: BTreeMap::new(),
}
}
#[allow(dead_code)]
pub fn export(&mut self, handle: GemHandle) -> Result<i32> {
self.export_with_info(handle, 0, 0)
}
#[allow(dead_code)]
pub fn export_with_info(
&mut self,
handle: GemHandle,
phys_addr: usize,
size: u64,
) -> Result<i32> {
if handle == 0 {
return Err(DriverError::InvalidArgument(
"DMA-BUF export requires a non-zero GEM handle",
));
}
let fd = self.allocate_fd()?;
let scheme_path = Self::scheme_path(handle);
if let Some(entry) = self.entries.get_mut(&handle) {
entry.info.phys_addr = Self::merge_phys_addr(entry.info.phys_addr, phys_addr)?;
entry.info.size = Self::merge_size(entry.info.size, size)?;
entry.refcount = entry.refcount.checked_add(1).ok_or_else(|| {
DriverError::Buffer(format!(
"DMA-BUF refcount overflow for GEM handle {}",
handle
))
})?;
debug!(
"redox-drm: dup() DMA-BUF export fd {} -> {} (GEM handle {}, refs={})",
entry.scheme_path, fd, handle, entry.refcount
);
} else {
self.entries.insert(
handle,
DmabufEntry {
info: DmabufInfo {
phys_addr,
size,
gem_handle: handle,
},
scheme_path: scheme_path.clone(),
refcount: 1,
},
);
warn!(
"redox-drm: exported DMA-BUF {} as synthetic fd {} for GEM handle {} \
(phys={:#x}, size={})",
scheme_path, fd, handle, phys_addr, size
);
}
self.exported.insert(fd, handle);
Ok(fd)
}
pub fn import(&self, fd: i32) -> Result<GemHandle> {
let info = self
.lookup(fd)
.ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;
debug!(
"redox-drm: imported DMA-BUF fd {} -> GEM handle {} (phys={:#x}, size={})",
fd, info.gem_handle, info.phys_addr, info.size
);
Ok(info.gem_handle)
}
pub fn close(&mut self, fd: i32) -> Result<()> {
let handle = self
.exported
.remove(&fd)
.ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;
let remove_entry = {
let entry = self.entries.get_mut(&handle).ok_or_else(|| {
DriverError::NotFound(format!(
"DMA-BUF bookkeeping missing for GEM handle {}",
handle
))
})?;
if entry.refcount == 0 {
return Err(DriverError::Buffer(format!(
"DMA-BUF refcount underflow for GEM handle {}",
handle
)));
}
entry.refcount -= 1;
debug!(
"redox-drm: closed DMA-BUF fd {} for {} (GEM handle {}, refs={})",
fd, entry.scheme_path, handle, entry.refcount
);
entry.refcount == 0
};
if remove_entry {
let _ = self.entries.remove(&handle);
warn!(
"redox-drm: released final DMA-BUF export for GEM handle {}",
handle
);
}
Ok(())
}
pub fn lookup(&self, fd: i32) -> Option<DmabufInfo> {
let handle = self.exported.get(&fd)?;
self.entries.get(handle).map(|entry| entry.info)
}
pub fn dup(&mut self, fd: i32) -> Result<i32> {
let info = self
.lookup(fd)
.ok_or_else(|| DriverError::NotFound(format!("unknown synthetic dma-buf fd {fd}")))?;
self.export_with_info(info.gem_handle, info.phys_addr, info.size)
}
fn allocate_fd(&mut self) -> Result<i32> {
let fd = self.next_fd;
self.next_fd = self.next_fd.checked_add(1).ok_or_else(|| {
DriverError::Buffer("synthetic DMA-BUF fd space exhausted".to_string())
})?;
Ok(fd)
}
fn scheme_path(handle: GemHandle) -> String {
format!("drm:card0/dmabuf/{handle}")
}
fn merge_phys_addr(current: usize, incoming: usize) -> Result<usize> {
if current == 0 || incoming == 0 || current == incoming {
return Ok(current.max(incoming));
}
Err(DriverError::Buffer(format!(
"conflicting DMA-BUF physical addresses: existing={:#x}, incoming={:#x}",
current, incoming
)))
}
fn merge_size(current: u64, incoming: u64) -> Result<u64> {
if current == 0 || incoming == 0 || current == incoming {
return Ok(current.max(incoming));
}
Err(DriverError::Buffer(format!(
"conflicting DMA-BUF sizes: existing={}, incoming={}",
current, incoming
)))
}
}
@@ -49,17 +49,12 @@ pub trait GpuDriver: Send + Sync {
mode: &ModeInfo,
) -> Result<()>;
fn page_flip(&self, crtc_id: u32, fb_handle: u32, flags: u32) -> Result<u64>;
#[allow(dead_code)]
fn get_vblank(&self, crtc_id: u32) -> Result<u64>;
fn gem_create(&self, size: u64) -> Result<GemHandle>;
fn gem_close(&self, handle: GemHandle) -> Result<()>;
fn gem_mmap(&self, handle: GemHandle) -> Result<usize>;
fn gem_size(&self, handle: GemHandle) -> Result<u64>;
#[allow(dead_code)]
fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32>;
#[allow(dead_code)]
fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle>;
#[allow(dead_code)]
fn get_edid(&self, connector_id: u32) -> Vec<u8>;
@@ -44,8 +44,11 @@ unsafe extern "C" {
fn ffi_redox_pci_set_device_info(
vendor: u16,
device: u16,
bus_number: u8,
dev_number: u8,
func_number: u8,
revision: u8,
irq: u8,
irq: u32,
bar0_addr: u64,
bar0_size: u64,
bar2_addr: u64,
@@ -105,6 +108,9 @@ fn amdgpu_dc_cleanup() {
pub fn set_pci_device_info(
vendor: u16,
device: u16,
bus_number: u8,
dev_number: u8,
func_number: u8,
revision: u8,
irq: u32,
bar0_addr: u64,
@@ -115,11 +121,31 @@ pub fn set_pci_device_info(
#[cfg(not(no_amdgpu_c))]
unsafe {
ffi_redox_pci_set_device_info(
vendor, device, revision, irq as u8, bar0_addr, bar0_size, bar2_addr, bar2_size,
vendor,
device,
bus_number,
dev_number,
func_number,
revision,
irq,
bar0_addr,
bar0_size,
bar2_addr,
bar2_size,
);
}
let _ = (
vendor, device, revision, irq, bar0_addr, bar0_size, bar2_addr, bar2_size,
vendor,
device,
bus_number,
dev_number,
func_number,
revision,
irq,
bar0_addr,
bar0_size,
bar2_addr,
bar2_size,
);
}
@@ -102,6 +102,9 @@ impl AmdDriver {
display::set_pci_device_info(
info.vendor_id,
info.device_id,
info.location.bus,
info.location.device,
info.location.function,
info.revision,
info.irq.unwrap_or(0),
bar0.addr,
@@ -522,22 +525,6 @@ impl GpuDriver for AmdDriver {
Ok(gem.object(handle)?.size)
}
fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32> {
let mut gem = self
.gem
.lock()
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
gem.export_dmafd(handle)
}
fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle> {
let gem = self
.gem
.lock()
.map_err(|_| DriverError::Buffer("GEM manager poisoned".to_string()))?;
gem.import_dmafd(fd)
}
fn get_edid(&self, connector_id: u32) -> Vec<u8> {
match self.connectors.lock() {
Ok(connectors) => connectors
@@ -443,27 +443,6 @@ impl GpuDriver for IntelDriver {
Ok(gem.object(handle)?.size)
}
fn gem_export_dmafd(&self, handle: GemHandle) -> Result<i32> {
let mut gem = self
.gem
.lock()
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
gem.export_dmafd(handle)
}
fn gem_import_dmafd(&self, fd: i32) -> Result<GemHandle> {
let handle = {
let gem = self
.gem
.lock()
.map_err(|_| DriverError::Buffer("Intel GEM manager poisoned".into()))?;
gem.import_dmafd(fd)?
};
let _ = self.ensure_gem_gpu_mapping(handle)?;
Ok(handle)
}
fn get_edid(&self, connector_id: u32) -> Vec<u8> {
match self.connectors.lock() {
Ok(connectors) => connectors
@@ -2,7 +2,8 @@ use std::io::{Read, Write};
use log::{info, warn};
use redox_driver_sys::irq::{IrqHandle, MsixTable, MsixVector};
use redox_driver_sys::pci::{PciDevice, PciDeviceInfo, PCI_CAP_ID_MSIX};
use redox_driver_sys::pci::{PciDevice, PciDeviceInfo, PCI_CAP_ID_MSI, PCI_CAP_ID_MSIX};
use redox_driver_sys::quirks::PciQuirkFlags;
use crate::driver::{DriverError, Result};
@@ -12,6 +13,10 @@ pub enum InterruptHandle {
table: MsixTable,
cap_offset: u8,
},
Msi {
handle: IrqHandle,
irq: u32,
},
Legacy {
handle: IrqHandle,
irq: u32,
@@ -20,8 +25,35 @@ pub enum InterruptHandle {
impl InterruptHandle {
pub fn setup(device_info: &PciDeviceInfo, pci_device: &mut PciDevice) -> Result<Self> {
if let Ok(Some(handle)) = Self::try_msix(device_info, pci_device) {
return Ok(handle);
let quirks = device_info.quirks();
if !quirks.contains(PciQuirkFlags::NO_MSIX) {
if let Ok(Some(handle)) = Self::try_msix(device_info, pci_device) {
return Ok(handle);
}
} else {
info!(
"redox-drm: skipping MSI-X for {} (NO_MSIX quirk)",
device_info.location
);
}
if !quirks.contains(PciQuirkFlags::NO_MSI) {
if let Ok(Some(handle)) = Self::try_msi(device_info, pci_device) {
return Ok(handle);
}
} else {
info!(
"redox-drm: skipping MSI for {} (NO_MSI quirk)",
device_info.location
);
}
if quirks.contains(PciQuirkFlags::FORCE_LEGACY_IRQ) {
info!(
"redox-drm: forcing legacy IRQ for {} (FORCE_LEGACY_IRQ quirk)",
device_info.location
);
}
Self::try_legacy(device_info)
@@ -89,6 +121,35 @@ impl InterruptHandle {
}))
}
fn try_msi(device_info: &PciDeviceInfo, _pci_device: &mut PciDevice) -> Result<Option<Self>> {
let msi_cap = match device_info.find_capability(PCI_CAP_ID_MSI) {
Some(cap) => cap,
None => return Ok(None),
};
let irq = device_info.irq.ok_or_else(|| {
DriverError::Io(format!("no IRQ for MSI on {}", device_info.location))
})?;
let handle = match IrqHandle::request(irq) {
Ok(h) => h,
Err(e) => {
warn!(
"redox-drm: MSI IRQ request failed for {}: {e}",
device_info.location
);
return Ok(None);
}
};
info!(
"redox-drm: MSI enabled for {} cap_offset={:#x} irq {}",
device_info.location, msi_cap.offset, irq
);
Ok(Some(InterruptHandle::Msi { handle, irq }))
}
fn try_legacy(device_info: &PciDeviceInfo) -> Result<Self> {
let irq = device_info
.irq
@@ -114,7 +175,7 @@ impl InterruptHandle {
Err(e) => Err(DriverError::Io(e.to_string())),
}
}
InterruptHandle::Legacy { handle, .. } => handle
InterruptHandle::Msi { handle, .. } | InterruptHandle::Legacy { handle, .. } => handle
.try_wait()
.map(|ev| ev.is_some())
.map_err(|e| DriverError::Io(e.to_string())),
@@ -134,7 +195,7 @@ impl InterruptHandle {
.write_all(&buf)
.map_err(|e| DriverError::Io(e.to_string()))
}
InterruptHandle::Legacy { handle, .. } => {
InterruptHandle::Msi { handle, .. } | InterruptHandle::Legacy { handle, .. } => {
let mut buf = [0u8; 8];
let _ = handle.wait().map_err(|e| DriverError::Io(e.to_string()))?;
Ok(())
@@ -145,7 +206,7 @@ impl InterruptHandle {
pub fn irq(&self) -> u32 {
match self {
InterruptHandle::Msix { vector, .. } => vector.irq,
InterruptHandle::Legacy { irq, .. } => *irq,
InterruptHandle::Msi { irq, .. } | InterruptHandle::Legacy { irq, .. } => *irq,
}
}
@@ -5,7 +5,9 @@ pub mod interrupt;
use std::collections::HashMap;
use std::sync::Arc;
use log::info;
use redox_driver_sys::pci::{PciDevice, PciDeviceInfo, PCI_VENDOR_ID_AMD, PCI_VENDOR_ID_INTEL};
use redox_driver_sys::quirks::PciQuirkFlags;
use crate::driver::{DriverError, GpuDriver, Result};
@@ -26,6 +28,21 @@ impl DriverRegistry {
info
};
let quirks = full.quirks();
if !quirks.is_empty() {
info!(
"redox-drm: quirks for {:#06x}:{:#06x}: {:?}",
full.vendor_id, full.device_id, quirks
);
}
if quirks.contains(PciQuirkFlags::DISABLE_ACCEL) {
return Err(DriverError::Pci(format!(
"device {:#06x}:{:#06x} at {} has DISABLE_ACCEL quirk — skipping probe",
full.vendor_id, full.device_id, full.location
)));
}
match full.vendor_id {
PCI_VENDOR_ID_AMD => {
let driver = amd::AmdDriver::new(full, firmware)?;
+2 -23
View File
@@ -1,9 +1,8 @@
use std::collections::BTreeMap;
use log::{debug, warn};
use log::debug;
use redox_driver_sys::dma::DmaBuffer;
use crate::dmabuf::DmabufManager;
use crate::driver::{DriverError, Result};
pub type GemHandle = u32;
@@ -28,7 +27,6 @@ struct GemAllocation {
pub struct GemManager {
next_handle: GemHandle,
objects: BTreeMap<GemHandle, GemAllocation>,
dmabuf: DmabufManager,
}
impl GemManager {
@@ -36,7 +34,6 @@ impl GemManager {
Self {
next_handle: 1,
objects: BTreeMap::new(),
dmabuf: DmabufManager::new(),
}
}
@@ -53,7 +50,7 @@ impl GemManager {
let dma = DmaBuffer::allocate(size as usize, 4096)
.map_err(|e| DriverError::Buffer(format!("DMA allocation failed: {e}")))?;
if !dma.is_physically_contiguous() {
warn!(
debug!(
"redox-drm: GEM handle {} allocated without physically contiguous backing",
handle
);
@@ -93,24 +90,6 @@ impl GemManager {
Ok(allocation.object.virt_addr)
}
#[allow(dead_code)]
pub fn export_dmafd(&mut self, handle: GemHandle) -> Result<i32> {
let allocation = self
.objects
.get(&handle)
.ok_or_else(|| DriverError::NotFound(format!("unknown GEM handle {handle}")))?;
self.dmabuf
.export_with_info(handle, allocation.object.phys_addr, allocation.object.size)
}
#[allow(dead_code)]
pub fn import_dmafd(&self, fd: i32) -> Result<GemHandle> {
let handle = self.dmabuf.import(fd)?;
let _ = self.object(handle)?;
Ok(handle)
}
pub fn object(&self, handle: GemHandle) -> Result<&GemObject> {
self.objects
.get(&handle)
@@ -1,6 +1,5 @@
#![allow(dead_code)]
mod dmabuf;
mod driver;
mod drivers;
mod gem;
+519 -106
View File
@@ -38,6 +38,11 @@ const DRM_IOCTL_MODE_RMFB: usize = DRM_IOCTL_BASE + 22;
const DRM_IOCTL_GET_CAP: usize = DRM_IOCTL_BASE + 23;
const DRM_IOCTL_SET_CLIENT_CAP: usize = DRM_IOCTL_BASE + 24;
const DRM_IOCTL_VERSION: usize = DRM_IOCTL_BASE + 25;
const DRM_IOCTL_GEM_CREATE: usize = DRM_IOCTL_BASE + 26;
const DRM_IOCTL_GEM_CLOSE: usize = DRM_IOCTL_BASE + 27;
const DRM_IOCTL_GEM_MMAP: usize = DRM_IOCTL_BASE + 28;
const DRM_IOCTL_PRIME_HANDLE_TO_FD: usize = DRM_IOCTL_BASE + 29;
const DRM_IOCTL_PRIME_FD_TO_HANDLE: usize = DRM_IOCTL_BASE + 30;
// ---- Wire types for DRM ioctls ----
#[repr(C)]
@@ -183,31 +188,91 @@ struct DrmSetClientCapWire {
value: u64,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGemCreateWire {
size: u64,
handle: u32,
_pad: u32,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGemCloseWire {
handle: u32,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmGemMmapWire {
handle: u32,
_pad: u32,
offset: u64,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmPrimeHandleToFdWire {
handle: u32,
flags: u32,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmPrimeFdToHandleWire {
fd: i32,
_pad: u32,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmPrimeHandleToFdResponseWire {
fd: i32,
_pad: u32,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Default)]
struct DrmPrimeFdToHandleResponseWire {
handle: u32,
_pad: u32,
}
// ---- Internal handle types ----
#[derive(Clone, Debug)]
enum NodeKind {
Card,
Connector(u32),
DmaBuf {
gem_handle: GemHandle,
export_token: u32,
},
}
struct Handle {
node: NodeKind,
response: Vec<u8>,
mapped_gem: Option<GemHandle>,
mapped_gem_refs: usize,
owned_fbs: Vec<u32>,
owned_gems: Vec<GemHandle>,
closing: bool,
}
pub struct DrmScheme {
driver: Arc<dyn GpuDriver>,
next_id: usize,
next_fb_id: u32,
next_export_token: u32,
handles: BTreeMap<usize, Handle>,
active_crtc_fb: BTreeMap<u32, u32>,
active_crtc_mode: BTreeMap<u32, ModeInfo>,
pending_flip_fb: BTreeMap<u32, (u64, u32)>,
fb_registry: BTreeMap<u32, FbInfo>,
active_gem_maps: BTreeMap<GemHandle, usize>,
gem_export_refs: BTreeMap<GemHandle, usize>,
prime_exports: BTreeMap<u32, GemHandle>,
}
impl DrmScheme {
@@ -216,17 +281,35 @@ impl DrmScheme {
driver,
next_id: 0,
next_fb_id: 1,
next_export_token: 1,
handles: BTreeMap::new(),
active_crtc_fb: BTreeMap::new(),
active_crtc_mode: BTreeMap::new(),
pending_flip_fb: BTreeMap::new(),
fb_registry: BTreeMap::new(),
active_gem_maps: BTreeMap::new(),
gem_export_refs: BTreeMap::new(),
prime_exports: BTreeMap::new(),
}
}
#[allow(dead_code)]
pub fn on_close(&mut self, id: usize) {
self.handles.remove(&id);
let mapped = self
.handles
.get(&id)
.map(|handle| handle.mapped_gem_refs != 0)
.unwrap_or(false);
if mapped {
if let Some(handle) = self.handles.get_mut(&id) {
handle.closing = true;
}
return;
}
if let Some(handle) = self.handles.remove(&id) {
self.finalize_handle_close(handle);
}
}
fn is_fb_active(&self, fb_id: u32) -> bool {
@@ -234,6 +317,165 @@ impl DrmScheme {
|| self.pending_flip_fb.values().any(|&(_, id)| id == fb_id)
}
fn handle_has_gem_ref(handle: &Handle, gem_handle: GemHandle) -> bool {
handle.owned_gems.contains(&gem_handle)
}
fn gem_is_still_referenced(&self, gem_handle: GemHandle) -> bool {
self.handles
.values()
.any(|handle| Self::handle_has_gem_ref(handle, gem_handle))
}
fn gem_has_other_refs(&self, current_id: usize, gem_handle: GemHandle) -> bool {
self.handles.iter().any(|(&other_id, handle)| {
other_id != current_id && Self::handle_has_gem_ref(handle, gem_handle)
})
}
fn gem_is_mapped(&self, gem_handle: GemHandle) -> bool {
self.active_gem_maps.get(&gem_handle).copied().unwrap_or(0) != 0
}
fn gem_export_refcount(&self, gem_handle: GemHandle) -> usize {
self.gem_export_refs.get(&gem_handle).copied().unwrap_or(0)
}
fn bump_export_ref(&mut self, gem_handle: GemHandle) {
let entry = self.gem_export_refs.entry(gem_handle).or_insert(0);
*entry = entry.saturating_add(1);
}
fn drop_export_ref(&mut self, gem_handle: GemHandle) {
let remove_entry = match self.gem_export_refs.get_mut(&gem_handle) {
Some(count) if *count > 1 => {
*count -= 1;
false
}
Some(_) => true,
None => false,
};
if remove_entry {
self.gem_export_refs.remove(&gem_handle);
self.prime_exports.retain(|_, &mut h| h != gem_handle);
}
}
fn gem_can_close(&self, gem_handle: GemHandle) -> bool {
let backs_fb = self
.fb_registry
.values()
.any(|info| info.gem_handle == gem_handle);
!backs_fb
&& !self.gem_is_still_referenced(gem_handle)
&& !self.gem_is_mapped(gem_handle)
&& self.gem_export_refcount(gem_handle) == 0
}
fn maybe_close_gem(&mut self, gem_handle: GemHandle, context: &str) -> bool {
if !self.gem_can_close(gem_handle) {
return false;
}
match self.driver.gem_close(gem_handle) {
Ok(()) => {
self.prime_exports.retain(|_, &mut h| h != gem_handle);
true
}
Err(e) => {
warn!(
"redox-drm: {} gem_close({}) failed: {}",
context, gem_handle, e
);
false
}
}
}
fn allocate_handle(&mut self, node: NodeKind) -> usize {
let id = self.next_id;
self.next_id = self.next_id.saturating_add(1);
self.handles.insert(
id,
Handle {
node,
response: Vec::new(),
mapped_gem: None,
mapped_gem_refs: 0,
owned_fbs: Vec::new(),
owned_gems: Vec::new(),
closing: false,
},
);
id
}
fn finalize_handle_close(&mut self, handle: Handle) {
if let NodeKind::DmaBuf { gem_handle, .. } = handle.node {
self.drop_export_ref(gem_handle);
let _ = self.maybe_close_gem(gem_handle, "close dmabuf");
return;
}
let mut auto_closed_gems = HashSet::new();
for fb_id in &handle.owned_fbs {
if self.is_fb_active(*fb_id) {
continue;
}
if let Some(fb_info) = self.fb_registry.remove(fb_id) {
if self.maybe_close_gem(fb_info.gem_handle, "close") {
auto_closed_gems.insert(fb_info.gem_handle);
}
}
}
for gem_handle in handle.owned_gems {
if auto_closed_gems.contains(&gem_handle) {
continue;
}
let backs_fb = self
.fb_registry
.values()
.any(|info| info.gem_handle == gem_handle);
if !backs_fb && self.maybe_close_gem(gem_handle, "close gem") {
auto_closed_gems.insert(gem_handle);
}
}
}
fn pin_mapped_gem(&mut self, id: usize, gem_handle: GemHandle) -> Result<()> {
let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?;
handle.mapped_gem = Some(gem_handle);
handle.mapped_gem_refs = handle.mapped_gem_refs.saturating_add(1);
let entry = self.active_gem_maps.entry(gem_handle).or_insert(0);
*entry = entry.saturating_add(1);
Ok(())
}
fn unpin_mapped_gem(&mut self, id: usize) -> Result<()> {
let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?;
let gem_handle = match handle.mapped_gem {
Some(gem_handle) if handle.mapped_gem_refs != 0 => gem_handle,
_ => return Ok(()),
};
handle.mapped_gem_refs -= 1;
if handle.mapped_gem_refs == 0 {
handle.mapped_gem = None;
}
let remove_entry = match self.active_gem_maps.get_mut(&gem_handle) {
Some(count) if *count > 1 => {
*count -= 1;
false
}
Some(_) => true,
None => false,
};
if remove_entry {
self.active_gem_maps.remove(&gem_handle);
}
Ok(())
}
pub fn retire_vblank(&mut self, crtc_id: u32, vblank_count: u64) {
if let Some((expected, fb_id)) = self.pending_flip_fb.get(&crtc_id).copied() {
if expected <= vblank_count {
@@ -253,22 +495,7 @@ impl DrmScheme {
return;
}
self.fb_registry.remove(&fb_id);
let still_referenced = self
.fb_registry
.values()
.any(|i| i.gem_handle == gem_handle);
let gem_owned = self
.handles
.values()
.any(|h| h.owned_gems.contains(&gem_handle));
if !still_referenced && !gem_owned {
if let Err(e) = self.driver.gem_close(gem_handle) {
warn!(
"redox-drm: try_reap_fb gem_close({}) failed: {}",
gem_handle, e
);
}
}
let _ = self.maybe_close_gem(gem_handle, "try_reap_fb");
}
// ---- Encode helpers ----
@@ -461,7 +688,7 @@ impl DrmScheme {
let owned = self
.handles
.get(&id)
.map(|h| h.owned_gems.contains(&req.handle))
.map(|h| Self::handle_has_gem_ref(h, req.handle))
.unwrap_or(false);
if !owned {
warn!(
@@ -470,6 +697,15 @@ impl DrmScheme {
);
return Err(Error::new(EBADF));
}
if let Some(handle) = self.handles.get(&id) {
if handle.mapped_gem_refs != 0 && handle.mapped_gem != Some(req.handle) {
warn!(
"redox-drm: MAP_DUMB handle {} rejected — another GEM is still mapped",
req.handle
);
return Err(Error::new(EBUSY));
}
}
req.offset = self
.driver
.gem_mmap(req.handle)
@@ -505,9 +741,21 @@ impl DrmScheme {
);
return Err(Error::new(EBUSY));
}
self.driver
.gem_close(req.handle)
.map_err(driver_to_syscall)?;
if self.gem_is_mapped(req.handle) {
warn!(
"redox-drm: DESTROY_DUMB handle {} rejected — still mapped",
req.handle
);
return Err(Error::new(EBUSY));
}
let close_now = !self.gem_has_other_refs(id, req.handle)
&& self.gem_export_refcount(req.handle) == 0;
if close_now {
self.driver
.gem_close(req.handle)
.map_err(driver_to_syscall)?;
self.prime_exports.retain(|_, &mut h| h != req.handle);
}
if let Some(handle) = self.handles.get_mut(&id) {
handle.owned_gems.retain(|&h| h != req.handle);
}
@@ -584,7 +832,7 @@ impl DrmScheme {
let owned = self
.handles
.get(&id)
.map(|h| h.owned_gems.contains(&req.handle))
.map(|h| Self::handle_has_gem_ref(h, req.handle))
.unwrap_or(false);
if !owned {
warn!(
@@ -646,22 +894,7 @@ impl DrmScheme {
return Err(Error::new(EBUSY));
}
if let Some(fb_info) = self.fb_registry.remove(&req.fb_id) {
let still_referenced = self
.fb_registry
.values()
.any(|i| i.gem_handle == fb_info.gem_handle);
let still_owned = self
.handles
.values()
.any(|h| h.owned_gems.contains(&fb_info.gem_handle));
if !still_referenced && !still_owned {
if let Err(e) = self.driver.gem_close(fb_info.gem_handle) {
warn!(
"redox-drm: RMFB gem_close({}) failed: {}",
fb_info.gem_handle, e
);
}
}
let _ = self.maybe_close_gem(fb_info.gem_handle, "RMFB");
}
if let Some(handle) = self.handles.get_mut(&id) {
handle.owned_fbs.retain(|&fb| fb != req.fb_id);
@@ -690,6 +923,169 @@ impl DrmScheme {
bytes_of(&resp)
}
DRM_IOCTL_GEM_CREATE => {
let mut req = decode_wire::<DrmGemCreateWire>(payload)?;
if req.size == 0 {
return Err(Error::new(EINVAL));
}
req.handle = self
.driver
.gem_create(req.size)
.map_err(driver_to_syscall)?;
if let Some(handle) = self.handles.get_mut(&id) {
handle.owned_gems.push(req.handle);
}
bytes_of(&req)
}
DRM_IOCTL_GEM_CLOSE => {
let req = decode_wire::<DrmGemCloseWire>(payload)?;
let owned = self
.handles
.get(&id)
.map(|h| h.owned_gems.contains(&req.handle))
.unwrap_or(false);
if !owned {
warn!(
"redox-drm: GEM_CLOSE handle {} not owned by this fd",
req.handle
);
return Err(Error::new(EBADF));
}
let backs_fb = self
.fb_registry
.values()
.any(|info| info.gem_handle == req.handle);
if backs_fb {
warn!(
"redox-drm: GEM_CLOSE handle {} rejected — backs an active framebuffer",
req.handle
);
return Err(Error::new(EBUSY));
}
if self.gem_is_mapped(req.handle) {
warn!(
"redox-drm: GEM_CLOSE handle {} rejected — still mapped",
req.handle
);
return Err(Error::new(EBUSY));
}
let close_now = !self.gem_has_other_refs(id, req.handle)
&& self.gem_export_refcount(req.handle) == 0;
if close_now {
self.driver
.gem_close(req.handle)
.map_err(driver_to_syscall)?;
self.prime_exports.retain(|_, &mut h| h != req.handle);
}
if let Some(handle) = self.handles.get_mut(&id) {
handle.owned_gems.retain(|&h| h != req.handle);
}
Vec::new()
}
DRM_IOCTL_GEM_MMAP => {
let mut req = decode_wire::<DrmGemMmapWire>(payload)?;
let owned = self
.handles
.get(&id)
.map(|h| Self::handle_has_gem_ref(h, req.handle))
.unwrap_or(false);
if !owned {
warn!(
"redox-drm: GEM_MMAP handle {} not owned by this fd",
req.handle
);
return Err(Error::new(EBADF));
}
if let Some(handle) = self.handles.get(&id) {
if handle.mapped_gem_refs != 0 && handle.mapped_gem != Some(req.handle) {
warn!(
"redox-drm: GEM_MMAP handle {} rejected — another GEM is still mapped",
req.handle
);
return Err(Error::new(EBUSY));
}
}
req.offset = self
.driver
.gem_mmap(req.handle)
.map_err(driver_to_syscall)? as u64;
if let Some(handle) = self.handles.get_mut(&id) {
handle.mapped_gem = Some(req.handle);
}
bytes_of(&req)
}
DRM_IOCTL_PRIME_HANDLE_TO_FD => {
let req = decode_wire::<DrmPrimeHandleToFdWire>(payload)?;
let owned = self
.handles
.get(&id)
.map(|h| Self::handle_has_gem_ref(h, req.handle))
.unwrap_or(false);
if !owned {
warn!(
"redox-drm: PRIME_HANDLE_TO_FD handle {} not owned by this fd",
req.handle
);
return Err(Error::new(EBADF));
}
let token = self.next_export_token;
self.next_export_token = self.next_export_token.saturating_add(1);
self.prime_exports.insert(token, req.handle);
let resp = DrmPrimeHandleToFdResponseWire {
fd: token as i32,
_pad: 0,
};
bytes_of(&resp)
}
// PRIME_FD_TO_HANDLE: import an exported dmabuf token as a GEM handle
// owned by this fd.
DRM_IOCTL_PRIME_FD_TO_HANDLE => {
    let req = decode_wire::<DrmPrimeFdToHandleWire>(payload)?;
    // Negative "fds" can never be valid export tokens.
    let token = if req.fd >= 0 {
        req.fd as u32
    } else {
        warn!("redox-drm: PRIME_FD_TO_HANDLE invalid token {}", req.fd);
        return Err(Error::new(EBADF));
    };
    // The token comes from fpath() on the dmabuf fd, which embeds
    // the opaque export token (not the raw GEM handle).
    let gem_handle = match self.prime_exports.get(&token).copied() {
        Some(h) => h,
        None => {
            warn!("redox-drm: PRIME_FD_TO_HANDLE token {} not found", token);
            return Err(Error::new(ENOENT));
        }
    };
    // Verify the GEM is still live — the exporter may have closed it
    // before any dmabuf fd was opened, leaving a stale token.
    self.driver.gem_size(gem_handle).map_err(|_| {
        warn!(
            "redox-drm: PRIME_FD_TO_HANDLE token {} maps to dead GEM {}",
            token, gem_handle
        );
        // Clean up the stale token so future calls fail fast.
        self.prime_exports.remove(&token);
        Error::new(ENOENT)
    })?;
    // Importing grants this fd ownership of the GEM (idempotently —
    // a repeat import does not duplicate the entry).
    let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?;
    if !handle.owned_gems.contains(&gem_handle) {
        handle.owned_gems.push(gem_handle);
    }
    let resp = DrmPrimeFdToHandleResponseWire {
        handle: gem_handle,
        _pad: 0,
    };
    bytes_of(&resp)
}
// Any unrecognized ioctl is logged and rejected with EOPNOTSUPP.
// (Arm is truncated here by the diff hunk boundary that follows.)
_ => {
    warn!("redox-drm: unsupported ioctl {:#x}", request);
    return Err(Error::new(EOPNOTSUPP));
@@ -720,21 +1116,34 @@ impl SchemeBlockMut for DrmScheme {
// (inside open(): path → NodeKind resolution; the match begins above
// this view)
let connector_id = tail.parse::<u32>().map_err(|_| Error::new(ENOENT))?;
NodeKind::Connector(connector_id)
}
p if p.starts_with("card0/dmabuf/") => {
    // dmabuf paths carry the opaque export token handed out by
    // PRIME_HANDLE_TO_FD, not a raw GEM handle.
    let tail = p.trim_start_matches("card0/dmabuf/");
    let token = tail.parse::<u32>().map_err(|_| Error::new(ENOENT))?;
    let gem_handle = match self.prime_exports.get(&token).copied() {
        Some(h) => h,
        None => return Err(Error::new(ENOENT)),
    };
    // Reject — and garbage-collect — tokens whose GEM has since died.
    self.driver.gem_size(gem_handle).map_err(|_| {
        warn!(
            "redox-drm: open dmabuf token {} maps to dead GEM {}",
            token, gem_handle
        );
        self.prime_exports.remove(&token);
        Error::new(ENOENT)
    })?;
    NodeKind::DmaBuf {
        gem_handle,
        export_token: token,
    }
}
_ => return Err(Error::new(ENOENT)),
};
// NOTE(review): two handle-allocation paths appear below — the manual
// next_id/insert sequence and allocate_handle(node). `node` is moved
// into the manual Handle and then borrowed again, and `id` is bound
// twice; this reads like a diff rendering with the removed lines still
// present. Confirm only bump_export_ref + allocate_handle should remain.
let id = self.next_id;
self.next_id = self.next_id.saturating_add(1);
self.handles.insert(
    id,
    Handle {
        node,
        response: Vec::new(),
        mapped_gem: None,
        owned_fbs: Vec::new(),
        owned_gems: Vec::new(),
    },
);
// Opening a dmabuf node takes a reference on the exported GEM so that
// GEM_CLOSE cannot free it out from under the importer.
if let NodeKind::DmaBuf { gem_handle, .. } = &node {
    self.bump_export_ref(*gem_handle);
}
let id = self.allocate_handle(node);
Ok(Some(id))
}
@@ -763,6 +1172,7 @@ impl SchemeBlockMut for DrmScheme {
// (inside fpath(): render the scheme-absolute path for this node so
// clients can re-open it from the fpath output)
let path = match handle.node {
    NodeKind::Card => "drm:card0".to_string(),
    // NOTE(review): no '/' between "card0" and "Connector", unlike the
    // "card0/dmabuf/" form below — confirm open() parses this exact
    // spelling, otherwise this looks like a typo.
    NodeKind::Connector(cid) => format!("drm:card0Connector/{cid}"),
    NodeKind::DmaBuf { export_token, .. } => format!("drm:card0/dmabuf/{export_token}"),
};
// Copy as much of the path as fits the caller's buffer; the rest of
// this function (the actual copy/return) is outside this view.
let bytes = path.as_bytes();
let len = bytes.len().min(buf.len());
@@ -789,54 +1199,21 @@ impl SchemeBlockMut for DrmScheme {
}
/// Close an fd. If the fd still has live GEM mappings, tear-down is
/// deferred: the handle is flagged `closing` and the final munmap on it
/// performs the cleanup instead.
fn close(&mut self, id: usize) -> Result<Option<usize>> {
    let mapped = self
        .handles
        .get(&id)
        .ok_or_else(|| Error::new(EBADF))?
        .mapped_gem_refs;
    if mapped != 0 {
        // Defer: report success now; the last munmap finalizes.
        let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?;
        handle.closing = true;
        return Ok(Some(0));
    }
    if let Some(handle) = self.handles.remove(&id) {
        // NOTE(review): the inline FB/GEM cleanup below coexists with the
        // finalize_handle_close(handle) call at the end of this branch
        // (which would also be a use-after-move of `handle.owned_gems`).
        // This reads like a diff rendering where the removed inline
        // cleanup survived next to the new helper call — confirm only
        // finalize_handle_close should remain.
        let mut auto_closed_gems = HashSet::new();
        for fb_id in &handle.owned_fbs {
            // Framebuffers still active on a display survive the close.
            let in_use = self.is_fb_active(*fb_id);
            if in_use {
                continue;
            }
            if let Some(fb_info) = self.fb_registry.remove(fb_id) {
                // Free the backing GEM only when no other FB or fd still
                // references it.
                let still_referenced = self
                    .fb_registry
                    .values()
                    .any(|i| i.gem_handle == fb_info.gem_handle);
                let still_owned = self
                    .handles
                    .values()
                    .any(|h| h.owned_gems.contains(&fb_info.gem_handle));
                if !still_referenced && !still_owned {
                    match self.driver.gem_close(fb_info.gem_handle) {
                        Ok(()) => {
                            auto_closed_gems.insert(fb_info.gem_handle);
                        }
                        Err(e) => {
                            // Best effort: log and keep tearing down.
                            warn!(
                                "redox-drm: close gem_close({}) failed: {}",
                                fb_info.gem_handle, e
                            );
                        }
                    }
                }
            }
        }
        for gem_handle in handle.owned_gems {
            // Skip GEMs the FB pass above already released.
            if auto_closed_gems.contains(&gem_handle) {
                continue;
            }
            let backs_fb = self
                .fb_registry
                .values()
                .any(|info| info.gem_handle == gem_handle);
            if !backs_fb {
                if let Err(e) = self.driver.gem_close(gem_handle) {
                    warn!(
                        "redox-drm: close gem GEM {} cleanup failed: {}",
                        gem_handle, e
                    );
                }
            }
        }
        self.finalize_handle_close(handle);
    } else {
        return Err(Error::new(EBADF));
    }
    Ok(Some(0))
}
@@ -844,19 +1221,40 @@ impl SchemeBlockMut for DrmScheme {
/// Prepare an mmap of the fd's selected GEM (or, for a DmaBuf node, its
/// exported GEM), validating the requested offset/size window against
/// the object size before handing back the map address.
fn mmap_prep(
    &mut self,
    id: usize,
    _offset: u64,
    _size: usize,
    // NOTE(review): both the underscored and the live offset/size
    // parameter pairs appear here — this looks like a diff rendering
    // with the removed lines still present (rustc would reject the
    // duplicates). Confirm only `offset: u64, size: usize` remain.
    offset: u64,
    size: usize,
    _flags: MapFlags,
) -> Result<Option<usize>> {
    // NOTE(review): the three lines below (two lookups plus the dangling
    // `let addr = self`) duplicate the replacement logic that follows —
    // same stale-diff pattern as the parameter list above.
    let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
    let gem_handle = handle.mapped_gem.ok_or_else(|| Error::new(EINVAL))?;
    let addr = self
    let gem_handle = {
        let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
        match handle.node {
            // DmaBuf nodes map their exported GEM directly.
            NodeKind::DmaBuf { gem_handle, .. } => gem_handle,
            // Card nodes must have selected a GEM via GEM_MMAP first.
            _ => handle.mapped_gem.ok_or_else(|| Error::new(EINVAL))?,
        }
    };
    // Bounds-check the window so a caller cannot map past the end of
    // the buffer.
    let gem_size = self
        .driver
        .gem_size(gem_handle)
        .map_err(driver_to_syscall)?;
    if offset > gem_size {
        return Err(Error::new(EINVAL));
    }
    let remaining = gem_size - offset;
    if size as u64 > remaining {
        return Err(Error::new(EINVAL));
    }
    let base_addr = self
        .driver
        .gem_mmap(gem_handle)
        .map_err(driver_to_syscall)?;
    let addr = base_addr + offset as usize;
    // Pin the GEM so GEM_CLOSE/close defer while the mapping is live.
    self.pin_mapped_gem(id, gem_handle)?;
    // NOTE(review): the debug! call carries two interleaved format
    // string/argument sets — old and new diff lines merged; confirm
    // only the offset/size variant remains.
    debug!(
        "redox-drm: mmap_prep GEM handle {} at addr={:#x}",
        gem_handle, addr
        "redox-drm: mmap_prep GEM handle {} offset={} size={} at addr={:#x}",
        gem_handle, offset, size, addr
    );
    Ok(Some(addr))
}
@@ -864,11 +1262,26 @@ impl SchemeBlockMut for DrmScheme {
/// Unmap a GEM mapping on this fd: drop one mapping pin and, if the fd
/// was flagged `closing` and this was its last live mapping, finish the
/// deferred close started in close().
fn munmap(
    &mut self,
    id: usize,
    _offset: u64,
    _size: usize,
    // NOTE(review): duplicated parameter pairs again — stale diff lines
    // merged into the text; confirm only `offset`/`size` remain.
    offset: u64,
    size: usize,
    _flags: MunmapFlags,
) -> Result<Option<usize>> {
    // Validate the fd exists before touching any state.
    let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?;
    // Release one pin on the fd's mapped GEM.
    self.unpin_mapped_gem(id)?;
    debug!(
        "redox-drm: munmap id={} offset={} size={}",
        id, offset, size
    );
    // A close() that arrived while mappings were live set `closing`;
    // once the pin count hits zero, complete the teardown here.
    let should_finalize = self
        .handles
        .get(&id)
        .map(|handle| handle.closing && handle.mapped_gem_refs == 0)
        .unwrap_or(false);
    if should_finalize {
        if let Some(handle) = self.handles.remove(&id) {
            self.finalize_handle_close(handle);
        }
    }
    Ok(Some(0))
}
}