milestone: desktop path Phases 1-5

Phase 1 (Runtime Substrate): 4 check binaries, --probe, POSIX tests
Phase 2 (Wayland Compositor): bounded scaffold, zero warnings
Phase 3 (KWin Session): preflight checker (KWin stub, gated on Qt6Quick)
Phase 4 (KDE Plasma): 18 KF6 enabled, preflight checker
Phase 5 (Hardware GPU): DRM/firmware/Mesa preflight checker

Build: zero warnings, all scripts syntax-clean. Oracle-verified.
This commit is contained in:
2026-04-29 09:54:06 +01:00
parent b23714f542
commit 8acc73d774
508 changed files with 76526 additions and 396 deletions
@@ -0,0 +1,28 @@
# Cargo manifest for the virtio-gpu userspace driver daemon.
[package]
name = "virtio-gpud"
description = "VirtIO-GPU driver"
version = "0.1.0"
edition = "2021"
authors = ["Anhad Singh <andypython@protonmail.com>"]

[dependencies]
drm-sys.workspace = true
log.workspace = true
static_assertions.workspace = true
# Only the single-threaded executor is needed (futures::executor::block_on).
futures = { version = "0.3.28", features = ["executor"] }
anyhow.workspace = true
# In-tree crates: shared driver helpers, daemonization, graphics/KMS
# abstractions, virtio transport, and the PCI daemon interface.
common = { path = "../../common" }
daemon = { path = "../../../daemon" }
driver-graphics = { path = "../driver-graphics" }
virtio-core = { path = "../../virtio-core" }
pcid = { path = "../../pcid" }
redox_event.workspace = true
redox_syscall.workspace = true
orbclient.workspace = true
spin.workspace = true
libredox.workspace = true

# Inherit lint configuration from the workspace root.
[lints]
workspace = true
@@ -0,0 +1,615 @@
//! `virtio-gpu` is a virtio based graphics adapter. It can operate in 2D mode and in 3D mode.
//!
//! XXX: 3D mode will offload rendering ops to the host gpu and therefore requires a GPU with 3D support
//! on the host machine.
// Notes for the future:
//
// `virtio-gpu` 2D acceleration is just blitting. 3D acceleration has 2 kinds:
// - virgl - OpenGL
// - venus - Vulkan
//
// The Venus driver requires support for the following from the `virtio-gpu` kernel driver:
// - VIRTGPU_PARAM_3D_FEATURES
// - VIRTGPU_PARAM_CAPSET_QUERY_FIX
// - VIRTGPU_PARAM_RESOURCE_BLOB
// - VIRTGPU_PARAM_HOST_VISIBLE
// - VIRTGPU_PARAM_CROSS_DEVICE
// - VIRTGPU_PARAM_CONTEXT_INIT
//
// cc https://docs.mesa3d.org/drivers/venus.html
// cc https://docs.mesa3d.org/drivers/virgl.html
use std::os::fd::AsRawFd;
use std::sync::atomic::{AtomicU32, Ordering};
use driver_graphics::GraphicsAdapter;
use event::{user_data, EventQueue};
use pcid_interface::PciFunctionHandle;
use virtio_core::utils::VolatileCell;
use virtio_core::MSIX_PRIMARY_VECTOR;
mod scheme;
// Virtio-gpu feature bits. Only EDID is negotiated today; the commented-out
// bits document the remaining feature space (3D/blob support) for later.
//const VIRTIO_GPU_F_VIRGL: u32 = 0;
/// Device can supply EDID blobs for its scanouts (VIRTIO_GPU_F_EDID).
const VIRTIO_GPU_F_EDID: u32 = 1;
//const VIRTIO_GPU_F_RESOURCE_UUID: u32 = 2;
//const VIRTIO_GPU_F_RESOURCE_BLOB: u32 = 3;
//const VIRTIO_GPU_F_CONTEXT_INIT: u32 = 4;
/// Bit in `GpuConfig::events_read`: the display configuration has changed.
const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0;
/// Size of the fixed scanout table in `GetDisplayInfo` responses.
const VIRTIO_GPU_MAX_SCANOUTS: usize = 16;
/// Device configuration space of a virtio-gpu device, accessed in place
/// through `VolatileCell` (see the cast from `device_space` in `daemon`).
#[repr(C)]
pub struct GpuConfig {
    /// Signals pending events to the driver.
    pub events_read: VolatileCell<u32>, // read-only
    /// Clears pending events in the device (write-to-clear).
    pub events_clear: VolatileCell<u32>, // write-only
    /// Number of scanouts (displays) exposed by the device; entries beyond
    /// this count in `GetDisplayInfo.display_info` are not valid.
    pub num_scanouts: VolatileCell<u32>,
    /// Number of capability sets (used by the 3D paths; unused in 2D mode).
    pub num_capsets: VolatileCell<u32>,
}

impl GpuConfig {
    /// Reads the current scanout count from device configuration space.
    #[inline]
    pub fn num_scanouts(&self) -> u32 {
        self.num_scanouts.get()
    }
}
/// Command/response type carried in `ControlHeader::ty`.
///
/// The discriminant values are fixed by the virtio specification
/// (`VIRTIO_GPU_CMD_*` / `VIRTIO_GPU_RESP_*`); each group starts at an
/// explicit base and the remaining variants count up implicitly.
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(u32)]
pub enum CommandTy {
    Undefined = 0,
    // 2D commands
    GetDisplayInfo = 0x0100,
    ResourceCreate2d,
    ResourceUnref,
    SetScanout,
    ResourceFlush,
    TransferToHost2d,
    ResourceAttachBacking,
    ResourceDetachBacking,
    GetCapsetInfo,
    GetCapset,
    GetEdid,
    ResourceAssignUuid,
    ResourceCreateBlob,
    SetScanoutBlob,
    // 3D commands
    CtxCreate = 0x0200,
    CtxDestroy,
    CtxAttachResource,
    CtxDetachResource,
    ResourceCreate3d,
    TransferToHost3d,
    TransferFromHost3d,
    Submit3d,
    ResourceMapBlob,
    ResourceUnmapBlob,
    // cursor commands
    UpdateCursor = 0x0300,
    MoveCursor,
    // success responses
    RespOkNodata = 0x1100,
    RespOkDisplayInfo,
    RespOkCapsetInfo,
    RespOkCapset,
    RespOkEdid,
    RespOkResourceUuid,
    RespOkMapInfo,
    // error responses
    RespErrUnspec = 0x1200,
    RespErrOutOfMemory,
    RespErrInvalidScanoutId,
    RespErrInvalidResourceId,
    RespErrInvalidContextId,
    RespErrInvalidParameter,
}

// The wire format requires the type field to be exactly 32 bits wide.
static_assertions::const_assert_eq!(core::mem::size_of::<CommandTy>(), 4);

/// `ControlHeader::flags` bit: request a fence for this command.
const VIRTIO_GPU_FLAG_FENCE: u32 = 1 << 0;
//const VIRTIO_GPU_FLAG_INFO_RING_IDX: u32 = 1 << 1;
/// Common header prepended to every virtio-gpu command and response
/// (wire layout fixed by `#[repr(C)]`).
#[derive(Debug)]
#[repr(C)]
pub struct ControlHeader {
    pub ty: CommandTy,
    pub flags: u32,
    pub fence_id: u64,
    pub ctx_id: u32,
    pub ring_index: u8,
    padding: [u8; 3],
}

impl ControlHeader {
    /// A header of the given command type with every other field zeroed.
    pub fn with_ty(ty: CommandTy) -> Self {
        let mut header = Self::default();
        header.ty = ty;
        header
    }
}

impl Default for ControlHeader {
    /// An `Undefined` header, fully zeroed.
    fn default() -> Self {
        Self {
            ty: CommandTy::Undefined,
            flags: 0,
            fence_id: 0,
            ctx_id: 0,
            ring_index: 0,
            padding: [0u8; 3],
        }
    }
}
/// Rectangle in framebuffer coordinates (x/y origin plus extent).
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct GpuRect {
    pub x: u32,
    pub y: u32,
    pub width: u32,
    pub height: u32,
}

impl GpuRect {
    /// Convenience constructor; arguments mirror the field order.
    pub fn new(x: u32, y: u32, width: u32, height: u32) -> Self {
        GpuRect {
            width,
            height,
            x,
            y,
        }
    }
}
/// Per-scanout entry in a `GetDisplayInfo` response.
#[derive(Debug)]
#[repr(C)]
pub struct DisplayInfo {
    // Position and size of this scanout as reported by the host.
    rect: GpuRect,
    /// Non-zero when the scanout is enabled.
    pub enabled: u32,
    pub flags: u32,
}

/// `VIRTIO_GPU_CMD_GET_DISPLAY_INFO` response: a header followed by one
/// entry per possible scanout. Only the first `GpuConfig::num_scanouts()`
/// entries are meaningful (see `VirtGpuAdapter::update_displays`).
#[derive(Debug)]
#[repr(C)]
pub struct GetDisplayInfo {
    pub header: ControlHeader,
    pub display_info: [DisplayInfo; VIRTIO_GPU_MAX_SCANOUTS],
}
impl Default for GetDisplayInfo {
fn default() -> Self {
Self {
header: ControlHeader {
ty: CommandTy::GetDisplayInfo,
..Default::default()
},
display_info: unsafe { core::mem::zeroed() },
}
}
}
// Monotonic allocator for host resource ids. Id 0 is reserved to mean
// "no resource", so allocation starts at 1.
static RESOURCE_ALLOC: AtomicU32 = AtomicU32::new(1);

/// Handle naming a host-side virtio-gpu resource.
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
#[repr(C)]
pub struct ResourceId(u32);

impl ResourceId {
    /// The reserved "no resource" id.
    const NONE: ResourceId = Self(0);

    /// Hands out the next unused resource id.
    fn alloc() -> Self {
        let id = RESOURCE_ALLOC.fetch_add(1, Ordering::SeqCst);
        Self(id)
    }
}
/// Pixel format of a 2D resource (subset of `VIRTIO_GPU_FORMAT_*`).
/// Discriminants are fixed by the wire protocol. Both listed formats are
/// 32 bits per pixel (see `create_dumb_buffer`, which assumes bpp = 32).
#[derive(Debug, Copy, Clone)]
#[repr(u32)]
pub enum ResourceFormat {
    Unknown = 0,
    Bgrx = 2,
    Xrgb = 4,
}
/// `VIRTIO_GPU_CMD_RESOURCE_CREATE_2D`: create a host resource of the given
/// format and extent.
#[derive(Debug)]
#[repr(C)]
pub struct ResourceCreate2d {
    pub header: ControlHeader,
    resource_id: ResourceId,
    format: ResourceFormat,
    width: u32,
    height: u32,
}

impl ResourceCreate2d {
    /// Builds a create-2D command for a `width` x `height` resource.
    fn new(resource_id: ResourceId, format: ResourceFormat, width: u32, height: u32) -> Self {
        let header = ControlHeader::with_ty(CommandTy::ResourceCreate2d);
        Self {
            header,
            width,
            height,
            format,
            resource_id,
        }
    }
}
/// One guest-physical memory chunk of a resource's backing storage; an array
/// of these follows an `AttachBacking` command (see `create_dumb_buffer`).
#[derive(Debug)]
#[repr(C)]
pub struct MemEntry {
    /// Guest-physical address of the chunk.
    pub address: u64,
    /// Length of the chunk in bytes.
    pub length: u32,
    pub padding: u32,
}
/// `VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING`: attach guest memory to a host
/// resource. The command is followed on the wire by `num_entries` `MemEntry`
/// records describing the backing pages.
#[derive(Debug)]
#[repr(C)]
pub struct AttachBacking {
    pub header: ControlHeader,
    pub resource_id: ResourceId,
    pub num_entries: u32,
}

impl AttachBacking {
    /// Builds an attach-backing command for `resource_id` covering
    /// `num_entries` memory chunks.
    pub fn new(resource_id: ResourceId, num_entries: u32) -> Self {
        let header = ControlHeader::with_ty(CommandTy::ResourceAttachBacking);
        Self {
            header,
            num_entries,
            resource_id,
        }
    }
}
/// `VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING`: detach a resource's guest
/// backing storage.
#[derive(Debug)]
#[repr(C)]
pub struct DetachBacking {
    pub header: ControlHeader,
    pub resource_id: ResourceId,
    pub padding: u32,
}

impl DetachBacking {
    /// Builds a detach-backing command for `resource_id`.
    pub fn new(resource_id: ResourceId) -> Self {
        let header = ControlHeader::with_ty(CommandTy::ResourceDetachBacking);
        Self {
            header,
            padding: 0,
            resource_id,
        }
    }
}
/// `VIRTIO_GPU_CMD_RESOURCE_FLUSH`: flush a region of a scanned-out resource
/// to the display.
#[derive(Debug)]
#[repr(C)]
pub struct ResourceFlush {
    pub header: ControlHeader,
    pub rect: GpuRect,
    pub resource_id: ResourceId,
    pub padding: u32,
}

impl ResourceFlush {
    /// Builds a flush command for the given region of `resource_id`.
    pub fn new(resource_id: ResourceId, rect: GpuRect) -> Self {
        let header = ControlHeader::with_ty(CommandTy::ResourceFlush);
        Self {
            header,
            padding: 0,
            resource_id,
            rect,
        }
    }
}
/// `VIRTIO_GPU_CMD_RESOURCE_UNREF`: destroy a host resource (sent from
/// `VirtGpuFramebuffer`'s `Drop` impl).
#[derive(Debug)]
#[repr(C)]
pub struct ResourceUnref {
    pub header: ControlHeader,
    pub resource_id: ResourceId,
    pub padding: u32,
}

impl ResourceUnref {
    /// Builds an unref command for `resource_id`.
    pub fn new(resource_id: ResourceId) -> Self {
        let header = ControlHeader::with_ty(CommandTy::ResourceUnref);
        Self {
            header,
            padding: 0,
            resource_id,
        }
    }
}
/// `VIRTIO_GPU_CMD_SET_SCANOUT`: bind a resource (or `ResourceId::NONE` to
/// disable) to a scanout for display.
#[repr(C)]
#[derive(Debug)]
pub struct SetScanout {
    pub header: ControlHeader,
    pub rect: GpuRect,
    pub scanout_id: u32,
    pub resource_id: ResourceId,
}

impl SetScanout {
    /// Builds a set-scanout command showing `rect` of `resource_id` on
    /// scanout `scanout_id`.
    pub fn new(scanout_id: u32, resource_id: ResourceId, rect: GpuRect) -> Self {
        let header = ControlHeader::with_ty(CommandTy::SetScanout);
        Self {
            header,
            resource_id,
            scanout_id,
            rect,
        }
    }
}
/// `VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D`: copy a region of the guest backing
/// storage into the host resource, starting at `offset` bytes.
#[derive(Debug)]
#[repr(C)]
pub struct XferToHost2d {
    pub header: ControlHeader,
    pub rect: GpuRect,
    pub offset: u64,
    pub resource_id: ResourceId,
    pub padding: u32,
}

impl XferToHost2d {
    /// Builds a transfer command for the given region of `resource_id`.
    pub fn new(resource_id: ResourceId, rect: GpuRect, offset: u64) -> Self {
        let header = ControlHeader::with_ty(CommandTy::TransferToHost2d);
        Self {
            header,
            padding: 0,
            resource_id,
            offset,
            rect,
        }
    }
}
/// `VIRTIO_GPU_CMD_GET_EDID`: request the EDID blob for one scanout
/// (only valid when `VIRTIO_GPU_F_EDID` was negotiated).
#[derive(Debug)]
#[repr(C)]
pub struct GetEdid {
    pub header: ControlHeader,
    pub scanout: u32,
    pub padding: u32,
}

impl GetEdid {
    /// Builds an EDID request for scanout `scanout_id`.
    pub fn new(scanout_id: u32) -> Self {
        let header = ControlHeader::with_ty(CommandTy::GetEdid);
        Self {
            header,
            padding: 0,
            scanout: scanout_id,
        }
    }
}
/// Response buffer for `GetEdid`: the device fills in `size` bytes of the
/// fixed 1024-byte `edid` array (see `VirtGpuAdapter::get_edid`).
#[derive(Debug)]
#[repr(C)]
pub struct GetEdidResp {
    pub header: ControlHeader,
    pub size: u32,
    pub padding: u32,
    pub edid: [u8; 1024],
}

impl GetEdidResp {
    /// An empty response buffer for the device to fill in.
    ///
    /// Kept for existing callers; delegates to [`Default`], which was added
    /// so the type follows the `new`/`Default` convention
    /// (clippy `new_without_default`).
    pub fn new() -> Self {
        Self::default()
    }
}

impl Default for GetEdidResp {
    fn default() -> Self {
        Self {
            header: ControlHeader::with_ty(CommandTy::GetEdid),
            size: 0,
            padding: 0,
            edid: [0; 1024],
        }
    }
}
/// Cursor position on a scanout, embedded in cursor commands.
#[derive(Debug)]
#[repr(C)]
pub struct CursorPos {
    pub scanout_id: u32,
    pub x: i32,
    pub y: i32,
    _padding: u32,
}

impl CursorPos {
    /// Position `(x, y)` on scanout `scanout_id`.
    pub fn new(scanout_id: u32, x: i32, y: i32) -> Self {
        CursorPos {
            _padding: 0,
            scanout_id,
            x,
            y,
        }
    }
}
/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */
/// `VIRTIO_GPU_CMD_UPDATE_CURSOR`: set the cursor image to `resource_id`
/// with the given hotspot, and move it to `pos`.
#[derive(Debug)]
#[repr(C)]
pub struct UpdateCursor {
    pub header: ControlHeader,
    pub pos: CursorPos,
    pub resource_id: ResourceId,
    pub hot_x: i32,
    pub hot_y: i32,
    _padding: u32,
}

impl UpdateCursor {
    /// Builds an update-cursor command targeting scanout 0.
    pub fn update_cursor(x: i32, y: i32, hot_x: i32, hot_y: i32, resource_id: ResourceId) -> Self {
        let pos = CursorPos::new(0, x, y);
        Self {
            header: ControlHeader::with_ty(CommandTy::UpdateCursor),
            _padding: 0,
            resource_id,
            hot_y,
            hot_x,
            pos,
        }
    }
}
/// `VIRTIO_GPU_CMD_MOVE_CURSOR`: reposition the hardware cursor without
/// changing its image.
///
/// Fix: this struct is handed to the device via DMA (see
/// `VirtGpuAdapter::move_cursor`) but was missing `#[repr(C)]`, leaving its
/// layout unspecified under the default Rust representation. It now carries
/// `#[repr(C)]` and `Debug` like its sibling [`UpdateCursor`], whose wire
/// layout it shares.
#[derive(Debug)]
#[repr(C)]
pub struct MoveCursor {
    pub header: ControlHeader,
    pub pos: CursorPos,
    pub resource_id: ResourceId,
    pub hot_x: i32,
    pub hot_y: i32,
    _padding: u32,
}

impl MoveCursor {
    /// Builds a move-cursor command for scanout 0; the image-related fields
    /// are left zeroed (unused for a plain move).
    pub fn move_cursor(x: i32, y: i32) -> Self {
        Self {
            header: ControlHeader::with_ty(CommandTy::MoveCursor),
            pos: CursorPos::new(0, x, y),
            resource_id: ResourceId(0),
            hot_x: 0,
            hot_y: 0,
            _padding: 0,
        }
    }
}
// The probed virtio device; stored in a `Once` so references with 'static
// lifetime can be handed out after initialization in `daemon`.
static DEVICE: spin::Once<virtio_core::Device> = spin::Once::new();

fn main() {
    pcid_interface::pci_daemon(daemon_runner);
}

/// pcid entry point. `daemon` terminates the process itself on success
/// (`std::process::exit`), so control never reaches `unreachable!` unless
/// the unwrap panics first.
fn daemon_runner(redox_daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
    daemon(redox_daemon, pcid_handle).unwrap();
    unreachable!();
}
/// Brings up the virtio-gpu device, creates the display scheme, and runs the
/// event loop until the process exits.
///
/// Order matters throughout: feature negotiation must precede queue setup,
/// and the event queue must exist before `GpuScheme::new` (see the FIXME
/// below).
fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow::Result<()> {
    common::setup_logging(
        "graphics",
        "pci",
        "virtio-gpud",
        common::output_level(),
        common::file_level(),
    );
    // Double check that we have the right device.
    //
    // 0x1050 - virtio-gpu
    let pci_config = pcid_handle.config();
    assert_eq!(pci_config.func.full_device_id.device_id, 0x1050);
    log::info!("virtio-gpu: initiating startup sequence :^)");
    let device = DEVICE.try_call_once(|| virtio_core::probe_device(&mut pcid_handle))?;
    // SAFETY-relevant: reinterprets the device configuration space as
    // `GpuConfig` (#[repr(C)], accessed via VolatileCell).
    let config = unsafe { &mut *(device.device_space as *mut GpuConfig) };
    // Negotiate features.
    let has_edid = device.transport.check_device_feature(VIRTIO_GPU_F_EDID);
    if has_edid {
        device.transport.ack_driver_feature(VIRTIO_GPU_F_EDID);
    }
    device.transport.finalize_features();
    // Queue for sending control commands.
    let control_queue = device
        .transport
        .setup_queue(MSIX_PRIMARY_VECTOR, &device.irq_handle)?;
    // Queue for sending cursor updates.
    let cursor_queue = device
        .transport
        .setup_queue(MSIX_PRIMARY_VECTOR, &device.irq_handle)?;
    device.transport.setup_config_notify(MSIX_PRIMARY_VECTOR);
    device.transport.run_device();
    // Needs to be before GpuScheme::new to avoid a deadlock due to initnsmgr blocking on
    // /scheme/event as it is already blocked on opening /scheme/display.virtio-gpu.
    // FIXME change the initnsmgr to not block on openat for the target scheme.
    let event_queue: EventQueue<Source> =
        EventQueue::new().expect("virtio-gpud: failed to create event queue");
    let mut scheme = scheme::GpuScheme::new(
        config,
        control_queue.clone(),
        cursor_queue.clone(),
        device.transport.clone(),
        has_edid,
    )?;
    // Signal readiness to the init system only once the scheme exists.
    daemon.ready();
    user_data! {
        enum Source {
            Input,
            Scheme,
            Interrupt,
        }
    }
    event_queue
        .subscribe(
            scheme.inputd_event_handle().as_raw_fd() as usize,
            Source::Input,
            event::EventFlags::READ,
        )
        .unwrap();
    event_queue
        .subscribe(
            scheme.event_handle().raw(),
            Source::Scheme,
            event::EventFlags::READ,
        )
        .unwrap();
    event_queue
        .subscribe(
            device.irq_handle.as_raw_fd() as usize,
            Source::Interrupt,
            event::EventFlags::READ,
        )
        .unwrap();
    // Handle each source once up front to drain anything that arrived before
    // the subscriptions above, then block on the event queue.
    let all = [Source::Input, Source::Scheme, Source::Interrupt];
    for event in all
        .into_iter()
        .chain(event_queue.map(|e| e.expect("virtio-gpud: failed to get next event").user_data))
    {
        match event {
            Source::Input => scheme.handle_vt_events(),
            Source::Scheme => {
                scheme
                    .tick()
                    .expect("virtio-gpud: failed to process scheme events");
            }
            // Config-change interrupt: re-read events until the config
            // generation is stable, so a change racing with the read is not
            // lost.
            Source::Interrupt => loop {
                let before_gen = device.transport.config_generation();
                let events = scheme.adapter().config.events_read.get();
                if events & VIRTIO_GPU_EVENT_DISPLAY != 0 {
                    // Display layout changed: re-query displays, re-probe
                    // every connector, then notify clients and acknowledge
                    // the event (write-to-clear).
                    let (adapter, objects) = scheme.adapter_and_kms_objects_mut();
                    futures::executor::block_on(async { adapter.update_displays().await.unwrap() });
                    for connector_id in objects.connector_ids().to_vec() {
                        adapter.probe_connector(objects, connector_id);
                    }
                    scheme.notify_displays_changed();
                    scheme
                        .adapter_mut()
                        .config
                        .events_clear
                        .set(VIRTIO_GPU_EVENT_DISPLAY);
                }
                let after_gen = device.transport.config_generation();
                if before_gen == after_gen {
                    break;
                }
            },
        }
    }
    std::process::exit(0);
}
@@ -0,0 +1,528 @@
use std::fmt;
use std::sync::{Arc, Mutex};
use common::{dma::Dma, sgl};
use driver_graphics::kms::connector::{KmsConnectorDriver, KmsConnectorStatus};
use driver_graphics::kms::objects::{KmsCrtc, KmsCrtcState, KmsObjectId, KmsObjects};
use driver_graphics::{Buffer as DrmBuffer, CursorPlane, Damage, GraphicsAdapter, GraphicsScheme};
use drm_sys::{
DRM_CAP_CURSOR_HEIGHT, DRM_CAP_CURSOR_WIDTH, DRM_CAP_DUMB_BUFFER, DRM_CAP_DUMB_PREFER_SHADOW,
DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT,
};
use syscall::{EINVAL, PAGE_SIZE};
use virtio_core::spec::{Buffer, ChainBuilder, DescriptorFlags};
use virtio_core::transport::{Error, Queue, Transport};
use crate::*;
/// Converts a driver-graphics damage rectangle into the wire-format rect.
///
/// Implemented as `From` rather than a hand-written `Into`: the standard
/// blanket impl then provides `Damage: Into<GpuRect>` for free, so existing
/// `.into()` call sites keep working.
impl From<Damage> for GpuRect {
    fn from(damage: Damage) -> Self {
        GpuRect {
            x: damage.x,
            y: damage.y,
            width: damage.width,
            height: damage.height,
        }
    }
}
/// KMS connector backed by one virtio-gpu scanout.
#[derive(Debug)]
pub struct VirtGpuConnector {
    // Index of the scanout (display) this connector represents; used to
    // index `VirtGpuAdapter::displays`.
    display_id: u32,
}

impl KmsConnectorDriver for VirtGpuConnector {
    // No per-connector mutable state is needed.
    type State = ();
}
/// A guest-allocated framebuffer attached as backing storage to a host
/// virtio-gpu resource. Dropping it sends `VIRTIO_GPU_CMD_RESOURCE_UNREF`.
pub struct VirtGpuFramebuffer<'a> {
    // Control queue used to send the unref command on drop.
    queue: Arc<Queue<'a>>,
    // Host resource id this buffer backs.
    id: ResourceId,
    // Scatter-gather list over the guest memory backing the buffer.
    sgl: sgl::Sgl,
    width: u32,
    height: u32,
}

// Manual `Debug` that elides the queue handle (`finish_non_exhaustive`).
impl<'a> fmt::Debug for VirtGpuFramebuffer<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("VirtGpuFramebuffer")
            .field("id", &self.id)
            .field("sgl", &self.sgl)
            .field("width", &self.width)
            .field("height", &self.height)
            .finish_non_exhaustive()
    }
}
impl DrmBuffer for VirtGpuFramebuffer<'_> {
    /// Size of the backing store in bytes (4 bytes per pixel, matching the
    /// bpp = 32 used in `create_dumb_buffer`).
    ///
    /// Fix: the product is computed in `usize` instead of `u32`, so a large
    /// framebuffer (e.g. 32768x32768) cannot overflow the intermediate
    /// 32-bit multiplication before the cast.
    fn size(&self) -> usize {
        self.width as usize * self.height as usize * 4
    }
}
impl Drop for VirtGpuFramebuffer<'_> {
    /// Releases the host resource: synchronously (via `block_on`) sends a
    /// `ResourceUnref` command with a device-writable response header.
    fn drop(&mut self) {
        futures::executor::block_on(async {
            let request = Dma::new(ResourceUnref::new(self.id)).unwrap();
            let header = Dma::new(ControlHeader::default()).unwrap();
            let command = ChainBuilder::new()
                .chain(Buffer::new(&request))
                .chain(Buffer::new(&header).flags(DescriptorFlags::WRITE_ONLY))
                .build();
            self.queue.send(command).await;
        });
    }
}
/// Cached per-scanout state, refreshed by `VirtGpuAdapter::update_displays`.
#[derive(Debug, Clone)]
pub struct Display {
    // Whether the host reports this scanout as enabled.
    enabled: bool,
    width: u32,
    height: u32,
    // EDID blob (empty unless VIRTIO_GPU_F_EDID was negotiated).
    edid: Vec<u8>,
    // Resource currently scanned out on this display, if any; used by
    // `set_crtc` to skip redundant `SetScanout` commands.
    active_resource: Option<ResourceId>,
}
/// Driver state behind the generic [`GraphicsScheme`].
pub struct VirtGpuAdapter<'a> {
    /// Device configuration space (read for scanout count/events, written
    /// to acknowledge events).
    pub config: &'a mut GpuConfig,
    // Virtqueue for control commands (resources, scanouts, transfers).
    control_queue: Arc<Queue<'a>>,
    // Virtqueue dedicated to cursor commands.
    cursor_queue: Arc<Queue<'a>>,
    transport: Arc<dyn Transport>,
    // Whether VIRTIO_GPU_F_EDID was negotiated.
    has_edid: bool,
    // Cached per-scanout state, indexed by display id.
    displays: Vec<Display>,
    // Lazily-created zero-filled cursor image used by `disable_cursor`.
    hidden_cursor: Option<Arc<VirtGpuFramebuffer<'a>>>,
}

// Manual `Debug` that only exposes the display cache; the queues and
// transport are elided via `finish_non_exhaustive`.
impl<'a> fmt::Debug for VirtGpuAdapter<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("VirtGpuAdapter")
            .field("displays", &self.displays)
            .finish_non_exhaustive()
    }
}
impl VirtGpuAdapter<'_> {
    /// Re-queries the device for display info (and EDID when negotiated) and
    /// rebuilds the `displays` cache. Called from `init` and on
    /// config-change interrupts.
    pub async fn update_displays(&mut self) -> Result<(), Error> {
        let display_info = self.get_display_info().await?;
        // Only the first `num_scanouts` entries of the fixed-size table are valid.
        let raw_displays = &display_info.display_info[..self.config.num_scanouts() as usize];
        self.displays.resize(
            raw_displays.len(),
            Display {
                enabled: false,
                width: 0,
                height: 0,
                edid: vec![],
                active_resource: None,
            },
        );
        for (i, info) in raw_displays.iter().enumerate() {
            log::info!(
                "virtio-gpu: display {i} ({}x{}px)",
                info.rect.width,
                info.rect.height
            );
            self.displays[i].enabled = info.enabled != 0;
            if info.rect.width == 0 || info.rect.height == 0 {
                // QEMU gives all displays other than the first a zero width and height, but trying
                // to attach a zero sized framebuffer to the display will result in an error, so
                // default to 640x480px.
                self.displays[i].width = 640;
                self.displays[i].height = 480;
            } else {
                self.displays[i].width = info.rect.width;
                self.displays[i].height = info.rect.height;
            }
            if self.has_edid {
                let edid = self.get_edid(i as u32).await?;
                self.displays[i].edid = edid.edid[..edid.size as usize].to_vec();
            }
        }
        Ok(())
    }
    /// Sends `request` on the control queue followed by a device-writable
    /// response header; returns the header once the chain completes.
    async fn send_request<T>(&self, request: Dma<T>) -> Result<Dma<ControlHeader>, Error> {
        let header = Dma::new(ControlHeader::default())?;
        let command = ChainBuilder::new()
            .chain(Buffer::new(&request))
            .chain(Buffer::new(&header).flags(DescriptorFlags::WRITE_ONLY))
            .build();
        self.control_queue.send(command).await;
        Ok(header)
    }
    /// Like [`Self::send_request`], but with `VIRTIO_GPU_FLAG_FENCE` set.
    ///
    /// NOTE(review): the fence flag is set on the *response* header, which
    /// the device overwrites; presumably it belongs in the request's own
    /// control header — confirm against the virtio spec whether this has any
    /// effect as written.
    async fn send_request_fenced<T>(&self, request: Dma<T>) -> Result<Dma<ControlHeader>, Error> {
        let mut header = Dma::new(ControlHeader::default())?;
        header.flags |= VIRTIO_GPU_FLAG_FENCE;
        let command = ChainBuilder::new()
            .chain(Buffer::new(&request))
            .chain(Buffer::new(&header).flags(DescriptorFlags::WRITE_ONLY))
            .build();
        self.control_queue.send(command).await;
        Ok(header)
    }
    /// Issues `VIRTIO_GPU_CMD_GET_DISPLAY_INFO` and returns the full
    /// response; panics if the device answers with anything but
    /// `RespOkDisplayInfo`.
    async fn get_display_info(&self) -> Result<Dma<GetDisplayInfo>, Error> {
        let header = Dma::new(ControlHeader::with_ty(CommandTy::GetDisplayInfo))?;
        let response = Dma::new(GetDisplayInfo::default())?;
        let command = ChainBuilder::new()
            .chain(Buffer::new(&header))
            .chain(Buffer::new(&response).flags(DescriptorFlags::WRITE_ONLY))
            .build();
        self.control_queue.send(command).await;
        assert!(response.header.ty == CommandTy::RespOkDisplayInfo);
        Ok(response)
    }
    /// Issues `VIRTIO_GPU_CMD_GET_EDID` for one scanout; panics if the
    /// device answers with anything but `RespOkEdid`.
    async fn get_edid(&self, scanout_id: u32) -> Result<Dma<GetEdidResp>, Error> {
        let header = Dma::new(GetEdid::new(scanout_id))?;
        let response = Dma::new(GetEdidResp::new())?;
        let command = ChainBuilder::new()
            .chain(Buffer::new(&header))
            .chain(Buffer::new(&response).flags(DescriptorFlags::WRITE_ONLY))
            .build();
        self.control_queue.send(command).await;
        assert!(response.header.ty == CommandTy::RespOkEdid);
        Ok(response)
    }
    /// Uploads the cursor image and (re)positions the hardware cursor.
    fn update_cursor(
        &mut self,
        cursor: &VirtGpuFramebuffer,
        x: i32,
        y: i32,
        hot_x: i32,
        hot_y: i32,
    ) {
        // Transferring the cursor resource to the host. The 64x64 extent
        // matches `hw_cursor_size`.
        futures::executor::block_on(async {
            let transfer_request = Dma::new(XferToHost2d::new(
                cursor.id,
                GpuRect {
                    x: 0,
                    y: 0,
                    width: 64,
                    height: 64,
                },
                0,
            ))
            .unwrap();
            let header = self.send_request_fenced(transfer_request).await.unwrap();
            assert_eq!(header.ty, CommandTy::RespOkNodata);
        });
        // Update the cursor position (sent on the dedicated cursor queue,
        // with no response buffer).
        let request = Dma::new(UpdateCursor::update_cursor(x, y, hot_x, hot_y, cursor.id)).unwrap();
        futures::executor::block_on(async {
            let command = ChainBuilder::new().chain(Buffer::new(&request)).build();
            self.cursor_queue.send(command).await;
        });
    }
    /// Moves the cursor without re-uploading its image.
    fn move_cursor(&mut self, x: i32, y: i32) {
        let request = Dma::new(MoveCursor::move_cursor(x, y)).unwrap();
        futures::executor::block_on(async {
            let command = ChainBuilder::new().chain(Buffer::new(&request)).build();
            self.cursor_queue.send(command).await;
        });
    }
    /// Hides the cursor by showing a lazily-created zero-filled image.
    fn disable_cursor(&mut self) {
        if self.hidden_cursor.is_none() {
            let (width, height) = self.hw_cursor_size().unwrap();
            let (cursor, stride) = self.create_dumb_buffer(width, height);
            unsafe {
                // Zero the freshly allocated buffer (create_dumb_buffer
                // fills it with 0xff).
                core::ptr::write_bytes(
                    cursor.sgl.as_ptr() as *mut u8,
                    0,
                    (stride * height) as usize,
                );
            }
            self.hidden_cursor = Some(Arc::new(cursor));
        }
        let hidden_cursor = self.hidden_cursor.as_ref().unwrap().clone();
        self.update_cursor(&hidden_cursor, 0, 0, 0, 0);
    }
}
impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> {
    type Connector = VirtGpuConnector;
    type Crtc = ();
    type Buffer = VirtGpuFramebuffer<'a>;
    type Framebuffer = ();
    fn name(&self) -> &'static [u8] {
        b"virtio-gpud"
    }
    fn desc(&self) -> &'static [u8] {
        b"VirtIO GPU"
    }
    /// Queries the displays once, then registers one CRTC + connector pair
    /// per scanout.
    fn init(&mut self, objects: &mut KmsObjects<Self>) {
        futures::executor::block_on(async {
            self.update_displays().await.unwrap();
        });
        for display_id in 0..self.config.num_scanouts.get() {
            let crtc = objects.add_crtc((), ());
            objects.add_connector(VirtGpuConnector { display_id }, (), &[crtc]);
        }
    }
    /// DRM capability queries: dumb buffers are supported, shadow buffers
    /// not preferred, cursor is fixed at 64x64 (see `hw_cursor_size`).
    fn get_cap(&self, cap: u32) -> syscall::Result<u64> {
        match cap {
            DRM_CAP_DUMB_BUFFER => Ok(1),
            DRM_CAP_DUMB_PREFER_SHADOW => Ok(0),
            DRM_CAP_CURSOR_WIDTH => Ok(64),
            DRM_CAP_CURSOR_HEIGHT => Ok(64),
            _ => Err(syscall::Error::new(EINVAL)),
        }
    }
    fn set_client_cap(&self, cap: u32, _value: u64) -> syscall::Result<()> {
        match cap {
            // FIXME hide cursor plane unless this client cap is set
            DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT => Ok(()),
            _ => Err(syscall::Error::new(EINVAL)),
        }
    }
    /// Updates a connector's connection status and mode/EDID information
    /// from the cached display state.
    fn probe_connector(&mut self, objects: &mut KmsObjects<Self>, id: KmsObjectId) {
        futures::executor::block_on(async {
            let mut connector = objects.get_connector(id).unwrap().lock().unwrap();
            let display = &self.displays[connector.driver_data.display_id as usize];
            connector.connection = if display.enabled {
                KmsConnectorStatus::Connected
            } else {
                KmsConnectorStatus::Disconnected
            };
            if self.has_edid {
                connector.update_from_edid(&display.edid);
                // The connector lock must be released before `add_blob`
                // takes `objects` mutably; re-lock afterwards to attach the
                // blob id.
                drop(connector);
                let blob = objects.add_blob(display.edid.clone());
                objects.get_connector(id).unwrap().lock().unwrap().edid = blob;
            } else {
                connector.update_from_size(display.width, display.height);
            }
        });
    }
    /// Allocates a guest framebuffer, creates a matching host resource and
    /// attaches the guest pages as its backing storage. Returns the buffer
    /// and its stride in bytes.
    fn create_dumb_buffer(&mut self, width: u32, height: u32) -> (Self::Buffer, u32) {
        futures::executor::block_on(async {
            let bpp = 32;
            let fb_size = width as usize * height as usize * bpp / 8;
            let sgl = sgl::Sgl::new(fb_size).unwrap();
            unsafe {
                // Initialize to opaque white (0xff per byte).
                core::ptr::write_bytes(sgl.as_ptr() as *mut u8, 255, fb_size);
            }
            let res_id = ResourceId::alloc();
            // Create a host resource using `VIRTIO_GPU_CMD_RESOURCE_CREATE_2D`.
            let request = Dma::new(ResourceCreate2d::new(
                res_id,
                ResourceFormat::Bgrx,
                width,
                height,
            ))
            .unwrap();
            let header = self.send_request(request).await.unwrap();
            assert_eq!(header.ty, CommandTy::RespOkNodata);
            // Use the allocated framebuffer from the guest ram, and attach it as backing
            // storage to the resource just created, using `VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING`.
            let mut mem_entries =
                unsafe { Dma::zeroed_slice(sgl.chunks().len()).unwrap().assume_init() };
            for (entry, chunk) in mem_entries.iter_mut().zip(sgl.chunks().iter()) {
                *entry = MemEntry {
                    address: chunk.phys as u64,
                    length: chunk.length.next_multiple_of(PAGE_SIZE) as u32,
                    padding: 0,
                };
            }
            let attach_request =
                Dma::new(AttachBacking::new(res_id, mem_entries.len() as u32)).unwrap();
            let header = Dma::new(ControlHeader::default()).unwrap();
            let command = ChainBuilder::new()
                .chain(Buffer::new(&attach_request))
                .chain(Buffer::new_unsized(&mem_entries))
                .chain(Buffer::new(&header).flags(DescriptorFlags::WRITE_ONLY))
                .build();
            self.control_queue.send(command).await;
            assert_eq!(header.ty, CommandTy::RespOkNodata);
            (
                VirtGpuFramebuffer {
                    queue: self.control_queue.clone(),
                    id: res_id,
                    sgl,
                    width,
                    height,
                },
                // Stride: 4 bytes per pixel, no row padding.
                width * 4,
            )
        })
    }
    fn map_dumb_buffer(&mut self, buffer: &Self::Buffer) -> *mut u8 {
        buffer.sgl.as_ptr()
    }
    // Framebuffers carry no driver-side state; the buffer itself is enough.
    fn create_framebuffer(&mut self, _buffer: &Self::Buffer) -> Self::Framebuffer {
        ()
    }
    /// Applies a CRTC state change: transfers the framebuffer to the host,
    /// binds it to the scanout if it changed, and flushes the damaged region
    /// for every connector driven by this CRTC.
    fn set_crtc(
        &mut self,
        objects: &KmsObjects<Self>,
        crtc: &Mutex<KmsCrtc<Self>>,
        state: KmsCrtcState<Self>,
        damage: Damage,
    ) -> syscall::Result<()> {
        futures::executor::block_on(async {
            let mut crtc = crtc.lock().unwrap();
            let framebuffer = state
                .fb_id
                .map(|fb_id| objects.get_framebuffer(fb_id))
                .transpose()?;
            crtc.state = state;
            for connector in objects.connectors() {
                let connector = connector.lock().unwrap();
                // Skip connectors driven by a different CRTC.
                if connector.state.crtc_id != objects.crtc_ids()[crtc.crtc_index as usize] {
                    continue;
                }
                let display_id = connector.driver_data.display_id;
                // No framebuffer: disable the scanout.
                // NOTE(review): this returns after disabling the first
                // matching connector; if several connectors share the CRTC
                // the remaining ones are skipped — confirm intended.
                let Some(framebuffer) = framebuffer else {
                    let scanout_request = Dma::new(SetScanout::new(
                        display_id,
                        ResourceId::NONE,
                        GpuRect::new(0, 0, 0, 0),
                    ))
                    .unwrap();
                    let header = self.send_request(scanout_request).await.unwrap();
                    assert_eq!(header.ty, CommandTy::RespOkNodata);
                    self.displays[display_id as usize].active_resource = None;
                    return Ok(());
                };
                // Push the full framebuffer contents to the host resource.
                let req = Dma::new(XferToHost2d::new(
                    framebuffer.buffer.id,
                    GpuRect {
                        x: 0,
                        y: 0,
                        width: framebuffer.width,
                        height: framebuffer.height,
                    },
                    0,
                ))
                .unwrap();
                let header = self.send_request(req).await.unwrap();
                assert_eq!(header.ty, CommandTy::RespOkNodata);
                // FIXME once we support resizing we also need to check that the current and target size match
                if self.displays[display_id as usize].active_resource != Some(framebuffer.buffer.id)
                {
                    let scanout_request = Dma::new(SetScanout::new(
                        display_id,
                        framebuffer.buffer.id,
                        GpuRect::new(0, 0, framebuffer.width, framebuffer.height),
                    ))
                    .unwrap();
                    let header = self.send_request(scanout_request).await.unwrap();
                    assert_eq!(header.ty, CommandTy::RespOkNodata);
                    self.displays[display_id as usize].active_resource =
                        Some(framebuffer.buffer.id);
                }
                // Flush only the damaged region, clipped to the framebuffer.
                let flush = ResourceFlush::new(
                    framebuffer.buffer.id,
                    damage.clip(framebuffer.width, framebuffer.height).into(),
                );
                let header = self.send_request(Dma::new(flush).unwrap()).await.unwrap();
                assert_eq!(header.ty, CommandTy::RespOkNodata);
            }
            Ok(())
        })
    }
    fn hw_cursor_size(&self) -> Option<(u32, u32)> {
        Some((64, 64))
    }
    /// Routes cursor-plane changes: new image -> full update, position-only
    /// change -> move, buffer removed -> hide via the zero-filled cursor.
    fn handle_cursor(&mut self, cursor: &CursorPlane<Self::Buffer>, dirty_fb: bool) {
        if let Some(buffer) = &cursor.buffer {
            if dirty_fb {
                self.update_cursor(buffer, cursor.x, cursor.y, cursor.hot_x, cursor.hot_y);
            } else {
                self.move_cursor(cursor.x, cursor.y);
            }
        } else {
            if dirty_fb {
                self.disable_cursor();
            }
        }
    }
}
/// Namespace type for constructing the virtio-gpu display scheme.
pub struct GpuScheme {}

impl<'a> GpuScheme {
    /// Wires a [`VirtGpuAdapter`] into a [`GraphicsScheme`] registered as
    /// `display.virtio-gpu`.
    pub fn new(
        config: &'a mut GpuConfig,
        control_queue: Arc<Queue<'a>>,
        cursor_queue: Arc<Queue<'a>>,
        transport: Arc<dyn Transport>,
        has_edid: bool,
    ) -> Result<GraphicsScheme<VirtGpuAdapter<'a>>, Error> {
        // The display cache starts empty; it is populated by `init` /
        // `update_displays`.
        Ok(GraphicsScheme::new(
            VirtGpuAdapter {
                displays: Vec::new(),
                hidden_cursor: None,
                config,
                control_queue,
                cursor_queue,
                transport,
                has_edid,
            },
            "display.virtio-gpu".to_owned(),
            false,
        ))
    }
}