milestone: desktop path Phases 1-5

Phase 1 (Runtime Substrate): 4 check binaries, --probe, POSIX tests
Phase 2 (Wayland Compositor): bounded scaffold, zero warnings
Phase 3 (KWin Session): preflight checker (KWin stub, gated on Qt6Quick)
Phase 4 (KDE Plasma): 18 KF6 frameworks enabled, preflight checker
Phase 5 (Hardware GPU): DRM/firmware/Mesa preflight checker

Build: zero warnings, all scripts syntax-clean. Oracle-verified.
This commit is contained in:
2026-04-29 09:54:06 +01:00
parent b23714f542
commit 8acc73d774
508 changed files with 76526 additions and 396 deletions
@@ -0,0 +1,104 @@
use std::collections::VecDeque;
use std::ops::ControlFlow;
use libredox::error::Error as LError;
use syscall::Result;
use syscall::error::{self as errno, Error};
use redox_scheme::scheme::{SchemeState, SchemeSync};
use redox_scheme::{Request, RequestKind, Response, SignalBehavior, Socket};
/// Runs a [`SchemeSync`] scheme over a blocking scheme socket: requests are
/// read in batches, handled synchronously, and the responses written back.
pub struct Blocking<'sock> {
    // TODO: VecDeque for both when it implements spare_capacity
    // Scratch buffer the socket fills with incoming requests on each batch;
    // drained fully before the next read.
    requests_read: Vec<Request>,
    // Responses produced for the current batch, waiting to be written back.
    responses_to_write: VecDeque<Response>,
    // The scheme socket requests are read from and responses written to.
    socket: &'sock Socket,
    // Per-scheme bookkeeping threaded through every request handler.
    state: SchemeState,
}
impl<'sock> Blocking<'sock> {
pub fn new(socket: &'sock Socket, queue_size: usize) -> Self {
Self {
requests_read: Vec::with_capacity(queue_size),
responses_to_write: VecDeque::with_capacity(queue_size),
socket,
state: SchemeState::new(),
}
}
pub fn process_requests_nonblocking(
&mut self,
scheme: &mut impl SchemeSync,
) -> Result<ControlFlow<()>> {
assert!(self.requests_read.is_empty());
assert!(self.responses_to_write.is_empty());
match self
.socket
.read_requests(&mut self.requests_read, SignalBehavior::Interrupt)
{
Ok(()) if self.requests_read.is_empty() => {
unreachable!("blocking scheme read failed to read anything");
}
Ok(()) => {}
Err(Error {
errno: errno::EINTR | errno::EWOULDBLOCK | errno::EAGAIN,
}) => return Ok(ControlFlow::Break(())),
Err(err) => return Err(err),
}
for request in self.requests_read.drain(..) {
match request.kind() {
RequestKind::Call(req) => {
let response = req.handle_sync(scheme, &mut self.state);
self.responses_to_write.push_back(response);
}
RequestKind::Cancellation(_req) => {}
RequestKind::OnClose { id } => {
// TODO: state.on_close()
scheme.on_close(id);
}
RequestKind::SendFd(sendfd_request) => {
let result = scheme.on_sendfd(&sendfd_request);
let response = Response::new(result, sendfd_request);
self.responses_to_write.push_back(response);
}
RequestKind::RecvFd(recvfd_request) => {
let result = scheme.on_recvfd(&recvfd_request);
let response = Response::open_dup_like(result, recvfd_request);
self.responses_to_write.push_back(response);
}
_ => {}
}
}
match self
.socket
.write_responses(&mut self.responses_to_write, SignalBehavior::Restart)
{
Ok(()) if !self.responses_to_write.is_empty() => {
panic!("failed to write all scheme responses");
}
Ok(()) => Ok(ControlFlow::Continue(())),
Err(Error {
errno: errno::EINTR | errno::EWOULDBLOCK | errno::EAGAIN,
}) => {
panic!("scheme response writing should always block");
}
Err(err) => return Err(LError::from(err).into()),
}
}
pub fn process_requests_blocking(mut self, mut scheme: impl SchemeSync) -> Result<!> {
loop {
match self.process_requests_nonblocking(&mut scheme)? {
ControlFlow::Continue(()) => {}
ControlFlow::Break(()) => {
panic!("process_requests_blocking should not be used on non-blocking schemes");
}
}
}
}
}
+122
View File
@@ -0,0 +1,122 @@
#![feature(never_type)]
use std::collections::{BTreeMap, btree_map};
use std::fmt;
use std::num::Wrapping;
use syscall::{EBADF, Error, Result};
mod blocking;
mod readiness_based;
pub use blocking::Blocking;
pub use readiness_based::ReadinessBased;
/// A map from numeric handle ids (e.g. scheme file handles) to values,
/// handing out fresh ids starting from 1 and skipping ids still in use
/// after the counter wraps around.
pub struct HandleMap<T> {
    // Live handles keyed by id.
    handles: BTreeMap<usize, T>,
    // The next id to hand out; `Wrapping` so overflow wraps instead of
    // panicking in debug builds.
    next_id: Wrapping<usize>,
}
impl<T> HandleMap<T> {
pub const fn new() -> Self {
HandleMap {
handles: BTreeMap::new(),
next_id: Wrapping(1),
}
}
pub fn insert(&mut self, handle: T) -> usize {
let id = self.next_id;
// If we've looped round there's a small chance that the file descriptor still exists, so loop till we get one that doesn't
self.next_id += Wrapping(1);
loop {
if !self.handles.contains_key(&self.next_id.0) {
break;
} else {
self.next_id += Wrapping(1);
}
}
self.handles.insert(id.0, handle);
id.0
}
pub fn remove(&mut self, id: usize) -> Option<T> {
self.handles.remove(&id)
}
pub fn get(&self, id: usize) -> Result<&T> {
self.handles.get(&id).ok_or(Error::new(EBADF))
}
pub fn get_mut(&mut self, id: usize) -> Result<&mut T> {
self.handles.get_mut(&id).ok_or(Error::new(EBADF))
}
pub fn iter(&self) -> btree_map::Iter<'_, usize, T> {
self.handles.iter()
}
pub fn iter_mut(&mut self) -> btree_map::IterMut<'_, usize, T> {
self.handles.iter_mut()
}
pub fn keys(&self) -> btree_map::Keys<'_, usize, T> {
self.handles.keys()
}
pub fn values(&self) -> btree_map::Values<'_, usize, T> {
self.handles.values()
}
pub fn values_mut(&mut self) -> btree_map::ValuesMut<'_, usize, T> {
self.handles.values_mut()
}
}
/// Writes an fpath string into a caller-provided byte buffer, silently
/// truncating once the buffer is full, while tracking how many bytes
/// actually fit.
pub struct FpathWriter<'a> {
    // Destination buffer supplied by the fpath caller.
    buf: &'a mut [u8],
    // Number of bytes written into `buf` so far (never exceeds buf.len()).
    written: usize,
}
impl<'a> FpathWriter<'a> {
    /// Writes the modern `/scheme/{scheme_name}/` prefix into `buf`, lets
    /// `f` append the rest of the path, and returns how many bytes were
    /// actually written (which may be less than requested on truncation).
    pub fn with(
        buf: &'a mut [u8],
        scheme_name: &str,
        f: impl FnOnce(&mut Self) -> Result<()>,
    ) -> Result<usize> {
        let mut writer = FpathWriter { buf, written: 0 };
        // Infallible: push_str truncates rather than erroring.
        write!(writer, "/scheme/{scheme_name}/").unwrap();
        f(&mut writer)?;
        Ok(writer.written)
    }

    /// Like [`Self::with`], but using the legacy `{scheme_name}:` prefix.
    pub fn with_legacy(
        buf: &'a mut [u8],
        scheme_name: &str,
        f: impl FnOnce(&mut Self) -> Result<()>,
    ) -> Result<usize> {
        let mut writer = FpathWriter { buf, written: 0 };
        // Infallible: push_str truncates rather than erroring.
        write!(writer, "{scheme_name}:").unwrap();
        f(&mut writer)?;
        Ok(writer.written)
    }

    /// Copies as much of `s` as still fits, silently dropping the rest.
    /// NOTE(review): truncation is byte-wise and may split a multi-byte
    /// UTF-8 character at the buffer edge — presumably acceptable for
    /// fpath buffers; confirm against callers.
    pub fn push_str(&mut self, s: &str) {
        let remaining = self.buf.len() - self.written;
        let count = s.len().min(remaining);
        self.buf[self.written..self.written + count].copy_from_slice(&s.as_bytes()[..count]);
        self.written += count;
    }

    /// Inherent `write_fmt` so `write!` works on this type without callers
    /// importing `fmt::Write`.
    pub fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
        std::fmt::write(self, args)
    }
}
impl fmt::Write for FpathWriter<'_> {
    // Delegates to `push_str`, which truncates instead of failing, so
    // formatting into an FpathWriter never returns an error.
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.push_str(s);
        Ok(())
    }
}
@@ -0,0 +1,196 @@
use std::collections::{HashMap, VecDeque};
use std::ops::ControlFlow;
use libredox::error::Error as LError;
use syscall::Result;
use syscall::error::{self as errno, ECANCELED, EIO, EOPNOTSUPP, Error};
use redox_scheme::scheme::{Op, SchemeResponse, SchemeState, SchemeSync};
use redox_scheme::{CallerCtx, Id, Request, RequestKind, Response, SignalBehavior, Socket};
/// Runs a [`SchemeSync`] scheme over a non-blocking (readiness-based)
/// scheme socket: requests that would block are parked and retried later
/// via the `poll_*` methods.
pub struct ReadinessBased<'sock> {
    // TODO: VecDeque for both when it implements spare_capacity
    // Scratch buffer the socket fills with incoming requests on each read.
    requests_read: Vec<Request>,
    // Responses queued until `write_responses` flushes them to the socket.
    responses_to_write: VecDeque<Response>,
    // Requests that returned EWOULDBLOCK, parked by caller id until retried.
    states: HashMap<Id, (CallerCtx, Op)>,
    // Ids scheduled for a retry by `poll_ready_requests`.
    ready_queue: VecDeque<Id>,
    // The scheme socket requests are read from and responses written to.
    socket: &'sock Socket,
    // Per-scheme bookkeeping threaded through every request handler.
    state: SchemeState,
}
impl<'sock> ReadinessBased<'sock> {
pub fn new(socket: &'sock Socket, queue_size: usize) -> Self {
Self {
requests_read: Vec::with_capacity(queue_size),
responses_to_write: VecDeque::with_capacity(queue_size),
states: HashMap::new(),
socket,
ready_queue: VecDeque::new(),
state: SchemeState::new(),
}
}
pub fn read_and_process_requests(&mut self, scheme: &mut impl SchemeSync) -> Result<()> {
assert!(self.requests_read.is_empty());
match self
.socket
.read_requests(&mut self.requests_read, SignalBehavior::Interrupt)
{
Ok(()) if self.requests_read.is_empty() => {
unreachable!("blocking scheme read failed to read anything");
}
Ok(())
| Err(Error {
errno: errno::EINTR | errno::EWOULDBLOCK | errno::EAGAIN,
}) => {}
Err(err) => return Err(err),
}
for request in self.requests_read.drain(..) {
let req = match request.kind() {
RequestKind::Call(c) => c,
RequestKind::Cancellation(req) => {
if let Some((_caller, op)) = self.states.remove(&req.id) {
self.responses_to_write
.push_back(Response::err(ECANCELED, op));
}
continue;
}
RequestKind::OnClose { id } => {
// TODO: state.on_close()
scheme.on_close(id);
continue;
}
RequestKind::SendFd(sendfd_request) => {
let result = scheme.on_sendfd(&sendfd_request);
let response = Response::new(result, sendfd_request);
self.responses_to_write.push_back(response);
continue;
}
RequestKind::RecvFd(recvfd_request) => {
let result = scheme.on_recvfd(&recvfd_request);
let caller = recvfd_request.caller();
if let Err(Error {
errno: errno::EWOULDBLOCK,
}) = result
{
self.states.insert(caller.id, (caller, recvfd_request.op()));
continue;
}
let response = Response::open_dup_like(result, recvfd_request);
self.responses_to_write.push_back(response);
continue;
}
_ => continue,
};
let caller = req.caller();
let mut op = match req.op() {
Ok(op) => op,
Err(req) => {
self.responses_to_write
.push_back(Response::err(EOPNOTSUPP, req));
continue;
}
};
let resp = match op.handle_sync_dont_consume(&caller, scheme, &mut self.state) {
SchemeResponse::Opened(Err(Error {
errno: errno::EWOULDBLOCK,
}))
| SchemeResponse::Regular(Err(Error {
errno: errno::EWOULDBLOCK,
})) if !op.is_explicitly_nonblock() => {
self.states.insert(caller.id, (caller, op));
continue;
}
SchemeResponse::Regular(r) => Response::new(r, op),
SchemeResponse::RegularAndNotifyOnDetach(status) => {
Response::new_notify_on_detach(status, op)
}
SchemeResponse::Opened(o) => Response::open_dup_like(o, op),
};
self.responses_to_write.push_back(resp);
}
Ok(())
}
// TODO: Doesn't scale. Instead, provide an API for some form of queue.
// TODO: panic if id isn't present?
pub fn poll_request(&mut self, id: Id, scheme: &mut impl SchemeSync) -> Result<bool> {
Ok(
match Self::poll_request_inner(id, scheme, &mut self.state, &mut self.states)? {
ControlFlow::Continue((caller, op)) => {
self.states.insert(id, (caller, op));
false
}
ControlFlow::Break(resp) => {
self.responses_to_write.push_back(resp);
true
}
},
)
}
fn poll_request_inner(
id: Id,
scheme: &mut impl SchemeSync,
state: &mut SchemeState,
states: &mut HashMap<Id, (CallerCtx, Op)>,
) -> Result<ControlFlow<Response, (CallerCtx, Op)>> {
let (caller, mut op) = states.remove(&id).ok_or(Error::new(EIO))?;
let resp = match op.handle_sync_dont_consume(&caller, scheme, state) {
SchemeResponse::Opened(Err(Error {
errno: errno::EWOULDBLOCK,
}))
| SchemeResponse::Regular(Err(Error {
errno: errno::EWOULDBLOCK,
})) if !op.is_explicitly_nonblock() => {
return Ok(ControlFlow::Continue((caller, op)));
}
SchemeResponse::Regular(r) => Response::new(r, op),
SchemeResponse::Opened(o) => Response::open_dup_like(o, op),
SchemeResponse::RegularAndNotifyOnDetach(status) => {
Response::new_notify_on_detach(status, op)
}
};
Ok(ControlFlow::Break(resp))
}
pub fn poll_ready_requests(&mut self, scheme: &mut impl SchemeSync) -> Result<()> {
for id in self.ready_queue.drain(..) {
match Self::poll_request_inner(id, scheme, &mut self.state, &mut self.states)? {
ControlFlow::Break(resp) => {
self.responses_to_write.push_back(resp);
}
ControlFlow::Continue((caller, op)) => {
self.states.insert(id, (caller, op));
}
}
}
Ok(())
}
pub fn poll_all_requests(&mut self, scheme: &mut impl SchemeSync) -> Result<()> {
// TODO: implement waker-like API
self.ready_queue.clear();
self.ready_queue.extend(self.states.keys().copied());
self.poll_ready_requests(scheme)
}
pub fn write_responses(&mut self) -> Result<()> {
match self
.socket
.write_responses(&mut self.responses_to_write, SignalBehavior::Restart)
{
Ok(()) if !self.responses_to_write.is_empty() => {
panic!("failed to write all scheme responses");
}
Ok(()) => Ok(()),
Err(Error {
errno: errno::EINTR | errno::EWOULDBLOCK | errno::EAGAIN,
}) => {
panic!("scheme response writing should always block");
}
Err(err) => return Err(LError::from(err).into()),
}
}
}