diff --git a/AGENTS.md b/AGENTS.md index 5688e5d1..1bcf2706 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -67,13 +67,13 @@ redox-master/ | Fix kernel | `recipes/core/kernel/source/` | Kernel is a recipe, not top-level | | Fix a driver | `recipes/core/base/source/drivers/` | All drivers are userspace daemons | | Fix relibc (POSIX) | `recipes/core/relibc/source/` | C library written in Rust | -| Wayland integration | `recipes/wip/wayland/` + `docs/03-WAYLAND-ON-REDOX.md` | 21 WIP recipes | +| Wayland integration | `recipes/wip/wayland/` + `local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` | 21 WIP recipes + local validation/ownership plan | | KDE Plasma path | `recipes/wip/kde/` + `docs/05-KDE-PLASMA-ON-REDOX.md` | 9 WIP KDE app recipes | | **Desktop path plan** | `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` | **Canonical plan: console → HW-accelerated KDE** | | Linux driver compat | `docs/04-LINUX-DRIVER-COMPAT.md` | linux-kpi + redox-driver-sys architecture (**GPU and Wi-Fi only — not USB**) | | Build system internals | `src/bin/repo.rs`, `src/lib.rs`, `mk/repo.mk` | Cookbook tool in Rust | | Cross-toolchain setup | `mk/prefix.mk`, `prefix/x86_64-unknown-redox/` | Downloads Clang/LLVM toolchain | -| Display/session surface | `config/redbear-kde.toml`, `config/wayland.toml` | Tracked KWin desktop target plus bounded validation slice | +| Display/session surface | `config/redbear-full.toml` | Active desktop/graphics compile surface; `redbear-kde` references elsewhere are historical/staging and not supported compile targets | | GPU/graphics stack | `recipes/libs/mesa/` | OSMesa + LLVMpipe (software only) | | GPU hardware drivers | `local/recipes/gpu/redox-drm/source/` | AMD + Intel DRM/KMS via redox-driver-sys | | D-Bus integration | `local/docs/DBUS-INTEGRATION-PLAN.md` | Architecture, gap analysis, phased implementation for KDE Plasma D-Bus | @@ -92,10 +92,13 @@ echo 'PODMAN_BUILD?=0' > .config # Native build (no container) echo 'PODMAN_BUILD?=1' > .config # Podman container 
build # Build Red Bear OS -make all # Build tracked KWin Wayland target → harddrive.img -make all CONFIG_NAME=redbear-full # Broader Red Bear integration slice + custom drivers -make all CONFIG_NAME=redbear-minimal # Minimal Red Bear OS server -CI=1 make all CONFIG_NAME=redbear-minimal # CI mode (disables TUI, for non-interactive) +# Supported compile targets: redbear-minimal, redbear-live-minimal, redbear-full, redbear-live-full +# Desktop/graphics are available only on redbear-full and redbear-live-full. +make all CONFIG_NAME=redbear-full # Desktop/graphics-enabled Red Bear target → harddrive.img +make live CONFIG_NAME=redbear-live-full # Desktop/graphics live image +make all CONFIG_NAME=redbear-minimal # Minimal non-desktop Red Bear target +make live CONFIG_NAME=redbear-live-minimal # Minimal live image +CI=1 make all CONFIG_NAME=redbear-minimal # CI mode (disables TUI, for non-interactive) # Run make qemu # Boot in QEMU @@ -256,7 +259,7 @@ See `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` for the canonical desktop path p | ACPI power | ✅ | `\_PS0`/`\_PS3`/`\_PPC` AML methods available | | x2APIC/SMP | ✅ | Multi-core works | | IOMMU | 🚧 | QEMU first-use proof now passes; real hardware validation still open | -| AMD GPU | 🚧 | MMIO mapped, DC port compiles, MSI-X wired, no hardware validation yet | +| AMD GPU | 🚧 | MMIO mapped, bounded Red Bear display glue path builds, MSI-X wired; imported Linux AMD DC/TTM/core remain under compile triage; no hardware validation yet | ### Phased Roadmap (historical P0–P6) @@ -268,10 +271,10 @@ See `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` for the canonical desktop path p |-------|----------|----------| | ~~P0: Fix ACPI for AMD~~ | ~~4-6 weeks~~ | ✅ Materially complete — boots on modern AMD bare metal; see `local/docs/ACPI-IMPROVEMENT-PLAN.md` for forward work | | ~~P1: Driver infrastructure~~ | ~~8-12 weeks~~ | ✅ Complete — redox-driver-sys + linux-kpi + firmware-loader + pcid /config + MSI-X (compiles) | | ~~P2: AMD GPU display~~ | ~~12-16 
weeks~~ | ✅ Complete — redox-drm + AMD DC port + Intel driver (compiles, no HW validation) | +| ~~P2: AMD GPU display~~ | ~~12-16 weeks~~ | 🚧 Partial — redox-drm + bounded Red Bear AMD display glue build; imported Linux AMD DC/TTM/core remain under compile triage; Intel driver compiles, no HW validation | | ~~P3: POSIX + input~~ | ~~4-8 weeks~~ | 🚧 Build-side work substantially complete — relibc gaps exported to downstream consumers, evdevd/udev-shim/libevdev/libinput/D-Bus build; runtime validation still open | | P4: Wayland compositor | 4-6 weeks | 🚧 Partial — libwayland/Qt6 Wayland/Mesa EGL+GBM+GLES2/Qt6 OpenGL now build, but compositor/runtime validation is still incomplete | -| ~~P5: DML2 enablement~~ | ~~partial~~ | 🚧 DML2 config enabled, 63 DML source files in build, TTM compiled, libdrm amdgpu ✅, `iommu` daemon now builds; hardware validation still open | +| ~~P5: DML2 enablement~~ | ~~partial~~ | 🚧 Historical DML2 config work landed, but the current retained AMDGPU build no longer treats imported DML2/TTM as part of the default bounded compile path; libdrm amdgpu ✅, `iommu` daemon now builds; hardware validation still open | | P6: KDE Plasma | 12-16 weeks | 🚧 In progress — Qt6 ✅, KF6 32/32 ✅, Mesa EGL/GBM/GLES2 ✅, kf6-kcmutils ✅, kf6-kwayland ✅, kdecoration ✅, KWin 🔄 building | ### Canonical Desktop Path (current plan) diff --git a/README.md b/README.md index 1244c3e4..a7707554 100644 --- a/README.md +++ b/README.md @@ -88,8 +88,8 @@ with the subsystem plans listed above. 
|---|---|---| | P0 ACPI boot | ✅ Materially complete (historical boot baseline) | In-tree and documented in `local/docs/ACPI-FIXES.md`; not release-grade complete; forward ownership/robustness/validation work lives in `local/docs/ACPI-IMPROVEMENT-PLAN.md` | | P1 driver infra | ✅ Complete | Compile-oriented infrastructure present | -| P2 DRM / display | ✅ Code complete | Hardware validation still pending | -| P3 POSIX + input | 🚧 In progress | relibc exports now cover the rebuilt `signalfd`/`timerfd`/`eventfd`/`open_memstream` consumer path; runtime validation remains | +| P2 DRM / display | 🚧 Partial | redox-drm + bounded AMD display glue build; imported Linux AMD DC/TTM/core remain under compile triage; hardware validation still pending | +| P3 POSIX + input | 🚧 In progress | relibc now has strict Redox-target runtime proof for `signalfd` / `timerfd` / `eventfd` through the repaired test runner; broader desktop/runtime hardening still continues | | P4 Wayland runtime | 🚧 In progress | bounded Wayland runtime validation builds to a bootable image and reaches its packaged runtime entrypoint in QEMU/UEFI | | P5 desktop/network plumbing | 🚧 In progress | `redbear-full` now carries the native VirtIO networking path plus D-Bus system-bus plumbing as a broader integration slice, and the guest-side runtime check reaches `DBUS_SYSTEM_BUS=present` | | P6 KDE Plasma | 🚧 In progress | Mix of real builds, shims, and stubs | @@ -127,7 +127,7 @@ expand on top of them. 
| Component | Status | Detail | |-----------|--------|--------| -| AMD GPU driver (amdgpu) | ✅ Compiles | LinuxKPI compat + AMD DC modesetting + quirk-aware MSI-X/MSI/legacy IRQ fallback (no HW validation) | +| AMD GPU driver (amdgpu) | 🚧 Bounded path builds | redox-drm + Red Bear AMD display glue compile; imported Linux AMD DC/TTM/core remain under compile triage; quirk-aware MSI-X/MSI/legacy IRQ fallback present (no HW validation) | | Intel GPU driver | ✅ Compiles | Display pipe modesetting + quirk-aware MSI-X/MSI/legacy IRQ fallback (no HW validation) | | ext4 filesystem | ✅ Compiles | Read/write ext4 alongside RedoxFS | | ACPI for AMD bare metal | ✅ Materially complete (historical boot baseline) | x2APIC, MADT, FADT shutdown/reboot, power methods; not release-grade complete; see `local/docs/ACPI-IMPROVEMENT-PLAN.md` for remaining ownership, robustness, sleep-state, and validation work | diff --git a/build.sh b/build.sh index 6d8c723c..810c4cc0 100755 --- a/build.sh +++ b/build.sh @@ -22,11 +22,11 @@ usage() echo " i686. ARCH is not checked, so you can add a new architecture." echo " Defaults to the directory containing the FILESYSTEM_CONFIG file," echo " or x86_64 if no FILESYSTEM_CONFIG is specified." - echo " -c CONFIG: The name of the config, e.g. redbear-kde, redbear-full or redbear-minimal." + echo " -c CONFIG: The name of the config, e.g. redbear-full or redbear-minimal." echo " Determines the name of the image, build/ARCH/CONFIG/harddrive.img" - echo " e.g. build/x86_64/redbear-kde/harddrive.img" + echo " e.g. build/x86_64/redbear-full/harddrive.img" echo " Determines the name of FILESYSTEM_CONFIG if none is specified." - echo " Defaults to the basename of FILESYSTEM_CONFIG, or 'redbear-kde'" + echo " Defaults to the basename of FILESYSTEM_CONFIG, or 'redbear-full'" echo " if FILESYSTEM_CONFIG is not specified." echo " -f FILESYSTEM_CONFIG:" echo " The config file to use. It can be in any location." 
@@ -50,7 +50,7 @@ if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then fi defaultarch="x86_64" -defaultname="redbear-kde" +defaultname="redbear-full" ARCH="" CONFIG_NAME="" FILESYSTEM_CONFIG="" diff --git a/config/redbear-full.toml b/config/redbear-full.toml index 9d7df064..cc5292a9 100644 --- a/config/redbear-full.toml +++ b/config/redbear-full.toml @@ -1,19 +1,18 @@ # Red Bear OS Full Configuration -# Broader desktop/session plumbing profile beneath the KDE direction +# Primary desktop/session target: Wayland + KWin/KDE session surface on Red Bear OS. # Build: make all CONFIG_NAME=redbear-full -# Live: make live CONFIG_NAME=redbear-full +# Live: make live CONFIG_NAME=redbear-live-full # -# This remains a broader integration slice for desktop/session plumbing. +# This is the only active full desktop target. Wayland and KDE package/runtime surfaces are folded +# here instead of being split across redbear-wayland and redbear-kde. # GPU driver stack ships through redbear-meta for this profile. # redox-drm now carries the runtime/package dependency on the AMD backend library. -# KDE Frameworks + KWin: depend on qtdeclarative/qtwayland. Re-enable when ported. -# libinput/libevdev: WIP meson builds, not yet validated. Re-enable when tested. -# seatd: now builds; DRM lease/runtime validation is still open before enabling broadly. +# Runtime/session claims still remain evidence-qualified until compositor/session proof is strong. 
-include = ["desktop.toml", "redbear-legacy-base.toml", "redbear-legacy-desktop.toml", "redbear-device-services.toml", "redbear-netctl.toml"] +include = ["desktop.toml", "redbear-legacy-base.toml", "redbear-legacy-desktop.toml", "redbear-device-services.toml", "redbear-netctl.toml", "redbear-greeter-services.toml"] [general] -filesystem_size = 2048 +filesystem_size = 4096 efi_partition_size = 16 [users.messagebus] @@ -27,6 +26,16 @@ shell = "/usr/bin/ion" # Red Bear OS branding (os-release, hostname, motd) redbear-release = {} +# Exclude inherited legacy desktop packages from the tracked KWin/Wayland target +orbdata = "ignore" +orbital = "ignore" +orbterm = "ignore" +orbutils = "ignore" +cosmic-edit = "ignore" +cosmic-files = "ignore" +cosmic-icons = "ignore" +cosmic-term = "ignore" + # Native Redox PCI/USB listing tools (lspci, lsusb) redbear-hwutils = {} @@ -78,12 +87,55 @@ htop = {} # Wayland protocol libwayland = {} +wayland-protocols = {} # Keyboard support libxkbcommon = {} +libevdev = {} +libinput = {} -# Qt6 base (Core+Concurrent+Xml+Gui+Widgets, software rendering) +# Seat management +seatd = {} + +# Qt6 stack qtbase = {} +qtdeclarative = {} +qtsvg = {} +qtwayland = {} +qt6-wayland-smoke = {} + +# KF6 Frameworks — Tier 1 +kf6-extra-cmake-modules = {} +kf6-kcoreaddons = {} +kf6-kconfig = {} +kf6-ki18n = {} +kf6-kcolorscheme = {} +kf6-kauth = {} + +# KF6 Frameworks — KWin session-surface chain +kf6-kwindowsystem = {} +kf6-knotifications = {} +kf6-kconfigwidgets = {} +kf6-kcrash = {} +kf6-kdbusaddons = {} +kf6-kglobalaccel = {} +kf6-kservice = {} +kf6-kpackage = {} +kf6-kiconthemes = {} +kirigami = {} +kf6-kio = {} +kf6-kdeclarative = {} +kf6-kcmutils = {} +kf6-kwayland = {} +kdecoration = {} + +# KWin Wayland compositor +kwin = {} + +# Graphics +redox-drm = {} +mesa = {} +libdrm = {} # Core Red Bear umbrella package redbear-meta = {} @@ -95,70 +147,9 @@ data = "" directory = true mode = 0o755 -# ── Desktop services (not provided by base package) 
──────────────── - -# Display-session placeholder for the supplementary integration slice -[[files]] -path = "/usr/lib/init.d/20_display.service" -data = """ -[unit] -description = "Display session service" -requires_weak = [ -] - -[service] -cmd = "ion" -args = ["-c", "true"] -envs = { VT = "3" } -type = "oneshot_async" -""" - -# desktop-minimal.toml: "inputd -A 2", "nowait getty 2", "nowait getty /scheme/debug/no-preserve -J" -# Neutralize and replace with proper service files -[[files]] -path = "/usr/lib/init.d/30_console.service" -data = """ -[unit] -description = "Console terminals" -requires_weak = [ - "20_display.service", -] - -[service] -cmd = "getty" -args = ["2"] -type = "oneshot_async" -""" - -[[files]] -path = "/usr/lib/init.d/29_activate_console.service" -data = """ -[unit] -description = "Activate display VT" -requires_weak = [ - "20_display.service", -] - -[service] -cmd = "inputd" -args = ["-A", "3"] -type = "oneshot" -""" - -[[files]] -path = "/usr/lib/init.d/31_debug_console.service" -data = """ -[unit] -description = "Debug console" -requires_weak = [ - "20_display.service", -] - -[service] -cmd = "getty" -args = ["/scheme/debug/no-preserve", "-J"] -type = "oneshot_async" -""" +# Greeter/login service wiring now lives in config/redbear-greeter-services.toml, which is +# included above so the full desktop target has a single source of truth for display/fallback +# session services. 
[[files]] path = "/usr/lib/init.d/13_iommu.service" @@ -211,6 +202,22 @@ args = [ type = "oneshot_async" """ +[[files]] +path = "/usr/lib/init.d/13_seatd.service" +data = """ +[unit] +description = "seatd seat management daemon" +requires_weak = [ + "12_dbus.service", + "13_redbear-sessiond.service", +] + +[service] +cmd = "seatd" +args = ["-l", "info"] +type = "oneshot_async" +""" + [[files]] path = "/usr/lib/init.d/14_redbear-upower.service" data = """ @@ -276,3 +283,212 @@ path = "/run/dbus" data = "" directory = true mode = 0o755 + +[[files]] +path = "/usr/bin/redbear-validation-session" +mode = 0o755 +data = """ +#!/usr/bin/env sh + +export DISPLAY="" +export WAYLAND_DISPLAY="${WAYLAND_DISPLAY:-wayland-0}" +export XDG_SESSION_TYPE=wayland +export LIBSEAT_BACKEND=seatd +export SEATD_SOCK=/run/seatd.sock +export QT_PLUGIN_PATH=/usr/plugins +export QT_QPA_PLATFORM_PLUGIN_PATH=/usr/plugins/platforms +export QML2_IMPORT_PATH=/usr/qml +export RUST_BACKTRACE=full +export RUST_LOG=debug +export XCURSOR_THEME=Pop +export XKB_CONFIG_ROOT=/usr/share/X11/xkb + +if [ -z "${XDG_RUNTIME_DIR:-}" ]; then + export XDG_RUNTIME_DIR="/tmp/run/user/$(id -u)" +fi + +mkdir -p "$XDG_RUNTIME_DIR" + +wait_for_wayland_socket() { + socket_path="$XDG_RUNTIME_DIR/$WAYLAND_DISPLAY" + attempts=0 + while [ "$attempts" -lt 30 ]; do + if [ -e "$socket_path" ]; then + return 0 + fi + if ! 
kill -0 "$kwin_pid" 2>/scheme/null; then + return 1 + fi + attempts=$((attempts + 1)) + sleep 1 + done + return 1 +} + +if [ -z "${DBUS_SESSION_BUS_ADDRESS:-}" ]; then + eval "$(dbus-launch --sh-syntax)" +fi + +validation_request="/run/redbear-kde-session.validation-request" +validation_success="/run/redbear-kde-session.validation-success" + +if [ -f "$validation_request" ]; then + echo "user=$USER" > "$validation_success" + echo "runtime=$XDG_RUNTIME_DIR" >> "$validation_success" + echo "wayland=$WAYLAND_DISPLAY" >> "$validation_success" + rm -f "$validation_request" + exit 0 +fi + +dbus-update-activation-environment \ + DBUS_SESSION_BUS_ADDRESS \ + DBUS_SESSION_BUS_PID \ + WAYLAND_DISPLAY \ + XDG_SESSION_TYPE \ + XDG_RUNTIME_DIR \ + DISPLAY \ + HOME \ + USER + +if [ -d /usr/share/glib-2.0/schemas ]; then + glib-compile-schemas /usr/share/glib-2.0/schemas/ +fi + +kwin_wayland --replace & +kwin_pid=$! + +if ! wait_for_wayland_socket; then + echo "kwin_wayland failed to expose $XDG_RUNTIME_DIR/$WAYLAND_DISPLAY" >&2 + exit 1 +fi + +exec /usr/bin/wayland-session +""" + +[[files]] +path = "/usr/bin/wayland-session" +mode = 0o755 +data = """ +#!/usr/bin/env ion + +printenv +let session_started = "$HOME/.wayland-session.started" +rm -f $session_started +echo "started" > $session_started +export QT_PLUGIN_PATH=/usr/plugins +export QT_QPA_PLATFORM_PLUGIN_PATH=/usr/plugins/platforms +export QML2_IMPORT_PATH=/usr/qml +let smoke_ok = "$HOME/.qt6-wayland-smoke.ok" +let smoke_err = "$HOME/.qt6-wayland-smoke.err" +let smoke_log = "$HOME/.qt6-wayland-smoke.log" +let bootstrap_ok = "$HOME/.qt6-bootstrap-minimal.ok" +let bootstrap_log = "$HOME/.qt6-bootstrap-minimal.log" +let plugin_ok = "$HOME/.qt6-plugin-minimal.ok" +let plugin_err = "$HOME/.qt6-plugin-minimal.err" +let plugin_log = "$HOME/.qt6-plugin-minimal.log" +let smoke_minimal_ok = "$HOME/.qt6-wayland-smoke-minimal.ok" +let smoke_offscreen_ok = "$HOME/.qt6-wayland-smoke-offscreen.ok" +let smoke_wayland_ok = 
"$HOME/.qt6-wayland-smoke-wayland.ok" +let smoke_minimal_log = "$HOME/.qt6-wayland-smoke-minimal.log" +let smoke_offscreen_log = "$HOME/.qt6-wayland-smoke-offscreen.log" +let smoke_wayland_log = "$HOME/.qt6-wayland-smoke-wayland.log" +rm -f $smoke_ok $smoke_err +rm -f $smoke_log +rm -f $bootstrap_ok $bootstrap_log +rm -f $plugin_ok $plugin_err $plugin_log +rm -f $smoke_minimal_ok $smoke_offscreen_ok $smoke_wayland_ok +rm -f $smoke_minimal_log $smoke_offscreen_log $smoke_wayland_log +if which qt6-wayland-smoke >/scheme/null + if env LD_DEBUG=all QT_DEBUG_PLUGINS=1 QT_QPA_PLATFORM=minimal qt6-bootstrap-check > $bootstrap_log ^> $bootstrap_log + touch $bootstrap_ok + else + if test -f $bootstrap_log + cat $bootstrap_log + end + echo "qt6-bootstrap-check minimal failed; see $bootstrap_log" > $smoke_err + end + + if env LD_DEBUG=all QT_DEBUG_PLUGINS=1 QT_PLUGIN_PATH=/usr/plugins QT_QPA_PLATFORM_PLUGIN_PATH=/usr/plugins/platforms qt6-plugin-check /usr/plugins/platforms/libqminimal.so > $plugin_log ^> $plugin_log + touch $plugin_ok + else + if test -f $plugin_log + cat $plugin_log + end + echo "qt6-plugin-check failed; see $plugin_log" > $plugin_err + echo "qt6-plugin-check failed; see $plugin_log" > $smoke_err + end + + if env QT_DEBUG_PLUGINS=1 QT_QPA_PLATFORM=minimal qt6-wayland-smoke > $smoke_minimal_log ^> $smoke_minimal_log + touch $smoke_minimal_ok + else + echo "qt6-wayland-smoke minimal failed; see $smoke_minimal_log" > $smoke_err + end + + if env QT_DEBUG_PLUGINS=1 QT_QPA_PLATFORM=offscreen qt6-wayland-smoke > $smoke_offscreen_log ^> $smoke_offscreen_log + touch $smoke_offscreen_ok + else + echo "qt6-wayland-smoke offscreen failed; see $smoke_offscreen_log" > $smoke_err + end + + if env QT_DEBUG_PLUGINS=1 QT_QPA_PLATFORM=wayland qt6-wayland-smoke > $smoke_wayland_log ^> $smoke_wayland_log + touch $smoke_wayland_ok + touch $smoke_ok + else + echo "qt6-wayland-smoke wayland failed; see $smoke_wayland_log" > $smoke_err + end +end +""" + +[[files]] +path = 
"/usr/bin/redbear-kde-session" +mode = 0o755 +data = """ +#!/usr/bin/sh + +export DISPLAY="" +export WAYLAND_DISPLAY="${WAYLAND_DISPLAY:-wayland-0}" +export XDG_SESSION_TYPE=wayland +export KDE_FULL_SESSION=true +export XDG_CURRENT_DESKTOP=KDE +export LIBSEAT_BACKEND=seatd +export SEATD_SOCK=/run/seatd.sock + +if [ -z "${XDG_RUNTIME_DIR:-}" ]; then + export XDG_RUNTIME_DIR="/tmp/run/user/$(id -u)" +fi + +mkdir -p "$XDG_RUNTIME_DIR" + +if [ -z "${DBUS_SESSION_BUS_ADDRESS:-}" ]; then + eval "$(dbus-launch --sh-syntax)" +fi + +validation_request="/run/redbear-kde-session.validation-request" +validation_success="/run/redbear-kde-session.validation-success" + +if [ -f "$validation_request" ]; then + { + echo "user=$USER" + echo "runtime=$XDG_RUNTIME_DIR" + echo "wayland=$WAYLAND_DISPLAY" + } > "$validation_success" + rm -f "$validation_request" + exit 0 +fi + +dbus-update-activation-environment \ + DBUS_SESSION_BUS_ADDRESS \ + DBUS_SESSION_BUS_PID \ + WAYLAND_DISPLAY \ + XDG_SESSION_ID \ + XDG_SEAT \ + XDG_SESSION_TYPE \ + XDG_RUNTIME_DIR \ + XDG_CURRENT_DESKTOP \ + KDE_FULL_SESSION \ + DISPLAY \ + HOME \ + USER + +exec kwin_wayland --replace +""" diff --git a/config/redbear-greeter-services.toml b/config/redbear-greeter-services.toml new file mode 100644 index 00000000..4acbbb3b --- /dev/null +++ b/config/redbear-greeter-services.toml @@ -0,0 +1,116 @@ +# Red Bear greeter/login service wiring +# +# This fragment is intended to be included by the active desktop/graphics target. 
+ +[users.greeter] +password = "" +uid = 101 +gid = 101 +name = "greeter" +home = "/nonexistent" +shell = "/usr/bin/ion" + +[groups.greeter] +gid = 101 +members = ["greeter"] + +[packages] +redbear-authd = {} +redbear-session-launch = {} +redbear-greeter = {} + +[[files]] +path = "/usr/lib/init.d/19_redbear-authd.service" +data = """ +[unit] +description = "Red Bear authentication daemon" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "ion" +args = [ + "-c", + "redbear-authd", +] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/20_display.service" +data = """ +[unit] +description = "Legacy display compatibility shim" +requires_weak = [ +] + +[service] +cmd = "ion" +args = ["-c", "true"] +envs = { VT = "3" } +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/20_greeter.service" +data = """ +[unit] +description = "Red Bear greeter service" +requires_weak = [ + "12_dbus.service", + "13_redbear-sessiond.service", + "13_seatd.service", + "19_redbear-authd.service", +] + +[service] +cmd = "/usr/bin/redbear-greeterd" +envs = { VT = "3", REDBEAR_GREETER_USER = "greeter" } +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/30_console.service" +data = """ +[unit] +description = "Console terminals" +requires_weak = [ + "20_greeter.service", +] + +[service] +cmd = "getty" +args = ["2"] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/29_activate_console.service" +data = """ +[unit] +description = "Activate display VT" +requires_weak = [ + "20_greeter.service", +] + +[service] +cmd = "inputd" +args = ["-A", "3"] +type = "oneshot" +""" + +[[files]] +path = "/usr/lib/init.d/31_debug_console.service" +data = """ +[unit] +description = "Debug console" +requires_weak = [ + "20_greeter.service", +] + +[service] +cmd = "getty" +args = ["/scheme/debug/no-preserve", "-J"] +type = "oneshot_async" +""" diff --git a/config/redbear-live-full.toml b/config/redbear-live-full.toml new file mode 100644 index 
00000000..6dd48585 --- /dev/null +++ b/config/redbear-live-full.toml @@ -0,0 +1,13 @@ +# Red Bear OS Live Full Configuration +# Live ISO variant for the primary full desktop/session target. +# +# Build: make live CONFIG_NAME=redbear-live-full + +include = ["redbear-full.toml"] + +[general] +filesystem_size = 4096 + +[packages] +cub = {} +redbear-meta = {} diff --git a/config/redbear-live-minimal.toml b/config/redbear-live-minimal.toml new file mode 100644 index 00000000..cb88d458 --- /dev/null +++ b/config/redbear-live-minimal.toml @@ -0,0 +1,12 @@ +# Red Bear OS Live Minimal Configuration +# Live ISO variant for the minimal console/server target. +# +# Build: make live CONFIG_NAME=redbear-live-minimal + +include = ["redbear-minimal.toml"] + +[general] +filesystem_size = 3072 + +[packages] +cub = {} diff --git a/config/redbear-live.toml b/config/redbear-live.toml deleted file mode 100644 index 10c1ac32..00000000 --- a/config/redbear-live.toml +++ /dev/null @@ -1,15 +0,0 @@ -# Red Bear OS Live Configuration -# Live ISO variant — follows the tracked KDE desktop target -# -# Build: make live CONFIG_NAME=redbear-live - -include = ["redbear-kde.toml"] - -[general] -# Keep the live installer image reasonably sized -filesystem_size = 3072 - -[packages] -# Keep these explicit for the live profile even though cub is inherited via redbear-kde. 
-cub = {} -redbear-meta = {} diff --git a/config/redbear-minimal.toml b/config/redbear-minimal.toml index b979ed87..ed279302 100644 --- a/config/redbear-minimal.toml +++ b/config/redbear-minimal.toml @@ -53,6 +53,10 @@ data = "wired-dhcp\n" path = "/usr/lib/init.d/30_console" data = "" +[[files]] +path = "/usr/lib/init.d/15_fatd.service" +data = "" + [[files]] path = "/usr/lib/init.d/29_activate_console.service" data = """ diff --git a/config/wayland.toml b/config/wayland.toml index 6eda3661..8c93da7d 100644 --- a/config/wayland.toml +++ b/config/wayland.toml @@ -178,6 +178,22 @@ fi mkdir -p "$XDG_RUNTIME_DIR" +wait_for_wayland_socket() { + socket_path="$XDG_RUNTIME_DIR/$WAYLAND_DISPLAY" + attempts=0 + while [ "$attempts" -lt 30 ]; do + if [ -e "$socket_path" ]; then + return 0 + fi + if ! kill -0 "$kwin_pid" 2>/scheme/null; then + return 1 + fi + attempts=$((attempts + 1)) + sleep 1 + done + return 1 +} + if [ -z "${DBUS_SESSION_BUS_ADDRESS:-}" ]; then eval "$(dbus-launch --sh-syntax)" fi @@ -197,7 +213,13 @@ if [ -d /usr/share/glib-2.0/schemas ]; then fi kwin_wayland --replace & -sleep 2 +kwin_pid=$! + +if ! wait_for_wayland_socket; then + echo "kwin_wayland failed to expose $XDG_RUNTIME_DIR/$WAYLAND_DISPLAY" >&2 + exit 1 +fi + exec /usr/bin/wayland-session """ diff --git a/docs/02-GAP-ANALYSIS.md b/docs/02-GAP-ANALYSIS.md index d0e9e3f9..8f529363 100644 --- a/docs/02-GAP-ANALYSIS.md +++ b/docs/02-GAP-ANALYSIS.md @@ -3,7 +3,7 @@ ## Overview This document maps the distance between current Redox OS 0.9.0 and three goals: -1. **Wayland compositor support** → see [03-WAYLAND-ON-REDOX.md](03-WAYLAND-ON-REDOX.md) +1. **Wayland compositor support** → see `../local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` 2. **KDE Plasma desktop** → see [05-KDE-PLASMA-ON-REDOX.md](05-KDE-PLASMA-ON-REDOX.md) 3. 
**Linux driver compatibility layer** → see [04-LINUX-DRIVER-COMPAT.md](04-LINUX-DRIVER-COMPAT.md) @@ -21,8 +21,8 @@ Use the matrix below as the authoritative phase summary before reading the older |---|---|---| | P0 ACPI / bare-metal boot | **Materially complete for the historical boot baseline, not release-grade complete.** Implemented: kernel RSDP/RSDT/XSDT/MADT/FADT parsing, typed `StartupError` in `acpid`, AML mutex real state (`aml_physmem.rs`), EC widened accesses via byte transactions (`ec.rs`), kstop-based shutdown eventing (kernel registers `/scheme/kernel.acpi/kstop`, `acpid` subscribes, `redbear-sessiond` emits D-Bus `PrepareForShutdown`). Sleep state transitions (`\_Sx` beyond `\_S5`) and sleep eventing are **known gaps**. DMAR module remains present in `acpid` but not wired; ownership is still transitional/orphaned rather than cleanly transferred. Bare-metal validation remains bounded rather than broad. | `local/docs/ACPI-FIXES.md`, `local/docs/ACPI-IMPROVEMENT-PLAN.md`, `local/patches/kernel/redox.patch`, `local/patches/base/redox.patch`, `recipes/core/base/source/drivers/acpid/src/main.rs`, `recipes/core/base/source/drivers/acpid/src/aml_physmem.rs`, `recipes/core/base/source/drivers/acpid/src/ec.rs`, `local/recipes/system/redbear-sessiond/source/src/acpi_watcher.rs` | | P1 driver infrastructure | Complete in-tree, compile-oriented | `local/recipes/drivers/redox-driver-sys/`, `local/recipes/drivers/linux-kpi/`, `local/recipes/system/firmware-loader/` | -| P2 DRM / AMD+Intel display | Complete in-tree, hardware validation pending | `local/docs/P2-AMD-GPU-DISPLAY.md`, `local/recipes/gpu/redox-drm/`, `local/recipes/gpu/amdgpu/` | -| P3 POSIX + input | Implemented in-tree; consumer-visible `signalfd`/`timerfd`/`eventfd`/`open_memstream` header-export path fixed in this repo pass; runtime validation still pending | `recipes/core/relibc/source/src/header/`, `recipes/core/relibc/source/include/sys/signalfd.h`, `local/patches/relibc/`, 
`local/recipes/system/evdevd/`, `local/recipes/system/udev-shim/` | +| P2 DRM / AMD+Intel display | Partial — redox-drm + bounded AMD display glue build in-tree; imported Linux AMD DC/TTM/core remain under compile triage; hardware validation pending | `local/docs/AMD-FIRST-INTEGRATION.md`, `local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md`, `local/recipes/gpu/redox-drm/`, `local/recipes/gpu/amdgpu/` | +| P3 POSIX + input | Implemented in-tree; consumer-visible `signalfd`/`timerfd`/`eventfd`/`open_memstream` header-export path fixed in this repo pass; strict Redox-target relibc runtime proof now exists for the fd-event slice | `local/patches/relibc/`, `recipes/core/relibc/recipe.toml`, `recipes/tests/relibc-tests-bins/recipe.toml`, `local/recipes/system/evdevd/`, `local/recipes/system/udev-shim/` | | P4 Wayland stack | Partially complete | `recipes/wip/wayland/`, `recipes/wip/libs/other/libinput/`, `recipes/wip/services/seatd/` | | P5 AMD acceleration / IOMMU | Partial, but no longer blocked on basic QEMU first-use proof | `local/recipes/gpu/amdgpu/`, `local/recipes/system/iommu/` | | P6 KDE Plasma | In progress with mixed real builds and stubs/shims | `config/redbear-kde.toml`, `local/recipes/kde/`, `local/docs/QT6-PORT-STATUS.md` | @@ -67,9 +67,9 @@ implemented phase with its own config/recipe/doc boundary. 
| API | Status | Where to implement | Effort | |-----|--------|--------------------|--------| -| `signalfd`/`signalfd4` | **Implemented in-tree** | `relibc/src/header/signal/mod.rs` + `signal/signalfd.rs` | Runtime validation still needed | -| `timerfd_create/settime/gettime` | **Implemented in-tree** | `relibc/src/header/sys_timerfd/` | Runtime validation still needed | -| `eventfd`/`eventfd_read`/`eventfd_write` | **Implemented in-tree** | `relibc/src/header/sys_eventfd/` | Runtime validation still needed | +| `signalfd`/`signalfd4` | **Implemented and runtime-tested in-tree** | `relibc/src/header/signal/mod.rs` + `signal/signalfd.rs` | Strict Redox-target relibc runtime test now passes; broader consumer semantics still need continued confirmation | +| `timerfd_create/settime/gettime` | **Implemented and runtime-tested in-tree** | `relibc/src/header/sys_timerfd/` | Strict Redox-target relibc runtime test now passes; broader consumer semantics still need continued confirmation | +| `eventfd`/`eventfd_read`/`eventfd_write` | **Implemented and runtime-tested in-tree** | `relibc/src/header/sys_eventfd/` | Strict Redox-target relibc runtime test now passes; broader consumer semantics still need continued confirmation | | `F_DUPFD_CLOEXEC` | **Implemented in-tree** | `relibc/src/header/fcntl/mod.rs` | Verify against downstream consumers | | `MSG_CMSG_CLOEXEC` | **Implemented in-tree** | `relibc/src/header/sys_socket/mod.rs` | Verify against downstream consumers | | `MSG_NOSIGNAL` | **Implemented in-tree** | `relibc/src/header/sys_socket/mod.rs` | Verify against downstream consumers | @@ -89,7 +89,7 @@ implemented phase with its own config/recipe/doc boundary. 
| GPU driver (Intel) | Experimental modeset only | `redox-drm/src/drivers/intel/` | [04 §3](04-LINUX-DRIVER-COMPAT.md) | | GEM buffers | **Present in-tree** | `local/recipes/gpu/redox-drm/source/src/gem.rs` | [04 §3](04-LINUX-DRIVER-COMPAT.md) | | DMA-BUF sharing | ✅ Implemented | PRIME export/import via opaque tokens in `scheme.rs` | [DMA-BUF plan](../local/docs/DMA-BUF-IMPROVEMENT-PLAN.md) | -| Mesa hardware backend | **Missing** | Mesa winsys for Redox DRM | [03 §3.4](03-WAYLAND-ON-REDOX.md) | +| Mesa hardware backend | **Missing** | Mesa winsys for Redox DRM | `../local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` | | GPU OpenGL | Software only | Blocked on GPU driver | [04](04-LINUX-DRIVER-COMPAT.md) | ### Layer 3: Input Stack @@ -101,9 +101,9 @@ implemented phase with its own config/recipe/doc boundary. | Component | Status | Where to implement | Concrete doc | |-----------|--------|--------------------|-------------| -| evdev daemon | **Present in-tree** | `local/recipes/system/evdevd/` | [03 §2](03-WAYLAND-ON-REDOX.md) | -| udev shim | **Present in-tree** | `local/recipes/system/udev-shim/` | [03 §2](03-WAYLAND-ON-REDOX.md) | -| libinput | **Present as WIP port** | `recipes/wip/libs/other/libinput/` | [03 §2](03-WAYLAND-ON-REDOX.md) | +| evdev daemon | **Present in-tree** | `local/recipes/system/evdevd/` | `../local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` | +| udev shim | **Present in-tree** | `local/recipes/system/udev-shim/` | `../local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` | +| libinput | **Present as WIP port** | `recipes/wip/libs/other/libinput/` | `../local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` | | XKB layouts | **Done** | `xkeyboard-config` ported | — | | seatd | Builds and is wired into KDE config, runtime unvalidated | `recipes/wip/services/seatd/`, `config/redbear-kde.toml` | — | diff --git a/docs/03-WAYLAND-ON-REDOX.md b/docs/03-WAYLAND-ON-REDOX.md deleted file mode 100644 index 664383b7..00000000 --- a/docs/03-WAYLAND-ON-REDOX.md +++ /dev/null @@ -1,581 +0,0 
@@ -# 03 — Wayland on Redox: Concrete Implementation Path - -## Goal - -Get a working Wayland compositor on Redox OS that can run KDE Plasma applications. - -For the current build/runtime truth summary of the desktop stack, use -`local/docs/DESKTOP-STACK-CURRENT-STATUS.md` together with `local/docs/QT6-PORT-STATUS.md`. -This file should now be read primarily as implementation history plus deeper Wayland-specific -porting notes. - -## Current State - -- `config/wayland.toml` exists — it remains the bounded validation path, not the production desktop path -- 21 Wayland recipes in `recipes/wip/wayland/` — most untested -- `libwayland` 1.24.0 now rebuilds with a much smaller `redox.patch`; the P3 POSIX path (`signalfd`, `timerfd`, `eventfd`, `open_memstream`, `MSG_CMSG_CLOEXEC`, `MSG_NOSIGNAL`) is back on the native path, and the remaining patch is down to Redox-specific build quirks -- the bounded validation compositor path remains separate from the production desktop goal -- the remaining compositor/runtime issue is still input integration, not simply the absence of a libinput recipe -- `libdrm` builds with amdgpu and Intel enabled -- Mesa builds with EGL+GBM+GLES2 (software via LLVMpipe; hardware acceleration still requires kernel DMA-BUF) -- **evdevd** (`scheme:evdev`) provides Linux-compatible `/dev/input/eventX` interface -- **udev-shim** (`scheme:udev`) provides udev-like device enumeration -- **seatd** now builds for Redox and is wired into the KDE runtime config, but DRM-lease/runtime validation is still open -- **redox-drm** (`scheme:drm`) provides DRM/KMS with AMD + Intel GPU support - ---- - -## Status Correction (2026-04-14) - -This document is partly historical. The repo already contains substantial P3/P4 work in-tree, -but the downstream Wayland stack is still not free of compatibility patches. 
- -What is actually true today: -- relibc now contains in-tree implementations for `signalfd`, `timerfd`, `eventfd`, `open_memstream`, `MSG_CMSG_CLOEXEC`, and `MSG_NOSIGNAL` support paths -- the relibc module wiring plus the consumer-visible `signalfd` / `timerfd` / `eventfd` / `open_memstream` export path were fixed in this repo pass -- `libwayland/redox.patch` has been reduced to residual Redox-specific build tweaks (`wayland-scanner` detection and `F_DUPFD_CLOEXEC` guard), so the old POSIX bypasses are no longer the main blocker -- `evdevd`, `udev-shim`, and `redox-drm` exist in-tree, but full compositor/runtime validation remains open -- `seatd` now also builds in-tree for Redox and is started by the KDE runtime config, but has not yet been validated end-to-end with the compositor/DRM path - -Read the step-by-step sections below as design history plus implementation notes, not as an exact -current-state checklist. - -For the current Wayland runtime entrypoint in this repo, use: - -- `config/redbear-wayland.toml` -- `local/scripts/test-phase4-wayland-qemu.sh` - -> **Numbering note:** the "Phase 4" in the script name above refers to the historical P0-P6 -> hardware-enablement sequence (see `AGENTS.md`). In the v2.0 desktop plan -> (`local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md`), Wayland compositor work falls under Phase 2. -> The scripts still work under their original names. - -That path is the current bounded validation target. The production desktop path is -`config/redbear-kde.toml` with `kwin_wayland`. 
- -Current runtime evidence for that target: - -- `redbear-wayland` builds to a bootable image -- the image boots to a real login prompt in QEMU/UEFI -- `redbear-phase4-wayland-check` runs in-guest and confirms the Wayland launch surface is present -- the compositor reaches xkbcommon initialization and selects the Redox EGL platform in the live guest -- the direct launcher `local/scripts/test-phase4-wayland-qemu.sh` now boots the built - `redbear-wayland` disk image instead of re-entering the build pipeline - -For the runtime-validation pass, keep the **validation target** intentionally small as a bounded -compositor harness and not as the production desktop goal. The current `redbear-wayland` -profile still inherits the broader desktop package set from `desktop.toml`, but this repo's -Wayland validation remains a bounded regression harness subordinate to the `redbear-kde` / -`kwin_wayland` production path, not a peer desktop direction. - ---- - -## Historical Step 1: Fix relibc POSIX Gaps (1-2 weeks) - -### Historical implementation sketch - -Historically these were the 7 APIs that `libwayland/redox.patch` worked around. They now exist -in-tree in relibc, and this repo pass restored the full build-side path for `signalfd`, `timerfd`, -`eventfd`, `open_memstream`, and the related message flags. The remaining work is no longer basic -POSIX availability, but runtime validation of the full Wayland stack. 
- -#### 1.1 `signalfd` / `signalfd4` - -**Files to create/modify in relibc:** -``` -src/header/signal/mod.rs — add signalfd(), signalfd4() -src/header/signal/src.rs — add SFD_CLOEXEC, SFD_NONBLOCK constants -src/header/signal/types.rs — add signalfd_siginfo struct -src/platform/redox/mod.rs — wire to kernel event scheme or userspace signal handler -``` - -**Implementation approach:** -```rust -// src/header/signal/mod.rs -pub fn signalfd(fd: c_int, mask: *const sigset_t, flags: c_int) -> c_int { - // If fd == -1, create a new "signal FD" using event scheme - // Register signal mask with the signal handling infrastructure - // Return FD that becomes readable when signals arrive - // Map to Redox: use event: scheme + signal userspace handler -} -``` - -**Approximate effort**: ~200 lines of Rust. - -#### 1.2 `timerfd` - -**Files to create in relibc:** -``` -src/header/sys_timerfd/mod.rs — NEW: timerfd_create(), timerfd_settime(), timerfd_gettime() -src/header/sys_timerfd/types.rs — NEW: itimerspec, TFD_CLOEXEC, TFD_NONBLOCK, TFD_TIMER_ABSTIME -src/platform/redox/mod.rs — wire to time: scheme -``` - -**Implementation approach:** -```rust -// src/header/sys_timerfd/mod.rs -pub fn timerfd_create(clockid: c_int, flags: c_int) -> c_int { - // Create a timer FD using Redox time: scheme - // Return FD that becomes readable when timer fires - // Read returns uint64_t count of expirations -} - -pub fn timerfd_settime(fd: c_int, flags: c_int, new: *const itimerspec, old: *mut itimerspec) -> c_int { - // Arm/disarm timer - // Use time: scheme for absolute/relative timers -} -``` - -**Approximate effort**: ~300 lines of Rust. 
- -#### 1.3 `eventfd` - -**Files to create in relibc:** -``` -src/header/sys_eventfd/mod.rs — NEW: eventfd(), eventfd_read(), eventfd_write() -src/header/sys_eventfd/types.rs — EFD_CLOEXEC, EFD_NONBLOCK, EFD_SEMAPHORE -``` - -**Implementation approach:** -```rust -// Simplest of the three — just an atomic counter accessed via read/write -pub fn eventfd(initval: c_uint, flags: c_int) -> c_int { - // Create a pipe-like FD backed by a shared atomic counter - // read() blocks until counter > 0, returns counter, resets to 0 - // write() adds to counter - // Use Redox pipe: scheme internally -} -``` - -**Approximate effort**: ~100 lines of Rust. - -#### 1.4 `F_DUPFD_CLOEXEC` - -**File to modify in relibc:** -``` -src/header/fcntl/mod.rs — add F_DUPFD_CLOEXEC constant (value 0x406 on Linux x86_64) -src/platform/redox/alloc.rs — handle F_DUPFD_CLOEXEC in fcntl() -``` - -```rust -// In fcntl handler: -pub const F_DUPFD_CLOEXEC: c_int = 0x406; // Linux value - -// In fcntl() match: -F_DUPFD_CLOEXEC => { - let new_fd = syscall::dup(fd, None)?; - // Set CLOEXEC flag on new_fd - // Return new_fd -} -``` - -**Approximate effort**: ~20 lines. - -#### 1.5 `MSG_CMSG_CLOEXEC` and `MSG_NOSIGNAL` - -**Files to modify in relibc:** -``` -src/header/sys_socket/mod.rs — add MSG_CMSG_CLOEXEC (0x40000000), MSG_NOSIGNAL (0x4000) -src/platform/redox/mod.rs — handle in recvmsg/sendmsg -``` - -`MSG_NOSIGNAL`: suppress SIGPIPE on broken connection. On Redox, SIGPIPE handling -is already userspace — just don't send the signal when this flag is set. - -`MSG_CMSG_CLOEXEC`: set CLOEXEC on FDs received via SCM_RIGHTS. Apply the flag -when processing ancillary data in recvmsg. - -**Approximate effort**: ~50 lines. 
- -#### 1.6 `open_memstream` - -**File to modify in relibc:** -``` -src/header/stdio/mod.rs — add open_memstream() -src/header/stdio/src.rs — implementation -``` - -```rust -pub fn open_memstream(bufp: *mut *mut c_char, sizep: *mut usize) -> *mut FILE { - // Create a write-only stream that dynamically grows a buffer - // On close or flush, update *bufp and *sizep - // Can be implemented using a backing Vec and custom FILE vtable -} -``` - -**Approximate effort**: ~200 lines. - -### Verification - -After implementing all 7 APIs: -1. Rebuild relibc: `./target/release/repo cook recipes/core/relibc` -2. Rebuild libwayland **without** `redox.patch` — it should compile natively -3. Test: `wayland-rs_simple_window` runs without crashes - ---- - -## Historical Step 2: evdev Input Daemon (4-6 weeks) - -### Architecture - -``` -┌──────────────────┐ ┌──────────────────────┐ ┌──────────────┐ -│ libinput │────→│ /dev/input/eventX │────→│ evdevd │ -│ (ported) │ │ (character devices) │ │ (daemon) │ -└──────────────────┘ └──────────────────────┘ └──────┬───────┘ - │ - reads Redox schemes: - input:, scheme:irq -``` - -### What to build - -**New daemon: `evdevd`** (userspace, like all Redox drivers) - -Create as a new recipe: `recipes/core/evdevd/` - -**Source structure:** -``` -evdevd/ -├── Cargo.toml -├── src/ -│ ├── main.rs — daemon entry, scheme registration -│ ├── scheme.rs — implements "evdev" scheme -│ ├── device.rs — translates Redox events to input_event -│ └── ioctl.rs — handles EVIOCG* ioctls -``` - -**Key implementation:** - -```rust -// src/main.rs -fn main() { - // 1. Open existing Redox input sources - let keyboard = File::open("scheme:input/keyboard")?; - let mouse = File::open("scheme:input/mouse")?; - - // 2. Create /dev/input symlinks (pointing to our scheme) - // /dev/input/event0 → /scheme/evdev/keyboard - // /dev/input/event1 → /scheme/evdev/mouse - - // 3. Register evdev scheme - let scheme = File::create(":evdev")?; - - // 4. 
Event loop: read from Redox input schemes, translate, write to evdev clients - loop { - let redox_event = read_redox_event(&keyboard)?; - let evdev_event = translate_to_input_event(redox_event); - // Deliver to subscribed clients - } -} -``` - -```rust -// src/ioctl.rs — implement evdev ioctls -fn handle_ioctl(fd: usize, request: usize, arg: usize) -> Result { - match request { - EVIOCGNAME => { /* write device name string to arg */ }, - EVIOCGBIT => { /* write supported event types bitmap to arg */ }, - EVIOCGABS => { /* write absinfo struct for absolute axes */ }, - EVIOCGRAB => { /* grab/exclusive access to device */ }, - EVIOCGPROP => { /* write device properties bitmap */ }, - _ => Err(syscall::Error::new(syscall::EINVAL)), - } -} -``` - -**Also needed: udev shim** - -> **Historical path note:** the `recipes/wip/...` paths in the example steps below document the -> original upstream-oriented implementation path. Under the current Red Bear policy, upstream WIP is -> an input/reference, but the shipping/fixed version may live under `local/recipes/` instead. - -Create `recipes/wip/wayland/udev-shim/` — a minimal udev implementation that: -- Enumerates `/dev/input/event*` devices -- Emits "add"/"remove" events via netlink-compatible socket -- Provides `udev_device_get_property_value()` for `ID_INPUT_*` properties - -libinput needs this for hotplug. A minimal shim is ~500 lines of Rust. - -**Then port libinput:** - -Modify `recipes/wip/wayland/libinput/` (currently missing — create it): -```toml -[source] -tar = "https://gitlab.freedesktop.org/wayland/libinput/-/archive/1.27.0/libinput-1.27.0.tar.gz" -patches = ["redox.patch"] - -[build] -template = "meson" -dependencies = [ - "evdevd", - "libffi", - "libwayland", - "udev-shim", - "mtdev", # touchpad multi-touch - "libevdev", # evdev wrapper library -] -mesonflags = [ - "-Ddocumentation=false", - "-Dtests=false", - "-Ddebug-gui=false", -] -``` - -### Verification - -1. Build and run `evdevd` -2. 
`cat /dev/input/event0` shows keyboard events -3. Build libinput against evdevd -4. `libinput list-devices` shows keyboard and mouse - ---- - -## Historical Step 3: DRM/KMS Scheme (8-12 weeks) - -### Architecture - -``` -┌──────────────┐ ┌───────────────────┐ ┌────────────────┐ -│ libdrm │───→│ scheme:drm/card0 │───→│ drmd (daemon) │ -│ (ported) │ │ DRM ioctls via │ │ GPU driver │ -│ │ │ scheme protocol │ │ userspace │ -└──────────────┘ └───────────────────┘ └───────┬────────┘ - │ - scheme:memory + scheme:irq - │ - Hardware (GPU) -``` - -### What to build - -**New daemon: `drmd`** (DRM daemon — starts with Intel support) - -Create as: `recipes/core/drmd/` - -**Source structure:** -``` -drmd/ -├── Cargo.toml -├── src/ -│ ├── main.rs — daemon entry, PCI enumeration -│ ├── scheme.rs — registers "drm" scheme -│ ├── kms/ -│ │ ├── mod.rs — KMS object management -│ │ ├── crtc.rs — CRTC implementation -│ │ ├── connector.rs — connector (HDMI, DP, eDP) -│ │ ├── encoder.rs — encoder -│ │ ├── plane.rs — primary + cursor planes -│ │ └── framebuffer.rs — framebuffer allocation -│ ├── gem/ -│ │ └── mod.rs — GEM buffer management -│ └── drivers/ -│ ├── mod.rs — driver trait -│ └── intel.rs — Intel GPU driver (modesetting) -``` - -**Core DRM scheme protocol:** - -```rust -// src/scheme.rs -// DRM scheme implements the same ioctls as Linux /dev/dri/card0 -// but via Redox scheme read/write/packet protocol - -enum DrmRequest { - // Core - GetVersion, - GetCap { capability: u64 }, - - // KMS - ModeGetResources, - ModeGetConnector { connector_id: u32 }, - ModeGetEncoder { encoder_id: u32 }, - ModeGetCrtc { crtc_id: u32 }, - ModeSetCrtc { crtc_id: u32, fb_id: u32, x: u32, y: u32, connectors: Vec, mode: ModeModeInfo }, - ModePageFlip { crtc_id: u32, fb_id: u32, flags: u32, user_data: u64 }, - ModeAtomicCommit { flags: u32, props: Vec }, - - // GEM - GemCreate { size: u64 }, - GemClose { handle: u32 }, - GemMmap { handle: u32 }, - - // Prime/DMA-BUF - PrimeHandleToFd { handle: u32, flags: 
u32 }, - PrimeFdToHandle { fd: i32 }, -} -``` - -**Intel driver (starting point):** - -```rust -// src/drivers/intel.rs -// Based on public Intel GPU documentation: -// https://01.org/linuxgraphics/documentation/hardware-specification-prm - -pub struct IntelDriver { - mmio: *mut u8, // Memory-mapped I/O registers (via scheme:memory) - gtt_size: usize, // Graphics Translation Table size - framebuffer: PhysAddr, // Current scanout buffer -} - -impl IntelDriver { - pub fn new(pci_dev: &PciDev) -> Result { - // Map MMIO registers via scheme:memory/physical - let mmio = map_physical_memory(pci_dev.bar[0], pci_dev.bar_size[0])?; - - // Initialize GTT (Graphics Translation Table) - // Set up display pipeline - - Ok(Self { mmio, gtt_size, framebuffer }) - } - - pub fn modeset(&self, mode: &ModeInfo) -> Result<()> { - // 1. Allocate framebuffer in GTT - // 2. Configure pipe (timing, PLL) - // 3. Configure transcoder - // 4. Configure port (HDMI/DP) - // 5. Enable scanout from new framebuffer - Ok(()) - } - - pub fn page_flip(&self, crtc: u32, fb: PhysAddr) -> Result<()> { - // 1. Update GTT entry to point to new framebuffer - // 2. Trigger page flip on next VBlank - // 3. VBlank interrupt signals completion (via scheme:irq) - Ok(()) - } -} -``` - -### Verification - -1. `drmd` registers `scheme:drm/card0` -2. Port `modetest` (from libdrm tests) — shows connector info and modes -3. `modetest -M intel -s 0:1920x1080` sets a mode and shows test pattern - ---- - -## Historical Step 4: Working Wayland Compositor (4-6 weeks after Steps 1-3) - -### Historical staging note: bounded compositor validation before KWin - -This section is historical context only. It explains why a smaller compositor was used earlier as a bounded -validation compositor before the KWin-first production path became the repo's architecture. 
- -**Why a smaller compositor was used first as a validation compositor:** -- Pure Rust — no C++ toolchain issues -- Already has a Redox branch (`https://github.com/jackpot51/smithay`, branch `redox`) -- Smithay's input backend is pluggable — write a Redox-specific one -- Provided an earlier bounded compositor proof before the KWin session path was viable - -**What to modify in Smithay:** - -``` -smithay/ -├── src/backend/ -│ ├── input/ -│ │ └── redox.rs — NEW: Redox input backend (reads evdev scheme) -│ ├── drm/ -│ │ └── redox.rs — NEW: Redox DRM backend (uses scheme:drm) -│ └── egl/ -│ └── redox.rs — NEW: Redox EGL display (uses Mesa) -``` - -**Redox input backend:** -```rust -// src/backend/input/redox.rs -pub struct RedoxInputBackend { - devices: Vec, // opened from /dev/input/eventX -} - -impl InputBackend for RedoxInputBackend { - fn dispatch(&mut self) -> Vec { - // Read from all evdev devices via evdevd - // Translate to Smithay's InternalEvent type - } -} -``` - -**Redox DRM backend:** -```rust -// src/backend/drm/redox.rs -pub struct RedoxDrmBackend { - drm_fd: File, // opened from /scheme/drm/card0 -} - -impl DrmBackend for RedoxDrmBackend { - fn create_surface(&self, size: Size) -> Surface { - // Create framebuffer via DRM GEM - // Set KMS mode via scheme:drm - } - - fn page_flip(&self, surface: &Surface) -> Result { - // DRM page flip via scheme - } -} -``` - -### Recipe to add/modify - -```toml -# recipes/wip/wayland//recipe.toml (modify existing) -[source] -git = "https://github.com/jackpot51/smithay" -branch = "redox" - -[build] -template = "cargo" -dependencies = [ - "libffi", - "libwayland", - "libxkbcommon", - "mesa", # for EGL - "libdrm", # for DRM backend - "evdevd", # for input - "seatd", # for session management -] -cargopackages = [""] -``` - -### Verification - -1. the validation compositor launches with DRM backend — takes over display -2. Keyboard and mouse work via evdevd -3. 
`libcosmic-wayland_application` renders a window on the compositor -4. Screenshot shows the window - ---- - -## Historical Step 5: Additional compositor experiments - -Once Steps 1-4 are done: - -1. Re-enable additional historical compositor experiments only if they serve bounded validation needs -2. Keep DRM + libinput + GBM experimentation explicitly subordinate to the canonical KWin path -3. Treat any extra compositor work as historical/reference exploration rather than a forward desktop path -4. For the canonical desktop path, see `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md`; for historical rationale, see `05-KDE-PLASMA-ON-REDOX.md` - ---- - -## Fastest Path Summary - -``` -Week 1-2: Implement signalfd/timerfd/eventfd/etc in relibc - → libwayland builds without patches - -Week 3-8: Build evdevd (input daemon) + udev shim - → libinput works - -Week 9-20: Build drmd (DRM daemon) with Intel modesetting - → libdrm works, modesetting functional - -Week 21-26: Smithay Redox backends (input + DRM + EGL) - → Working Wayland compositor with hardware display - -Week 27+: Port Qt, KDE Frameworks, Plasma Shell - → KDE Plasma desktop (see doc 05) -``` - -**Key insight**: Steps 2 (evdev) and 3 (DRM) can run in parallel. -With 2 developers, the Wayland compositor is achievable in ~6 months. 
diff --git a/docs/AGENTS.md b/docs/AGENTS.md index bc754a67..a9fb4dbe 100644 --- a/docs/AGENTS.md +++ b/docs/AGENTS.md @@ -25,7 +25,6 @@ For current Red Bear OS status, also read: docs/ ├── 01-REDOX-ARCHITECTURE.md # Architecture reference: microkernel, scheme system, driver model, display architecture ├── 02-GAP-ANALYSIS.md # Historical gap matrix with corrected current-state notes -├── 03-WAYLAND-ON-REDOX.md # Historical Wayland implementation path + deeper rationale ├── 04-LINUX-DRIVER-COMPAT.md # Driver-compat architecture reference + historical porting path ├── 05-KDE-PLASMA-ON-REDOX.md # Historical KDE implementation path + deeper rationale ├── 06-BUILD-SYSTEM-SETUP.md # Build/setup mechanics guide (not canonical policy) @@ -46,11 +45,9 @@ docs/ | What is the current Wi-Fi architecture and validation path? | local/docs/WIFI-IMPLEMENTATION-PLAN.md / local/docs/WIFI-VALIDATION-RUNBOOK.md | Entire document | | What is the current desktop-stack truth? | local/docs/DESKTOP-STACK-CURRENT-STATUS.md | Entire document | | What is the current Qt/KF6 status? | local/docs/QT6-PORT-STATUS.md | Entire document | -| What's missing for Wayland? | 02 | Status correction + ordered remaining gaps | +| What's missing for Wayland? | local/docs/WAYLAND-IMPLEMENTATION-PLAN.md | Entire document | | How to fix POSIX gaps? | local/docs/RELIBC-COMPLETENESS-AND-ENHANCEMENT-PLAN.md | Current relibc completeness work | -| How to build evdevd? | 03 | §2 (evdev input daemon architecture) | -| How to build DRM/KMS? | 03 | §3 (drmd daemon, Intel driver) | -| How to port a Wayland compositor? | 03 | §4 (Smithay Redox backends) | +| What is the current Wayland plan? | local/docs/WAYLAND-IMPLEMENTATION-PLAN.md | Entire document | | How to run Linux GPU drivers? | 04 | Architecture diagram, i915 porting example | | What is redox-driver-sys? | 04 | Crate 1: memory, IRQ, PCI, DMA wrappers | | What is linux-kpi? 
| 04 | Crate 2: C headers translating Linux→Redox APIs | diff --git a/local/AGENTS.md b/local/AGENTS.md index 03c823e8..3330350e 100644 --- a/local/AGENTS.md +++ b/local/AGENTS.md @@ -11,11 +11,20 @@ Red Bear OS relates to Redox OS in the same way Ubuntu relates to Debian: - The `local/` directory is our overlay — untouched by upstream updates - First-class configs use `redbear-*` naming (not `my-*`, which is gitignored) +## FREE/LIBRE SOFTWARE POLICY + +Red Bear OS must remain a free/libre project. + +- Prefer components that are open-source, freely available to all users, or built in-tree by Red Bear. +- Do not introduce proprietary, source-unavailable, paywalled, or redistributability-restricted dependencies into the tracked system surface. +- When a dependency is dual-licensed under multiple free/open licenses, choose and document the option that is compatible with the Red Bear project surface. +- For the greeter/login stack specifically, the current SHA-crypt verifier path is the pure-Rust `sha-crypt` crate, licensed `MIT OR Apache-2.0`; Red Bear treats it under the MIT option for compatibility with the project's free-software policy. 
+ Build flow: ``` -make all CONFIG_NAME=redbear-kde - → mk/config.mk resolves to config/redbear-kde.toml - → Config includes desktop.toml (mainline) + Red Bear packages +make all CONFIG_NAME=redbear-full + → mk/config.mk resolves to the active desktop/graphics compile target + → Desktop/graphics are available only on redbear-full and redbear-live-full → repo cook builds all packages including our custom ones → mk/disk.mk creates harddrive.img with Red Bear branding ``` @@ -23,9 +32,24 @@ make all CONFIG_NAME=redbear-kde Update flow: ``` ./local/scripts/sync-upstream.sh # Rebase onto upstream Redox + verify symlinks -make all CONFIG_NAME=redbear-kde # Rebuild with latest +make all CONFIG_NAME=redbear-full # Rebuild the active desktop/graphics target ``` +## ACTIVE COMPILE TARGETS + +The supported compile targets are exactly: + +- `redbear-mini` +- `redbear-live-mini` +- `redbear-full` +- `redbear-live-full` + +Desktop/graphics are available only on `redbear-full` and `redbear-live-full`. + +Names such as `redbear-kde`, `redbear-wayland`, and `redbear-minimal` may still appear in older +docs, legacy validation notes, or in-repo staging configs, but they should not be treated as the +current supported compile targets. + ## TRACKING UPSTREAM (SYNC WITH REDOX OS) Red Bear OS tracks the Redox OS build system as upstream. 
The `local/` directory @@ -133,9 +157,10 @@ redox-master/ ← git pull updates mainline Redox ├── config/ │ ├── desktop.toml ← mainline configs (untouched) │ ├── minimal.toml -│ ├── redbear-desktop.toml ← RED BEAR OS configs (first-class, tracked) -│ ├── redbear-minimal.toml -│ └── redbear-live.toml +│ ├── redbear-full.toml ← Active desktop/graphics target +│ ├── redbear-live-full.toml ← Live desktop/graphics target +│ ├── redbear-mini*.toml ← Minimal target surface (legacy/staging naming may still vary in-tree) +│ └── redbear-greeter-services.toml ← Greeter/auth/session-launch wiring fragment ├── recipes/ ← mainline package recipes (untouched) ├── mk/ ← mainline build system (untouched) ├── local/ ← RED BEAR OS custom work @@ -147,8 +172,11 @@ redox-master/ ← git pull updates mainline Redox │ │ ├── drivers/ ← redox-driver-sys, linux-kpi (GPU/Wi-Fi compat only — NOT USB) │ │ ├── gpu/ ← redox-drm (AMD + Intel display drivers), amdgpu (C port) │ │ ├── system/ ← cub, evdevd, udev-shim, redbear-firmware, firmware-loader, redbear-hwutils, redbear-info, redbear-netctl, redbear-quirks, redbear-meta -│ │ │ ├── redbear-sessiond ← org.freedesktop.login1 D-Bus session broker (zbus-based Rust daemon) -│ │ │ ├── redbear-dbus-services ← D-Bus .service activation files + XML policies +│ │ │ ├── redbear-sessiond ← org.freedesktop.login1 D-Bus session broker (zbus-based Rust daemon) +│ │ │ ├── redbear-authd ← local-user authentication daemon (`/etc/passwd` + `/etc/shadow` + `/etc/group`) +│ │ │ ├── redbear-session-launch ← session bootstrap helper (uid/gid/env/runtime-dir handoff) +│ │ │ ├── redbear-greeter ← greeter orchestrator package (`redbear-greeterd`, UI, compositor wrapper, staged assets) +│ │ │ ├── redbear-dbus-services ← D-Bus .service activation files + XML policies │ │ ├── wayland/ ← Wayland compositor (v2.0 Phase 2) │ │ └── kde/ ← KDE Plasma (v2.0 Phases 3–4) │ ├── patches/ @@ -185,25 +213,27 @@ redox-master/ ← git pull updates mainline Redox │ │ ├── test-ps2-qemu.sh ← 
QEMU launcher for the bounded PS/2 + serio runtime proof │ │ ├── test-timer-qemu.sh ← QEMU launcher for the bounded monotonic timer runtime proof │ │ ├── test-lowlevel-controllers-qemu.sh ← Sequential wrapper for bounded low-level controller proofs -│ │ └── test-usb-maturity-qemu.sh ← Sequential wrapper for bounded USB maturity proofs +│ │ ├── test-usb-maturity-qemu.sh ← Sequential wrapper for bounded USB maturity proofs +│ │ └── test-greeter-qemu.sh ← Bounded QEMU proof for the Red Bear greeter/auth/session surface │ └── docs/ ← Integration docs ``` ## HOW TO BUILD RED BEAR OS ```bash -# Tracked KWin Wayland desktop target -./local/scripts/build-redbear.sh redbear-kde +# Active desktop/graphics target +./local/scripts/build-redbear.sh redbear-full -# Minimal server variant -./local/scripts/build-redbear.sh redbear-minimal +# Minimal non-desktop target +./local/scripts/build-redbear.sh redbear-mini -# Live ISO -./local/scripts/build-redbear.sh redbear-live && make live CONFIG_NAME=redbear-live +# Live images +./local/scripts/build-redbear.sh redbear-live-full && make live CONFIG_NAME=redbear-live-full +./local/scripts/build-redbear.sh redbear-live-mini && make live CONFIG_NAME=redbear-live-mini # VM-network baseline validation helpers ./local/scripts/validate-vm-network-baseline.sh -./local/scripts/test-vm-network-qemu.sh redbear-minimal +./local/scripts/test-vm-network-qemu.sh redbear-mini # Then run inside the guest: # ./local/scripts/test-vm-network-runtime.sh @@ -212,7 +242,8 @@ redox-master/ ← git pull updates mainline Redox ./local/scripts/test-phase1-desktop-substrate.sh --qemu redbear-wayland # Legacy Phase 3 runtime-substrate validation (historical P0-P6 numbering; script still works) -./local/scripts/test-phase3-runtime-substrate.sh --qemu redbear-kde +# Use the active desktop target when adapting historical validation flows. 
+./local/scripts/test-phase3-runtime-substrate.sh --qemu redbear-full # Low-level controller validation ./local/scripts/test-xhci-irq-qemu.sh --check @@ -249,6 +280,13 @@ redox-master/ ← git pull updates mainline Redox # Then run inside the guest: # redbear-phase5-network-check +# Experimental Red Bear greeter/login validation +./local/scripts/build-redbear.sh redbear-full +./local/scripts/test-greeter-qemu.sh --check +# Then run inside the guest: +# redbear-greeter-check +# redbear-greeter-check --invalid root wrong + # Bounded Intel Wi-Fi runtime validation (real target or passthrough guest) # Host preparation for VFIO-backed guests: # sudo ./local/scripts/validate-wifi-vfio-host.sh --host-pci 0000:xx:yy.z --expect-driver iwlwifi @@ -265,7 +303,7 @@ redox-master/ ← git pull updates mainline Redox # ./local/scripts/finalize-wifi-validation-run.sh ./wifi-passthrough-capture.json ./wifi-passthrough-artifacts.tar.gz # Legacy Phase 6 KDE session-surface validation (historical P0-P6 numbering; script still works) -./local/scripts/build-redbear.sh redbear-kde +./local/scripts/build-redbear.sh redbear-full ./local/scripts/test-phase6-kde-qemu.sh --check # Then run inside the guest: # redbear-phase6-kde-check @@ -274,7 +312,7 @@ redox-master/ ← git pull updates mainline Redox redbear-netctl --help # Or manually: -make all CONFIG_NAME=redbear-kde +make all CONFIG_NAME=redbear-full # Single custom recipe: ./target/release/repo cook local/recipes/branding/redbear-release @@ -313,13 +351,16 @@ When mainline updates affect our work: - `docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md` is the canonical public execution plan. - `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` (v2.0) is the canonical desktop path plan from console to hardware-accelerated KDE Plasma on Wayland, using a three-track Phase 1–5 model. +- `local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` is the canonical Wayland subsystem plan beneath the + desktop path. 
Use it for Wayland-specific stability, completeness, ownership, and runtime-proof + sequencing. - `local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md` is the current DRM-focused execution plan beneath the canonical desktop path. It keeps Intel and AMD at the same evidence bar while separating display/KMS maturity from render/3D maturity. - Older GPU-specific docs such as `local/docs/AMD-FIRST-INTEGRATION.md`, - `local/docs/P2-AMD-GPU-DISPLAY.md`, `local/docs/HARDWARE-3D-ASSESSMENT.md`, and - `local/docs/DMA-BUF-IMPROVEMENT-PLAN.md` remain useful reference material, but they are not the - planning authority when sequencing or acceptance criteria differ. + `local/docs/HARDWARE-3D-ASSESSMENT.md`, and `local/docs/DMA-BUF-IMPROVEMENT-PLAN.md` remain + useful reference material, but they are not the planning authority when sequencing or acceptance + criteria differ. - `local/docs/AMD-FIRST-INTEGRATION.md` remains the deeper AMD-specific technical roadmap, but AMD and Intel machines are now equal-priority Red Bear OS targets. - The earlier Phase 0–3 reassessment bridge has been retired. Its reconciliation role is now @@ -338,6 +379,7 @@ When mainline updates affect our work: - `local/docs/QUIRKS-IMPROVEMENT-PLAN.md` is the current follow-up plan for removing quirks drift, integrating quirks into real drivers, and converging on one source of truth. - `local/docs/DBUS-INTEGRATION-PLAN.md` is the canonical D-Bus architecture and implementation plan for KDE Plasma 6 on Wayland. It defines the phased approach to D-Bus service integration, the `redbear-sessiond` login1-compatible session broker, and the gap analysis for desktop-facing D-Bus services. +- `local/docs/GREETER-LOGIN-IMPLEMENTATION-PLAN.md` is the canonical Red Bear-native greeter/login design and current implementation plan for the `redbear-full` desktop path. 
It defines the `redbear-authd` / `redbear-session-launch` / `redbear-greeter` split, service wiring, validation surface, and the current boundary between the active greeter path and the older `redbear-validation-session` helper flows. The current execution order for these subsystem plans is: @@ -511,35 +553,34 @@ local/Assets/ ## RED BEAR OS CONFIG HIERARCHY +Active compile targets: + +- `redbear-mini` +- `redbear-live-mini` +- `redbear-full` +- `redbear-live-full` + +Desktop/graphics are available only on the `full` targets. Older names such as `redbear-kde`, +`redbear-wayland`, `redbear-minimal`, and `redbear-live-minimal` may still exist in the tree as +legacy or staging artifacts, but they are not the supported compile-target surface. + ``` -redbear-live.toml - └── redbear-kde.toml +redbear-live-full.toml + └── redbear-full.toml + ├── desktop.toml (mainline) ├── redbear-legacy-base.toml ← Neutralize broken base legacy init scripts ├── redbear-legacy-desktop.toml ← Neutralize broken desktop legacy init scripts ├── redbear-device-services.toml ← Shared firmware-loader / evdevd / udev service wiring ├── redbear-netctl.toml ← Shared Red Bear network profile files + netctl boot service - ├── desktop.toml (mainline) - │ ├── desktop-minimal.toml - │ │ └── minimal.toml - │ │ └── base.toml - │ └── server.toml - │ └── minimal.toml - │ └── base.toml + ├── redbear-greeter-services.toml ← Greeter/auth/session-launch wiring for desktop targets └── [packages] redbear-release, redbear-hwutils, redbear-netctl, firmware-loader, evdevd, udev-shim, redbear-info, - mc, cub - NOTE: ext4d is inherited from desktop.toml (mainline package) - NOTE: cub is treated as an essential Red Bear utility and is included through the tracked - flavor configs; it still depends on the custom recipe symlink - (recipes/system/cub → local/recipes/system/cub) being created by - integrate-redbear.sh or apply-patches.sh before building. 
- NOTE: redbear-netctl provides a Redox-native `netctl` command with profiles - in /etc/netctl and a boot-time `netctl --boot` service. - NOTE: redbear-info is the canonical runtime integration report. Keep it updated when - Red Bear adds new tools, schemes, services, or hardware integration paths. - NOTE: redbear-live inherits cub through redbear-kde.toml. - NOTE: redbear-meta is explicitly included in redbear-full.toml. Keep any broader inclusion - deliberate because its dependency surface is much heavier than the core utility layer. + redbear-sessiond, redbear-authd, redbear-session-launch, + redbear-greeter, redbear-meta, cub + NOTE: Desktop/graphics are available only on redbear-full and redbear-live-full. + NOTE: ext4d is inherited from desktop.toml (mainline package). + NOTE: redbear-meta is explicitly included in redbear-full.toml; keep broader inclusion deliberate. + NOTE: redbear-live-full inherits from redbear-full.toml. redbear-full.toml └── desktop.toml (mainline) @@ -547,20 +588,17 @@ redbear-full.toml └── redbear-legacy-desktop.toml ← Neutralize broken desktop legacy init scripts └── redbear-device-services.toml ← Shared firmware-loader / evdevd / udev service wiring └── redbear-netctl.toml ← Shared Red Bear network profile files + netctl boot service + └── redbear-greeter-services.toml ← Greeter/auth/session-launch wiring -redbear-wayland.toml - └── wayland.toml (mainline-derived Wayland profile) - └── bounded validation runtime surface - └── validation entrypoints: test-phase4-wayland-qemu.sh + redbear-phase4-wayland-check +redbear-live-mini.toml + └── minimal non-desktop live target + └── desktop/graphics intentionally absent -redbear-kde.toml - └── desktop.toml (mainline) - └── redbear-legacy-base.toml ← Neutralize broken base legacy init scripts - └── redbear-legacy-desktop.toml ← Neutralize broken desktop legacy init scripts - └── redbear-device-services.toml ← Shared firmware-loader / evdevd / udev service wiring - └── redbear-netctl.toml ← 
Shared Red Bear network profile files + netctl boot service +redbear-mini + └── legacy/staging config files in-tree still use the older `redbear-minimal*` names + in some places; do not treat those names as the supported compile-target surface -redbear-minimal.toml +redbear-minimal.toml (legacy/staging naming still present in tree) └── minimal.toml (mainline) └── base.toml └── redbear-legacy-base.toml ← Neutralize broken base legacy init scripts @@ -573,9 +611,10 @@ redbear-minimal.toml Config comparison: | Config | GPU Stack | Desktop | Branding | ext4d | filesystem_size | |--------|-----------|---------|----------|-------|-----------------| -| redbear-desktop | Full | Supplementary integration support | Yes | ✅ (via desktop.toml) | 10240 MiB | -| redbear-minimal | None | None | Yes | ❌ | 512 MiB | -| redbear-live | Full | KWin Wayland target | Yes | ✅ (via desktop.toml) | 12288 MiB | +| redbear-full | Full | Yes | Yes | ✅ (via desktop.toml) | 4096 MiB | +| redbear-live-full | Full | Yes | Yes | ✅ (via redbear-full.toml) | 4096 MiB | +| redbear-mini | None | None | Yes | legacy/staging naming in tree still maps through `redbear-minimal*` files | legacy/staging | +| redbear-live-mini | None | None | Yes | legacy/staging naming in tree still maps through `redbear-live-minimal*` files | legacy/staging | ## ANTI-PATTERNS (COMMIT POLICY) diff --git a/local/docs/ACPI-IMPROVEMENT-PLAN.md b/local/docs/ACPI-IMPROVEMENT-PLAN.md index bdf37e46..7e68efe9 100644 --- a/local/docs/ACPI-IMPROVEMENT-PLAN.md +++ b/local/docs/ACPI-IMPROVEMENT-PLAN.md @@ -103,12 +103,16 @@ bounded-hardware, or release-grade completeness. - MCFG handling was removed from `acpid` and replaced with the `pcid /config` path. - Shutdown eventing via `/scheme/kernel.acpi/kstop` is implemented and consumed by `redbear-sessiond`. +- `acpid` now models `S1` / `S3` / `S4` / `S5` explicitly in userspace, and the current `_S5` + shutdown path routes through that model instead of a special-case magic value. 
### Weak today - Sleep-state transitions beyond `\_S5` are unsupported. - Sleep eventing is unsupported. - `SLP_TYPb` remains incomplete for broader sleep-state handling. +- Non-`S5` sleep targets are now represented explicitly, but they remain groundwork-only and do not + imply implemented suspend/resume support yet. - AML init order is still tied to PCI FD registration timing. - Some physmem / opregion failure paths are still not explicit enough. - DMAR remains orphaned in `acpid` source: present, not wired, not fully transferred. diff --git a/local/docs/AMD-FIRST-INTEGRATION.md b/local/docs/AMD-FIRST-INTEGRATION.md index ac0c159e..66f3bc22 100644 --- a/local/docs/AMD-FIRST-INTEGRATION.md +++ b/local/docs/AMD-FIRST-INTEGRATION.md @@ -44,7 +44,7 @@ take 5+ years. | x2APIC | ✅ Works | Auto-detected via CPUID, APIC/SMP functional | | HPET | ✅ Works | Timer initialized from ACPI | | IOMMU | 🚧 In progress | `iommu` daemon now builds, auto-discovers common IVRS table paths, reaches unit detection plus `scheme:iommu` registration in the QEMU/AMD-IOMMU validation path, and now has a guest-driven first-use self-test that initializes both discovered units and drains events successfully in QEMU; real hardware validation is still missing | -| AMD GPU | 🚧 In progress | MMIO mapped, DC port compiles, MSI-X wired, no hardware validation yet | +| AMD GPU | 🚧 In progress | MMIO mapped, bounded Red Bear display glue path builds, MSI-X wired; imported Linux AMD DC/TTM/core remain under compile triage; no hardware validation yet | | Wi-Fi/BT | 🚧 In progress | Repo now carries bounded wireless scaffolding: one experimental in-tree Bluetooth slice exists, and a bounded Intel Wi-Fi scaffold exists elsewhere, but validated wireless connectivity support is still incomplete | | USB | ⚠️ Variable | Some USB controllers work, others don't | @@ -259,22 +259,27 @@ ONLY the display/modesetting portion first, using linux-kpi headers. 
| MSI-X interrupt support | ✅ | `local/recipes/gpu/redox-drm/source/src/drivers/interrupt.rs` — shared MSI-X/MSI/legacy abstraction with quirk-aware fallback | | Intel pcid-spawner config | ✅ | `local/config/pcid.d/intel_gpu.toml` — auto-detect Intel GPUs | -### P2: AMD GPU Display — COMPLETE (compiles, no HW validation) +### P2: AMD GPU Display — BOUNDED PATH BUILDS (imported Linux AMD DC/TTM/core still under compile triage) | Component | Status | Files | |-----------|--------|-------| | redox-drm daemon | ✅ | `local/recipes/gpu/redox-drm/source/` — DRM scheme daemon | | AMD driver (Rust) | ✅ | `local/recipes/gpu/redox-drm/source/src/drivers/amd/mod.rs` | -| AMD DisplayCore (FFI) | ✅ | `local/recipes/gpu/redox-drm/source/src/drivers/amd/display.rs` | -| AMD PCI stubs (dynamic) | ✅ | `local/recipes/gpu/amdgpu/source/redox_stubs.c` — populated from Rust via FFI | -| AMD DC init (C) | ✅ | `local/recipes/gpu/amdgpu/source/amdgpu_redox_main.c` — modesetting, connector detect | -| AMD glue headers | ✅ | `local/recipes/gpu/amdgpu/source/redox_glue.h` — Linux compat surface | +| AMD DisplayCore (FFI surface) | ✅ bounded | `local/recipes/gpu/redox-drm/source/src/drivers/amd/display.rs` | +| AMD PCI stubs (dynamic) | ✅ bounded | `local/recipes/gpu/amdgpu/source/redox_stubs.c` — populated from Rust via FFI | +| AMD DC init / modeset glue (C) | ✅ bounded | `local/recipes/gpu/amdgpu/source/amdgpu_redox_main.c` — modesetting, connector detect | +| AMD glue headers | ✅ bounded | `local/recipes/gpu/amdgpu/source/redox_glue.h` — Linux compat surface for the retained path | | GTT manager | ✅ | `local/recipes/gpu/redox-drm/source/src/drivers/amd/gtt.rs` | | Ring buffer | ✅ | `local/recipes/gpu/redox-drm/source/src/drivers/amd/ring.rs` | | GEM buffer mgmt | ✅ | `local/recipes/gpu/redox-drm/source/src/gem.rs` | | DMA-BUF | ✅ | `local/recipes/gpu/redox-drm/source/src/scheme.rs` (PRIME export/import via opaque tokens) | | Intel driver | ✅ | 
`local/recipes/gpu/redox-drm/source/src/drivers/intel/mod.rs` + `display.rs` | +The current retained AMD build path now produces the `amdgpu` recipe from the Red Bear glue layer +plus Rust-side driver/runtime pieces. The broad imported Linux AMD display, TTM, and amdgpu core +trees are no longer treated as compile-complete deliverables; they remain under compile triage until +the bounded path proves a concrete need to re-introduce them. + For bounded runtime display validation, Red Bear now uses the shared `local/scripts/test-drm-display-runtime.sh` harness, with `local/scripts/test-amd-gpu.sh` as the AMD wrapper. @@ -282,6 +287,27 @@ AMD wrapper. Human-readable PCI naming for AMD/Intel devices now comes from the shipped `pciids` database rather than from hand-maintained GPU name tables in local runtime tools. +#### Historical P2 implementation snapshot + +The old standalone `P2-AMD-GPU-DISPLAY.md` milestone record is now folded into this AMD-specific +reference. + +Important historical P2 details that still matter: + +- **Architecture:** `userspace apps -> scheme:drm -> redox-drm daemon -> AMD DC (C code, + linux-kpi) -> MMIO` +- **Build integration:** the Red Bear GPU path is rooted in `local/recipes/gpu/redox-drm/` and + `local/recipes/gpu/amdgpu/`, with PCI auto-detection from `local/config/pcid.d/amd_gpu.toml` + and the imported Linux AMD driver tree in `local/recipes/gpu/amdgpu-source/` +- **Historical P2 boot sequence:** kernel PCI init -> `pcid` AMD GPU detection -> `redox-drm` + launch -> BAR/MMIO mapping -> firmware load via `scheme:firmware` -> AMD DC init -> connector + detect / EDID -> `scheme:drm/card0` registration +- **Historical implementation closure:** the scoped P2 implementation task was compile-complete for + display-side bring-up, but hardware validation remained and still remains a separate evidence gate + +That milestone should now be read through the current GPU/DRM plan and current desktop status docs +rather than as a standalone 
execution authority. + ### Build Verification All crates compile with `cargo check` (0 errors): diff --git a/local/docs/AMDGPU-DC-COMPILE-TRIAGE-PLAN.md b/local/docs/AMDGPU-DC-COMPILE-TRIAGE-PLAN.md new file mode 100644 index 00000000..420df68f --- /dev/null +++ b/local/docs/AMDGPU-DC-COMPILE-TRIAGE-PLAN.md @@ -0,0 +1,363 @@ +# AMDGPU DC Compile Triage Plan + +**Date:** 2026-04-18 +**Scope:** Triage of the current Red Bear amdgpu AMD Display Core compile path, specifically the +decision between growing the Linux compatibility surface and narrowing the imported display/DC +source set to the bounded path actually needed for first display bring-up. + +> **Planning authority note (2026-04-18):** this file is a focused amdgpu/DC compile-triage and +> execution document. It does not replace `local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md` as the +> canonical GPU/DRM plan. Use the DRM modernization plan for overall execution order, Intel/AMD +> parity criteria, and broader acceptance gates. Use this file for the specific question of how to +> triage the current amdgpu DC compile break without drifting into open-ended compatibility work. + +> **Status update (2026-04-18):** Phase 1B has now been carried out in bounded form. The `amdgpu` +> recipe builds successfully on the retained Red Bear glue path (`amdgpu_redox_main.c` + +> `redox_stubs.c`), while the imported Linux AMD display, TTM, and amdgpu core trees remain +> explicitly outside the retained compile surface and still under compile triage. + +## Title and intent + +Red Bear currently compiles the imported AMD display tree too broadly for the evidence-backed goal +it actually has today. + +The immediate goal is **not** to prove that the full imported AMD Display Core tree compiles on +Redox. The immediate goal is to unblock the bounded display path needed for first display-side +bring-up while preserving a maintainable route toward broader DC closure later. + +This document exists to prevent two failure modes: + +1. 
treating the first compile error as if it justifies unconstrained `linux-kpi` expansion, and +2. claiming progress from a narrowed compile path without documenting exactly what was excluded and + why. + +## Current grounded state + +### Bottom line + +The original broad-tree failure was **not** a `freesync.c`-specific logic bug. It exposed a broader +mismatch between the imported AMD DC / TTM / amdgpu trees and the current Red Bear compatibility +strategy. + +After narrowing the recipe to the actual retained first-display path, the `amdgpu` recipe now +builds successfully from the Red Bear glue layer alone. That is the current truthful state: the +bounded retained path builds, while the imported Linux trees remain under compile triage rather than +being claimed as compile-complete. + +### Confirmed evidence + +| Area | Current evidence | Repo grounding | +|---|---|---| +| Historical broad-path rule | The old recipe compiled all `display/*.c` files and failed in optional AMD DC code before the retained path was proven | historical recipe state + `local/recipes/gpu/amdgpu/target/x86_64-unknown-redox/build/freesync.o.log` | +| Current retained build rule | The current recipe compiles only the bounded Red Bear glue path and links `libamdgpu_dc_redox.so` from that retained surface | `local/recipes/gpu/amdgpu/recipe.toml` | +| Historical first hard failure | `freesync.c -> dm_services.h -> dm_services_types.h -> os_types.h -> linux/kgdb.h` | `local/recipes/gpu/amdgpu/target/x86_64-unknown-redox/build/freesync.o.log` | +| Current shim posture | Compatibility surface is partial, not absent | `local/recipes/drivers/linux-kpi/source/src/c_headers/`, `local/recipes/gpu/amdgpu/source/redox_glue.h` | +| Small retained-path shim probes attempted | Added minimal `linux/export.h` and `linux/refcount.h` while testing whether imported TTM belonged on the retained path | `local/recipes/drivers/linux-kpi/source/src/c_headers/linux/export.h`, `.../linux/refcount.h` | +| Switch 
criterion outcome | Imported TTM immediately fanned into broader Linux-kernel surfaces (`__cond_acquires`, `iosys-map`, and related header fallout), so the retained path was narrowed again instead of growing shims further | retained build logs during TTM probe |
+| Current Red Bear need | First display bring-up needs a bounded display path, not proof that all optional AMD DC subtrees compile | `local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md`, `local/docs/AMD-FIRST-INTEGRATION.md` |
+
+### Why the previous broad-compile approach was unstable
+
+Before the retained path was narrowed, the amdgpu recipe used a broad compile rule that effectively
+said:
+
+> compile the imported display tree first, then see what breaks.
+
+That was useful for discovery, but it is a poor default execution strategy for bounded bring-up.
+
+It pulled optional and advanced display code into the same compile surface as the first modeset path,
+which meant a failure in a module such as FreeSync could block the entire experiment even when that
+module was not yet proven necessary for the first Red Bear display target.
+
+## Triage question
+
+Red Bear needed an explicit answer to this question before continuing:
+
+> Should the repo first grow the Linux compatibility layer until the full imported AMD display tree
+> compiles further, or should it first narrow the imported source set to the display path Red Bear
+> actually needs today?
+
+This document answers:
+
+- **Start with Strategy B** — narrow the DC source set.
+- Use **Strategy A** — minimal shim additions — only as a controlled fallback when the retained,
+  bounded display path still proves a small required compatibility gap.
+
+## Strategy comparison
+
+| Strategy | What it does | Best when | Success criteria | Main failure mode |
+|---|---|---|---|---|
+| **A. 
Minimal shim additions** | Add the smallest Linux compatibility surface needed to expose the next blocker | The real retained display path is already known, and the missing API surface stays small and generic | Each shim advances the build by one blocker class without broadening scope dramatically | Header whack-a-mole grows into de facto kernel-environment emulation |
+| **B. Narrow the DC source set** | Replace broad full-tree compile with an explicit bounded file list aligned to the actual first display goal | Optional or advanced modules are being pulled into the build before their necessity is proven | The reduced source set compiles further or reveals the next blocker on the true bring-up path | False confidence if the narrowed claim is not documented precisely |
+
+## Recommendation
+
+### Recommendation summary
+
+Start with **B: narrow the compiled DC source set to the bounded display path Red Bear actually
+uses today**.
+
+That recommendation has now been implemented in bounded form. The retained path was narrowed far
+enough to prove that the current Red Bear bring-up surface does not need the imported Linux AMD
+display, TTM, or amdgpu core trees in order to build the shipped `amdgpu` recipe.
+
+At the time of the original triage decision, the evidence supported that recommendation because:
+
+1. the recipe still compiled the entire imported display tree,
+2. the first blocker sat in a dependency cone that likely contained several more Linux/DRM header
+   and semantic assumptions, and
+3. Red Bear's need was bounded display bring-up, not immediate proof that every imported
+   AMD DC subsystem compiles under Redox.
+
+### Why A is not the first move
+
+The first hard failure (`linux/kgdb.h`) is shallow enough to tempt a quick shim fix. That is useful
+only if the retained path is already known. At the time of the original decision it was not. Without
+narrowing the source set first, each new shim risks paying compatibility cost for files Red Bear may
+not need for first bring-up.
+ +That is the main hidden cost of Strategy A at this stage: it can create real maintenance debt before +the repo has proven that the affected code is on the first bring-up path at all. + +## ULW execution plan + +## Phase 0 — Freeze the baseline + +### Goal + +Create one canonical failure snapshot that all later triage work can refer back to. + +### Actions + +- Record the current broad display compile rule in the amdgpu recipe. +- Record the first failing translation unit and full include chain. +- Record the current bounded Red Bear display objective and the currently targeted ASIC/runtime + surface. + +### Exit criteria + +One written baseline exists showing: + +- the current full-tree compile behavior, +- the current first hard failure at `linux/kgdb.h`, and +- the current bounded display objective. + +### Current status + +- complete enough to proceed + +## Phase 1B — Narrow-source probe + +### Goal + +Identify the minimum imported display/DC source set required for current Red Bear display bring-up. + +### Required mindset + +The question in this phase is not “what can Linux build?” + +The question is: + +> what does Red Bear actually need compiled now to support its present display-side target? + +### Actions + +- Replace broad `find .../display -name '*.c'` behavior with an explicit bounded file list. +- Treat the first retained file list as a **probe hypothesis**, not as a proven final minimum. +- Keep only the C sources required for the current Red Bear bring-up surface hypothesis: + - device initialization, + - connector detection and mode enumeration, + - bounded modeset path, + - cleanup, + - and the currently targeted ASIC families. +- Exclude obvious scope inflators first unless the call graph proves they are required: + - `modules/freesync/*`, + - untargeted DCN generations, + - `amdgpu_dm/*`, + - optional feature modules not on the first display path. + +### Verification + +- The reduced file list is explicit and reviewable. 
+- The reduced build is re-run. +- The next failure is checked to confirm that it occurs on the retained bounded path rather than in + an excluded optional subtree. + +### Exit criteria + +One of the following becomes true: + +1. the narrowed set compiles meaningfully further than the current build, or +2. the next blocker appears on the real retained path and is therefore a justified compatibility + problem. + +### Failure signal + +If the narrowed set cannot be described cleanly because the retained path immediately drags in broad +optional subsystems, stop and move to the decision gate rather than continuing to guess. + +### Current status + +- complete — the retained path is now explicit and builds + +## Phase 1A — Minimal-shim probe + +### Goal + +Expose the next blocker with the smallest justified compatibility addition. + +### Entry condition + +Only do this after Phase 1B has established a retained bounded path, or after the narrowed path +proves that a small missing Linux primitive is genuinely required. + +### Allowed shim order + +Add one shim family at a time, in this rough priority order: + +1. `linux/kgdb.h` +2. `asm/byteorder.h` +3. `linux/vmalloc.h` +4. `ktime_get_raw_ns` / timekeeping support +5. `div64_u64` / `div64_u64_rem` +6. `linux/refcount.h` + +### Rules + +- One shim family per change. +- No speculative shim batches. +- No ad hoc amdgpu-only workaround when the gap clearly belongs in `linux-kpi`. +- If a shim exposes a large new Linux subsystem expectation rather than a narrow primitive, stop and + reconsider the strategy. + +### Verification + +- Re-run the build after each shim family. +- Confirm that the build advances by one blocker class. +- Confirm that the next failure remains on the retained bounded path. + +### Exit criteria + +- The build advances by exactly one blocker class, and +- the next failure still belongs to the retained bounded path. 
+ +### Failure signal + +If one shim immediately reveals several unrelated Linux subsystem requirements, stop and return to +Strategy B. + +## Phase 2 — Decision gate + +### Stay on Strategy B if + +- the blocker sits in optional or advanced code such as FreeSync, +- narrowing quickly reduces the blocker surface, +- failures outside the retained path disappear, +- or the retained path becomes understandable and controllable. + +### Switch from B to A if + +- **all** of the following are true: + - an explicit retained file list has been written down, + - the failure reproduces on that retained path after the narrowing pass, + - the missing piece is a small generic primitive or header family rather than a broad subsystem + expectation, + - and the same compatibility gap is visible across multiple retained core files or one retained + shared include chain. + +### Abort A and return to B if + +- more than one or two unrelated shim families are required before reaching a meaningful compile + milestone, +- missing APIs are dominated by files outside the retained runtime path, +- or the work starts resembling unconstrained kernel-environment emulation. + +## Phase 3 — Continue on the chosen path + +### If B wins + +- Keep the bounded file list explicit. +- Document exactly what the bounded claim covers. +- Do not quietly re-expand the tree. +- Add excluded modules back only behind explicit proof of need. +- Treat success here as **compile-triage progress only**. It does not imply full DC feature closure, + optional-module completeness, or runtime readiness. + +### If A wins + +- Expand `linux-kpi` deliberately rather than scattering shims through amdgpu-local code. +- Keep each new shim family generic and reusable where possible. +- Track each new compatibility family as maintenance debt that must justify itself. + +## Commit slicing + +Recommended commit order: + +1. narrow source set only, +2. first shim family only, +3. one blocker family per follow-up change. 
+ +Never mix broad source pruning and broad compatibility growth in the same commit. + +## Red / Green / Refactor loop + +### Red + +The historical full-tree display build failed at `linux/kgdb.h` while compiling `freesync.c`. + +### Green + +Either: + +- the narrowed source set compiles further, or +- one small shim advances the retained path to the next blocker. + +Current green state: + +- the bounded retained path now builds successfully, +- and the imported Linux AMD display / TTM / amdgpu trees remain explicitly excluded pending proven + need. + +### Refactor + +Codify the smallest proven source set and execution path before adding more compatibility surface. + +## Hidden failure modes + +### Strategy B hidden failure mode + +Strategy B can produce false confidence if the repo narrows the file list but does not write down +what functionality is now intentionally out of scope. + +That is why every narrowing step must be paired with an explicit bounded claim. + +### Strategy A hidden failure mode + +Strategy A can feel productive because each header addition removes one hard stop. But that can hide +the fact that the repo is drifting into long-term Linux-environment emulation for code that the +current Red Bear target may not even need. + +That is why A must stay subordinate to a retained, justified source set. + +## Definition of done + +This triage plan is complete when: + +- the repo has an explicit choice between bounded source narrowing and compatibility expansion, +- the choice is backed by compile evidence, +- optional AMD DC modules are not silently treated as required for first bring-up, +- and compatibility growth, if needed, is happening in the right long-term layer. + +For clarity, done here means the compile-triage path is explicit and justified. It does **not** mean +that the full AMD DC tree is complete, that excluded optional modules are unnecessary in all future +phases, or that runtime display validation is closed. 
+ +## Immediate next action + +Do this next: + +1. keep the retained `amdgpu` build path explicit and bounded, +2. do not quietly re-introduce imported Linux AMD display / TTM / core sources, +3. re-introduce imported subsystems only behind concrete runtime or feature evidence, +4. if a future re-introduction attempt fans into broad Linux-kernel compatibility work again, + treat that as a new triage pass rather than as proof that the broader tree belongs in the + default retained build. diff --git a/local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md b/local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md index 0b01fead..7aa90bc6 100644 --- a/local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md +++ b/local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md @@ -9,13 +9,15 @@ This is the single authoritative plan for the Red Bear OS path from console boot to a hardware-accelerated KDE Plasma desktop running on Wayland. -It consolidates and replaces the planning role previously held by: +It consolidates and replaces the top-level planning role previously held by: -- `docs/03-WAYLAND-ON-REDOX.md` (historical Wayland rationale) - `docs/05-KDE-PLASMA-ON-REDOX.md` (historical KDE rationale) - `local/docs/AMD-FIRST-INTEGRATION.md` (AMD-specific hardware detail) - Prior revisions of this document (v1, which used a different Phase 1–5 breakdown) +`local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` now serves as the canonical Wayland subsystem plan +beneath this top-level desktop path. + Those documents remain useful for subsystem detail, porting history, and design rationale. The earlier reassessment bridge is now retired, and its reconciliation role is covered here together with `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` and `docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md`. 
@@ -71,12 +73,12 @@ Rules: | Area | State | Evidence | Notes | |---|---|---|---| | AMD bare-metal boot | validated | Boot, ACPI, SMP, x2APIC all work | Bounded to current tested hardware | -| relibc Wayland/Qt unblockers | builds | signalfd, timerfd, eventfd, open_memstream, F_DUPFD_CLOEXEC, MSG_NOSIGNAL, bounded waitid, bounded RLIMIT, bounded eth0 networking, shm_open, bounded sem_open, bounded sys/ipc.h, bounded sys/shm.h | Runtime pressure from real consumers still untested | +| relibc Wayland/Qt unblockers | builds + targeted runtime proof | signalfd, timerfd, eventfd, open_memstream, F_DUPFD_CLOEXEC, MSG_NOSIGNAL, bounded waitid, bounded RLIMIT, bounded eth0 networking, shm_open, bounded sem_open, bounded sys/ipc.h, bounded sys/shm.h | Strict relibc Redox-target runtime proof now exists for the fd-event slice; broader real-consumer semantics still need confirmation | | redox-driver-sys | builds | Driver substrate | | | linux-kpi | builds | Linux kernel API compatibility layer | | | firmware-loader | builds, boots | scheme:firmware registers at boot | | | redox-drm (AMD + Intel) | builds | DRM scheme daemon | No hardware runtime validation | -| amdgpu C port | builds | AMD DC + TTM + linux-kpi compat | No hardware runtime validation | +| amdgpu retained C path | builds | Red Bear display glue retained path + linux-kpi compat; imported Linux AMD DC/TTM/core remain under compile triage | No hardware runtime validation | | evdevd | builds, boots | scheme:evdev registers at boot | | | udev-shim | builds, boots | scheme:udev registers at boot | | | libwayland 1.24.0 | builds | No compositor proof yet | | @@ -578,7 +580,7 @@ This is the canonical document for the desktop path. 
It does not replace subsyst | `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` | Short current-state desktop truth summary | | `local/docs/RELIBC-COMPLETENESS-AND-ENHANCEMENT-PLAN.md` | relibc completeness detail + patch ownership | | `local/docs/INPUT-SCHEME-ENHANCEMENT.md` | Input-path design if structural cleanup needed | -| `local/docs/P2-AMD-GPU-DISPLAY.md` | AMD display status + validation targets | +| `local/docs/AMDGPU-DC-COMPILE-TRIAGE-PLAN.md` | AMD DC compile-triage + bounded source-set strategy | | `local/docs/DMA-BUF-IMPROVEMENT-PLAN.md` | DMA-BUF scheme detail | | `local/docs/IRQ-AND-LOWLEVEL-CONTROLLERS-ENHANCEMENT-PLAN.md` | Controller/IRQ/IOMMU quality work | | `local/docs/PROFILE-MATRIX.md` | Profile roles + support-language reference | @@ -600,8 +602,8 @@ continuity, not as future work. |---|---|---| | AMD bare-metal boot (ACPI, SMP, x2APIC) | ✅ Boot-baseline complete | Prior to this plan; see `local/docs/ACPI-IMPROVEMENT-PLAN.md` for ongoing ownership and robustness work | | Driver infrastructure (redox-driver-sys, linux-kpi, firmware-loader) | ✅ Builds complete | Prior to this plan | -| AMD GPU display (redox-drm, amdgpu C port) | ✅ Builds complete | Prior to this plan | -| relibc POSIX unblockers (signalfd, timerfd, eventfd, etc.) | ✅ Builds complete | Prior to this plan | +| AMD GPU display (redox-drm + bounded amdgpu retained path) | 🚧 Partial build completion | Imported Linux AMD DC/TTM/core remain under compile triage; no hardware runtime validation yet | +| relibc POSIX unblockers (signalfd, timerfd, eventfd, etc.) 
| ✅ Builds + targeted runtime proof complete | Prior to this plan | | Qt6 base stack (qtbase, qtdeclarative, qtsvg, qtwayland) | ✅ Builds complete | Prior to this plan | | D-Bus 1.16.2 | ✅ Builds + bounded runtime | Prior to this plan | | All 32 KF6 frameworks | ✅ Builds complete | Prior to this plan | diff --git a/local/docs/DESKTOP-STACK-CURRENT-STATUS.md b/local/docs/DESKTOP-STACK-CURRENT-STATUS.md index f70d7d2b..5b3d7de5 100644 --- a/local/docs/DESKTOP-STACK-CURRENT-STATUS.md +++ b/local/docs/DESKTOP-STACK-CURRENT-STATUS.md @@ -1,6 +1,6 @@ # Red Bear OS Desktop Stack — Current Status -**Last updated:** 2026-04-18 +**Last updated:** 2026-04-19 **Canonical plan:** `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` (v2.0) ## Purpose @@ -15,7 +15,7 @@ Its job is to answer: - and what still blocks a trustworthy Wayland/KDE session claim. For the execution plan (phases, timelines, acceptance criteria), see the canonical plan above. -For historical design rationale, see `docs/03-WAYLAND-ON-REDOX.md` and `docs/05-KDE-PLASMA-ON-REDOX.md`. +For subsystem planning detail, see `local/docs/WAYLAND-IMPLEMENTATION-PLAN.md`; for historical KDE rationale, see `docs/05-KDE-PLASMA-ON-REDOX.md`. ## Where We Are in the Plan @@ -25,15 +25,18 @@ The canonical desktop plan uses a three-track model: - **Track B (Phase 3–4):** KWin Session → KDE Plasma — **blocked on Track A** - **Track C (Phase 5):** Hardware GPU — **can start after Phase 1** -**Current position:** Build-side gates are crossed. Phase 1 (Runtime Substrate Validation) is the -next work target. The repo has not yet started systematic runtime validation. +**Current position:** Build-side gates are crossed. Phase 1 (Runtime Substrate Validation) is still +the next broad desktop target, but the repo now also carries an experimental Red Bear-native +greeter/auth/session-launch stack on the `redbear-full` desktop path. 
-## Tracked Default and Evidence Boundary
+## Active Target Surface and Evidence Boundary
 
-- The tracked default build now resolves to `CONFIG_NAME?=redbear-kde`.
-- the bounded Phase 2 validation profile remains available.
-- `redbear-kde` is the tracked KWin Wayland desktop direction, but runtime-proven compositor/
-  session claims still remain incomplete.
+- The supported compile targets are `redbear-mini`, `redbear-live-mini`, `redbear-full`, and `redbear-live-full`.
+- Desktop/graphics are available only on `redbear-full` and `redbear-live-full`.
+- Older names such as `redbear-kde`, `redbear-wayland`, and `redbear-minimal*` still appear in
+  historical or staging material, but they are not the supported compile-target surface.
+- The greeter/login path is currently an **experimental build/integration surface** on `redbear-full`;
+  it does not yet support a runtime-validated end-to-end desktop-login claim.
 
 ## Status Matrix
 
@@ -48,8 +51,15 @@ next work target. The repo has not yet started systematic runtime validation.
| Mesa EGL+GBM+GLES2 | **builds** | Software path via LLVMpipe proven in QEMU; hardware path not proven | | libdrm amdgpu | **builds** | Package-level success only | | Input stack | **builds, enumerates** | evdevd, libevdev, libinput, seatd present; evdevd registers scheme at boot | -| D-Bus | **builds, usable (bounded)** | System bus wired in `redbear-full` and `redbear-kde`; D-Bus plan + sessiond complete (DB-1), Qt 6.11 D-Bus coverage documented (Section 14), DB-2/3/4 service daemons implemented as stubs (notifications, upower, udisks, polkit) | -| redbear-sessiond | ✅ Scaffold | org.freedesktop.login1 D-Bus session broker — Rust daemon (zbus 5), config wired in redbear-kde.toml, acpi_watcher with edge detection | +| D-Bus | **builds, usable (bounded)** | System bus wired in `redbear-full`; D-Bus plan + sessiond complete (DB-1), Qt 6.11 D-Bus coverage documented (Section 14), DB-2/3/4 service daemons implemented as stubs (notifications, upower, udisks, polkit) | +| redbear-sessiond | **builds, scaffold** | org.freedesktop.login1 D-Bus session broker — Rust daemon (zbus 5), wired on the `redbear-full` desktop path; now includes runtime control updates used by the greeter/auth session handoff | +| redbear-authd | **builds** | Privileged local-user auth daemon; `/etc/passwd`/`/etc/shadow`/`/etc/group` parsing, SHA-256/SHA-512 crypt verification, bounded lockout, target-side recipe build proven | +| redbear-session-launch | **builds** | User-session bootstrap tool; runtime-dir/env setup, uid/gid handoff, dbus-run-session → `redbear-kde-session`, target-side recipe build proven | +| redbear-greeterd | **builds, experimental** | Root-owned greeter orchestrator; UI/auth socket protocol, bounded restart policy, return-to-greeter daemon logic, crate tests pass; end-to-end runtime proof still pending | +| redbear-greeter UI | **builds, experimental** | Qt6/QML unprivileged login surface now ships in-tree; bounded runtime proof remains narrower than a full trusted KDE 
desktop-login claim | +| redbear-validation-session | **builds, bounded helper** | Still staged as a validation launcher/helper, but no longer the primary `redbear-full` display-service owner | +| Greeter runtime checker | ✅ implemented (bounded checker) | `redbear-greeter-check` asserts greeter binaries, assets, service files, socket reachability, hello protocol, invalid-login handling, and a validation-only successful-login/session-return loop inside the guest; current graphical runtime proof is still blocked below the greeter slice by guest-side Qt shared-plugin parsing | +| Greeter QEMU harness | ✅ implemented (bounded harness) | `test-greeter-qemu.sh` boots `redbear-full`, logs in on the fallback console, and runs the in-guest greeter checker for hello, invalid-login, and bounded successful-login return-to-greeter proof; the compositor leg is presently blocked by guest-side Qt plugin loader failure rather than missing greeter artifacts | | redbear-notifications | ✅ Scaffold | org.freedesktop.Notifications — logs to stderr, no display integration yet | | redbear-upower | ✅ bounded real | org.freedesktop.UPower — enumerates real AC adapters/batteries from `/scheme/acpi/power`; desktop machines with no battery report line power only | | redbear-udisks | ✅ bounded real | org.freedesktop.UDisks2 — enumerates real `disk.*` schemes and partitions into read-only D-Bus objects; no fabricated mount/serial metadata | @@ -61,36 +71,37 @@ next work target. The repo has not yet started systematic runtime validation. 
| GPU acceleration | **blocked** | PRIME/DMA-BUF ioctls and bounded private CS surface implemented; real vendor render CS/fence path still missing | | validation compositor runtime | **experimental** | Reaches early init in QEMU; no complete session | | validation profile | **builds, boots** | Bounded Wayland runtime profile | -| `redbear-full` profile | **builds, boots** | Broader desktop plumbing profile | -| `redbear-kde` profile | **builds** | Tracked KWin desktop-direction profile | -| `redbear-live` profile | **builds** | Live image following the tracked KWin desktop target | +| `redbear-full` profile | **builds, boots** | Active desktop/graphics compile surface; now owns the experimental greeter/auth/session-launch integration path | +| `redbear-live-full` profile | **builds** | Live image following the active desktop/graphics target | +| `redbear-mini` profile | **builds** | Minimal non-desktop compile target | +| `redbear-live-mini` profile | **builds** | Minimal live image target | ## Profile View -### Validation profile - -- **Role:** Phase 2 Wayland compositor validation target -- **Current truth:** Builds and boots in QEMU; bounded compositor initialization reaches early init but no complete session -- **Use for:** Compositor/runtime regression testing, not broad desktop claims - ### `redbear-full` -- **Role:** Broader desktop/network/session plumbing -- **Current truth:** Carries D-Bus and broader integration pieces; VirtIO networking works in QEMU, and the bounded Phase 5 network/session checker is evidence-backed there -- **Use for:** Desktop integration testing beyond the narrow Wayland slice +- **Role:** Active desktop/graphics compile target and current greeter-integration surface +- **Current truth:** Carries D-Bus, sessiond, broader integration pieces, and the experimental Red Bear-native greeter/auth/session-launch stack; VirtIO networking works in QEMU, the bounded Phase 5 network/session checker is evidence-backed there, and the repo now 
includes a bounded greeter checker/harness for the login surface. `redbear-validation-session` remains staged only as a bounded helper, not the active `20_display.service` owner on this target. +- **Use for:** Desktop integration testing, greeter/login bring-up, and bounded desktop/network plumbing validation - **Do not overclaim:** This profile proves bounded QEMU desktop/network plumbing only. It does not by itself close the Wi-Fi implementation plan's later real-hardware Phase W5 reporting/recovery gate. -### `redbear-kde` +### `redbear-live-full` -- **Role:** Phase 3–4 KDE/Plasma session bring-up -- **Current truth:** Carries KWin/session wiring and KDE-facing package set; experimental but selected as the tracked default desktop target -- **Use for:** KDE session surface testing once Phase 2 completes +- **Role:** Live/demo/recovery image layered on the active desktop target +- **Current truth:** Follows `redbear-full`; desktop/graphics-capable live image, but the greeter/login surface remains experimental until end-to-end proof exists +- **Use for:** Demo, install, and bounded live-media validation on the current desktop surface -### `redbear-live` +### `redbear-mini` -- **Role:** Live/demo/recovery image layered on the tracked desktop profile -- **Current truth:** Inherits `redbear-kde`, so live media now follows the tracked KWin desktop target -- **Use for:** Demo, install, and recovery workflows based on the current shipped desktop surface +- **Role:** Minimal non-desktop target +- **Current truth:** No desktop/graphics path; recovery and non-desktop integration surface only +- **Use for:** Minimal runtime bring-up, subsystem validation, and non-desktop packaging checks + +### `redbear-live-mini` + +- **Role:** Minimal live image target +- **Current truth:** No desktop/graphics path; live/recovery-oriented minimal image surface +- **Use for:** Minimal live boot and recovery workflows ## Current Blockers @@ -104,7 +115,31 @@ Phase 1 exists specifically to 
close this gap. A bounded compositor initialization reaches early startup but does not complete a usable Wayland compositor session. This blocks all desktop session work. -### 3. KWin reduced build is now dependency-honest, but runtime proof is still missing (Phase 3 gate) +### 3. Greeter/login path now exists, but runtime proof is still missing (desktop-login gate) + +The repo now carries the main non-visual pieces of the Red Bear-native greeter/login plan: + +- `redbear-authd` +- `redbear-session-launch` +- `redbear-greeterd` +- `redbear-greeter-services.toml` +- `redbear-greeter-check` +- `test-greeter-qemu.sh` + +Current truth for that slice: + +| Piece | Current state | Remaining limitation | +|---|---|---| +| `redbear-authd` | Target-side recipe build proven; unit tests cover passwd/shadow parsing, SHA-crypt verification, lockout, approval checks | No bounded in-guest login proof yet | +| `redbear-session-launch` | Target-side recipe build proven; unit tests cover env/runtime-dir/argument handling | Real session handoff still depends on full greeter/runtime proof | +| `redbear-greeterd` | Crate tests cover protocol-facing state strings, installed asset paths, bounded restart policy, and now own successful-login session launch directly after response delivery | Full desktop-login trust still depends on wider KDE runtime proof plus the unresolved guest-side Qt plugin-loader defect | +| Greeter validation helpers | `redbear-greeter-check` + `test-greeter-qemu.sh` exist and are wired for bounded runtime proof | The successful-login path is validation-only and does not replace broader KDE session proof; current graphical proof is blocked by guest-side Qt plugin parsing rather than by greeter protocol/packaging gaps | +| `redbear-greeter` packaging | Builds in-tree | Qt/QML UI binary, compositor wrapper, and branded assets are packaged; broader runtime trust still remains experimental because the guest-side Qt plugin loader currently rejects shared platform plugins 
(`libqminimal.so`, KWin QPA) as invalid ELF during metadata scan | + +This means Red Bear now has a credible **build-visible login boundary**, but not yet a runtime-trusted +graphical login surface. + +### 4. KWin reduced build is now dependency-honest, but runtime proof is still missing (desktop-session gate) The reduced KWin path now builds with honest provider linkage for `libepoxy`, `lcms2`, `libudev`, and `libdisplay-info`. @@ -121,7 +156,7 @@ Current truth for that slice: Additionally, two packages still need more honest session-ready treatment: kirigami (stub-only), kf6-kio (heavy shim). -### 4. Hardware acceleration missing GPU CS ioctl (Phase 5 gate) +### 5. Hardware acceleration missing GPU CS ioctl (Phase 5 gate) PRIME/DMA-BUF buffer sharing is implemented at the scheme level, and a bounded private CS surface now exists for shared-contract work. Real vendor render command submission and shared @@ -139,9 +174,10 @@ exercised on real Intel and AMD hardware. |---|---| | `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` | Canonical desktop path plan (v2.0, Phase 1–5) | | This document | Current build/runtime truth summary | +| `local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md` | Canonical GPU/DRM execution plan beneath the desktop path | | `local/docs/QT6-PORT-STATUS.md` | Qt/KF6/KWin package-level build status | | `local/docs/AMD-FIRST-INTEGRATION.md` | AMD-specific hardware/driver detail | -| `docs/03-WAYLAND-ON-REDOX.md` | Historical Wayland design rationale | +| `local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` | Canonical Wayland subsystem plan | | `docs/05-KDE-PLASMA-ON-REDOX.md` | Historical KDE design rationale | | `local/docs/PROFILE-MATRIX.md` | Profile roles and support-language reference | @@ -149,8 +185,9 @@ exercised on real Intel and AMD hardware. 
The Red Bear desktop stack has crossed major build-side gates:
 
 - All Qt6 core modules, all 32 KF6 frameworks, Mesa EGL/GBM/GLES2, and D-Bus build
-- Three tracked desktop profiles exist and at least boot in QEMU
+- Four supported compile targets exist, with desktop/graphics on `redbear-full` and `redbear-live-full`
+- The non-visual Red Bear-native greeter/login pieces now build and pass their tests
 - relibc compatibility is materially stronger than before
 
-The remaining work is **runtime validation, session assembly, and the remaining KDE session/runtime proof work**.
-Phase 1 (Runtime Substrate Validation) remains the immediate next target, while the KWin reduced path still lacks runtime compositor/session proof.
+The remaining work is **runtime validation, greeter/UI completion, session assembly, and the remaining KDE session/runtime proof work**.
+Phase 1 (Runtime Substrate Validation) remains the immediate broad target, while the new greeter/login path and the KWin reduced path both still need bounded runtime proof before stronger claims are safe.
diff --git a/local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md b/local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md index ee380645..134a9045 100644 --- a/local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md +++ b/local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md @@ -7,7 +7,6 @@ **Supersedes as planning authority:** - `local/docs/AMD-FIRST-INTEGRATION.md` for forward execution order -- `local/docs/P2-AMD-GPU-DISPLAY.md` for future-task sequencing - `local/docs/HARDWARE-3D-ASSESSMENT.md` for roadmap ordering - `local/docs/DMA-BUF-IMPROVEMENT-PLAN.md` for PRIME/render dependency ordering @@ -54,7 +53,7 @@ The repo has real progress in shared DRM/KMS, GEM, PRIME, firmware plumbing, int | KMS ioctl surface | Implemented in shared scheme layer | `local/recipes/gpu/redox-drm/source/src/scheme.rs` | | GEM allocation and mapping | Implemented in shared scheme and GEM manager | `local/recipes/gpu/redox-drm/source/src/gem.rs`, `local/recipes/gpu/redox-drm/source/src/scheme.rs` | | PRIME and DMA-BUF style sharing | Implemented at scheme level | `local/docs/HARDWARE-3D-ASSESSMENT.md`, `local/docs/DMA-BUF-IMPROVEMENT-PLAN.md`, `local/recipes/gpu/redox-drm/source/src/scheme.rs` | -| AMD display backend | Build-visible, firmware-aware, interrupt-aware | `local/recipes/gpu/redox-drm/source/src/drivers/amd/mod.rs`, `local/recipes/gpu/amdgpu/source/amdgpu_redox_main.c` | +| AMD display backend | Build-visible on the bounded retained path, firmware-aware, interrupt-aware | `local/recipes/gpu/redox-drm/source/src/drivers/amd/mod.rs`, `local/recipes/gpu/amdgpu/source/amdgpu_redox_main.c` | | Intel display backend | Build-visible, GGTT and ring scaffolding present | `local/recipes/gpu/redox-drm/source/src/drivers/intel/mod.rs`, `.../intel/ring.rs` | | Mesa userland base | Builds with EGL, GBM, OSMesa, software Gallium path | `recipes/libs/mesa/recipe.toml` | @@ -255,6 +254,8 @@ quirk path. 
Do not use the Linux quirk extractor as a substitute for PCI naming - the raw `(crtc_id, vblank_count)` IRQ tuple path has been replaced with a small shared driver-event model for internal driver → main loop → scheme transport. - `scheme.rs` now owns event ingestion through a shared helper, so page-flip retirement remains tied to explicit vblank events while non-vblank events do not pretend to be render completion. - both Intel and AMD now forward shared hotplug events through the same internal event path instead of backend-specific side handling. +- `scheme.rs` now turns shared hotplug and vblank events into a queued scheme-visible `EVENT_READ` surface for `card0`, and hotplug also targets the matching connector handle. +- unit tests now cover card-level hotplug readiness, connector-targeted hotplug readiness, queued vblank delivery, and event draining, while preserving the rule that non-vblank events do not retire pending page flips. - this is structural groundwork only; real fence objects, sync waits, and backend-proven render completion semantics are still not implemented. ### Workstream C, Intel backend maturation @@ -276,7 +277,7 @@ quirk path. Do not use the Linux quirk extractor as a substitute for PCI naming ### Workstream D, AMD backend maturation -**Goal:** Turn the AMD path from code-complete display work plus amdgpu port scaffolding into an evidence-backed modern AMD track. +**Goal:** Turn the AMD path from a bounded retained display build plus broader imported amdgpu/DC triage into an evidence-backed modern AMD track. 
**Tasks:** diff --git a/local/docs/GREETER-LOGIN-IMPLEMENTATION-PLAN.md b/local/docs/GREETER-LOGIN-IMPLEMENTATION-PLAN.md new file mode 100644 index 00000000..d6cfc472 --- /dev/null +++ b/local/docs/GREETER-LOGIN-IMPLEMENTATION-PLAN.md @@ -0,0 +1,876 @@ +# Red Bear OS Greeter / Login Implementation Plan + +**Version:** 1.0 — 2026-04-19 +**Status:** Active plan with experimental implementation in progress on `redbear-full` +**Scope:** Red Bear-native graphical greeter, authentication boundary, and session handoff for the KDE-on-Wayland desktop path +**Parent plans:** `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` (v2.0), `local/docs/DBUS-INTEGRATION-PLAN.md` + +--- + +## Table of Contents + +1. [Executive Summary](#1-executive-summary) +2. [Scope and Non-Goals](#2-scope-and-non-goals) +3. [Evidence Model](#3-evidence-model) +4. [Current State Assessment](#4-current-state-assessment) +5. [Decision Record: Login-Manager Direction](#5-decision-record-login-manager-direction) +6. [Architecture Principles](#6-architecture-principles) +7. [Architecture Design](#7-architecture-design) +8. [Component Specifications](#8-component-specifications) +9. [Protocols and Session Contracts](#9-protocols-and-session-contracts) +10. [Phased Implementation](#10-phased-implementation) +11. [Testing and Validation](#11-testing-and-validation) +12. [Risks and Mitigations](#12-risks-and-mitigations) +13. [Relationship to Other Plans](#13-relationship-to-other-plans) +14. [File and Recipe Inventory](#14-file-and-recipe-inventory) +15. [Open Questions](#15-open-questions) + +--- + +## 1. Executive Summary + +Red Bear OS currently has enough session substrate to start **one fixed KDE Wayland session**, but it does +not yet have a real graphical login path. 
+ +What exists today: + +- `dbus-daemon` on the system bus +- `redbear-sessiond` exposing a minimal `org.freedesktop.login1` subset for KWin +- `seatd` as the seat/libseat backend +- a direct session launcher (`redbear-kde-session`) that starts `kwin_wayland` +- fallback text `getty` surfaces on VT2 and `/scheme/debug/no-preserve` + +What does **not** exist today: + +- no display manager +- no graphical greeter +- no authentication daemon +- no session-launch privilege boundary +- no PAM-backed or systemd-logind-shaped login stack + +This plan defines the forward path for the missing layer: + +1. **Do not adopt SDDM first.** Upstream KDE convention points to SDDM, but the current Red Bear + session/auth substrate is not yet shaped like a conventional Linux desktop-login environment. +2. **Build a Red Bear-native minimal greeter/login path first.** The system should present one + graphical login surface for one session only: **KDE on Wayland**. +3. **Keep the architecture narrow.** Separate: + - `redbear-sessiond` → login1/session compatibility for KWin + - `redbear-greeter` → login UX and session orchestration + - `redbear-authd` → credential verification and privilege boundary + - `redbear-session-launch` → user-session bootstrap only + +This plan intentionally avoids generic display-manager scope. Red Bear wants **one desktop direction**, +not a multi-session desktop-manager framework. + +--- + +## 2. 
Scope and Non-Goals + +### 2.1 In Scope + +- One graphical login surface for the Red Bear KDE-on-Wayland desktop path +- A Red Bear-native greeter daemon, greeter UI, authentication daemon, and session launcher +- Integration with existing `dbus-daemon`, `redbear-sessiond`, `seatd`, `inputd`, and `redbear-kde-session` +- Explicit VT ownership and handoff on the desktop VT +- A narrow local-user authentication model backed by `/etc/passwd`, `/etc/shadow`, and `/etc/group` +- Branding integration using Red Bear assets from `local/Assets/` +- Packaging and config wiring under `local/recipes/system/` and tracked `config/redbear-*.toml` + +This plan applies only to the **graphical desktop path**. It does **not** replace console-first or +minimal non-desktop configurations. Existing text and debug console surfaces remain part of the +recovery model. + +### 2.2 Out of Scope + +- X11 login surfaces +- multiple desktop environments +- session chooser UI +- remote authentication +- PAM/NSS plugin ecosystems +- LDAP/SSO/smartcard/fingerprint login +- graphical lock screen / unlock manager +- full Plasma session-manager semantics (`ksmserver`, multi-user desktop switching) + +### 2.3 Policy Assumption + +This plan assumes the Red Bear desktop direction converges on **one KDE-on-Wayland path**. + +Current implementation answer: the first tracked owner is `redbear-full` (and therefore +`redbear-live-full` for live media). Older names such as `redbear-kde` may still appear in +historical or staging material, but they are not the supported compile-target surface for this plan. + +--- + +## 3. Evidence Model + +This plan uses the same evidence language as the canonical desktop plan. 
+ +| Class | Meaning | Safe to say | Not safe to say | +|---|---|---|---| +| **builds** | Package compiles and stages | "builds" | "works" | +| **boots** | Image reaches prompt or known runtime surface | "boots" | "desktop works" | +| **enumerates** | Service/register surface appears and answers basic queries | "enumerates" | "usable end to end" | +| **usable** | Bounded runtime path performs its intended task | "usable for this path" | "broadly stable" | +| **validated** | Repeated proof on the intended target class | "validated" | "complete everywhere" | +| **experimental** | Partial, scaffolded, or unproven | "experimental" | "done" | + +Rules: + +- A greeter binary that compiles is only **builds**. +- A VM image that reaches a graphical login surface is only **boots**. +- A greeter that hands off to KDE on Wayland in bounded QEMU proof is **usable (bounded)**. +- Nothing is **validated** until it repeats reliably on the intended target class. + +--- + +## 4. Current State Assessment + +### 4.1 What Exists and Works + +| Component | Location | Status | Detail | +|---|---|---|---| +| system D-Bus | `config/redbear-full.toml` | ✅ usable (bounded) | `12_dbus.service` starts `dbus-daemon --system` on the active desktop target | +| login1 compatibility | `local/recipes/system/redbear-sessiond/` | ✅ scaffold | Minimal `org.freedesktop.login1` broker for KWin | +| seat backend | `config/redbear-full.toml` | ✅ builds, wired | `13_seatd.service`; session env exports `LIBSEAT_BACKEND=seatd` | +| display VT activation | `29_activate_console.service` in desktop configs | ✅ usable (bounded) | `inputd -A 3` activates desktop VT | +| fallback text login | `30_console.service` | ✅ boots | `getty 2` on VT2 | +| debug console | `31_debug_console.service` | ✅ boots | `getty /scheme/debug/no-preserve -J` | +| direct KDE session launcher | `/usr/bin/redbear-kde-session` | ✅ builds, experimental | Starts session bus if needed, then `exec kwin_wayland --replace` | +| authentication 
daemon | `local/recipes/system/redbear-authd/` | ✅ builds, experimental | Local-user auth boundary with `/etc/passwd` / `/etc/shadow` / `/etc/group` parsing and SHA-crypt verification | +| session launcher boundary | `local/recipes/system/redbear-session-launch/` | ✅ builds, experimental | User-session bootstrap with bounded environment/runtime-dir setup | +| greeter daemon scaffold | `local/recipes/system/redbear-greeter/` | ✅ builds, experimental | Root-owned greeter orchestrator, socket protocol, bounded restart policy | +| greeter config fragment | `config/redbear-greeter-services.toml` | ✅ builds, experimental | Adds `19_redbear-authd.service`, `20_greeter.service`, compatibility `20_display.service`, and fallback console dependencies | +| bounded validation launcher | `/usr/bin/redbear-validation-session` | ✅ retained helper | Still available for older bounded validation flows, but no longer the primary `redbear-full` display-service path | +| branding assets | `local/Assets/images/` | ✅ present | `Red Bear OS loading background.png`, `Red Bear OS icon.png` | + +### 4.2 What Exists But Is Incomplete + +| Component | Status | Gap | +|---|---|---| +| `redbear-sessiond` seat switching | ⚠️ scaffold | `Seat.SwitchTo` is currently logged/delegated externally to `inputd -A` | +| KDE runtime services | ⚠️ partial | D-Bus substrate exists, but broader Plasma session services remain incomplete | +| `redbear-full` greeter flow | ⚠️ experimental | Non-visual pieces are implemented, but final packaged UI and bounded runtime proof are still pending | +| greeter runtime validation | ⚠️ partial | `redbear-greeter-check` + `test-greeter-qemu.sh` exist, but final proof still depends on the packaged greeter UI | + +### 4.3 What Does Not Exist + +| Missing piece | Why it matters | +|---|---| +| packaged graphical greeter UI | no complete user-visible graphical login surface is staged yet | +| bounded end-to-end login proof | build-side pieces exist, but runtime-trusted 
login/session handoff is not proven yet | +| shared login protocol extraction | current protocol is encoded directly in the first-cut daemon/checker implementations | +| display-manager package integration | no SDDM/greetd/lightdm/ly path in repo | + +### 4.4 Baseline Conclusion + +The current Red Bear desktop path can **start a session**, but it cannot yet **own a login flow**. + +The missing work is not “port more KDE packages.” The missing work is the **login boundary**: + +1. graphical greeter surface, +2. authentication boundary, +3. session-launch privilege drop, +4. clean handoff into the existing KDE Wayland session path. + +That login boundary must be added **without** replacing the current seat/session substrate and +without removing existing console recovery paths. + +--- + +## 5. Decision Record: Login-Manager Direction + +### 5.1 Recommendation + +The best fit for Red Bear OS **today** is a **Red Bear-native minimal single-session greeter/launcher**. + +This is closer in class to **greetd-style minimal orchestration** than to **SDDM-style full desktop +manager behavior**, but the forward path should be **Red Bear-specific**, not a generic Linux deployment. + +### 5.2 Why Not SDDM First + +SDDM is the standard answer for a conventional KDE distribution, but Red Bear is not yet a conventional +Linux-shaped session/auth environment. + +The repo evidence today shows: + +- no SDDM integration, +- no PAM path, +- no mature general-purpose display-manager substrate, +- a deliberately minimal `login1` compatibility layer, +- a fixed single desktop direction. + +Adopting SDDM first would force Red Bear to emulate a broader environment before the current narrower +session path is runtime-trusted. 
+ +### 5.3 Ranked Direction + +| Rank | Direction | Verdict | +|---|---|---| +| 1 | Red Bear-native minimal greeter | **Primary** | +| 2 | Current direct session launcher | bring-up baseline only | +| 3 | SDDM-class integration | future option after session/auth substrate matures | +| 4 | GDM/LightDM/elogind-shaped path | reject | + +### 5.4 Future Revisit Trigger + +Revisit SDDM-class integration only if Red Bear later decides it needs: + +- richer multi-user semantics, +- session chooser behavior, +- broader desktop-manager policy surface, +- significantly fuller login/session accounting than the current `redbear-sessiond` contract. + +--- + +## 6. Architecture Principles + +### 6.1 One Desktop, One Session Path + +The greeter must launch exactly one session target: + +- **KDE on Wayland** + +There is no session chooser in v1. + +### 6.2 Keep the Existing Session Substrate + +Reuse existing pieces rather than replacing them: + +- `dbus-daemon` +- `redbear-sessiond` +- `seatd` +- `inputd` +- `redbear-kde-session` + +The greeter layer sits **above** them. + +`seatd` remains the **seat/device authority** for this design. The greeter stack consumes the existing +seat/libseat path; it does not introduce a second seat/session-manager authority. + +### 6.3 Separate Login UX, Authentication, and Session Bootstrap + +Do not collapse these roles into one process. + +| Component | Responsibility | +|---|---| +| `redbear-sessiond` | login1/session compatibility for KWin | +| `redbear-greeterd` | login flow orchestration | +| `redbear-greeter-ui` | graphical login UX | +| `redbear-authd` | credential verification and privilege boundary | +| `redbear-session-launch` | drop privileges, set env, start user session | + +`redbear-sessiond` is therefore **not** the login/auth/session-launch authority. It remains the +KWin-facing session compatibility broker defined by the D-Bus plan. 
+ +### 6.4 Avoid a PAM Clone + +Red Bear should not build a new generic PAM/NSS plugin ecosystem merely to satisfy display-manager +expectations. For this path, use a narrow local account model first. + +### 6.5 Stop-and-Start Handoff Is Acceptable + +The greeter and the user session do not need an in-place seamless transition in v1. + +It is acceptable to: + +1. stop the greeter UI, +2. start the user session cleanly, +3. return to the greeter after session exit. + +### 6.6 Branding Is Part of the Product Surface + +Use committed Red Bear assets as the default greeter look. + +Source-of-truth art files in the repo are: + +- background: `local/Assets/images/Red Bear OS loading background.png` +- icon: `local/Assets/images/Red Bear OS icon.png` + +At runtime, the greeter must use **installed asset paths**, not source-tree paths. + +--- + +## 7. Architecture Design + +### 7.1 Stack Overview + +```text +┌────────────────────────────────────────────────────────────────────┐ +│ KDE Wayland user session │ +│ redbear-kde-session → kwin_wayland → later Plasma services │ +├────────────────────────────────────────────────────────────────────┤ +│ redbear-session-launch │ +│ drop privileges, set env, start session bus, exec session │ +├────────────────────────────────────────────────────────────────────┤ +│ redbear-authd redbear-greeter-ui │ +│ local auth + privilege boundary Qt6/QML login surface │ +├────────────────────────────────────────────────────────────────────┤ +│ redbear-greeterd │ +│ login state machine, VT3 ownership, auth/session orchestration │ +├────────────────────────────────────────────────────────────────────┤ +│ dbus-daemon --system redbear-sessiond seatd inputd │ +├────────────────────────────────────────────────────────────────────┤ +│ Redox schemes / system services │ +│ scheme:input, scheme:acpi, scheme:drm, debug scheme, etc. 
│ +└────────────────────────────────────────────────────────────────────┘ +``` + +### 7.2 Boot-to-Login Sequence + +```text +boot + → 12_dbus.service (system D-Bus) + → 13_redbear-sessiond.service (login1 subset) + → 13_seatd.service (seat backend) + → 20_greeter.service (start redbear-greeterd on VT3) + → 29_activate_console.service (inputd -A 3) + → 30_console.service (fallback getty 2 on VT2) + → 31_debug_console.service (debug getty) + → redbear-greeter-ui shows login surface on VT3 + → successful login + → redbear-session-launch + → dbus-run-session -- redbear-kde-session + → kwin_wayland +``` + +### 7.3 Session Return Path + +```text +user session exits or crashes + → redbear-greeterd observes session root exit + → greeter-specific cleanup + → reactivate VT3 + → respawn redbear-greeter-ui + → return to login surface +``` + +### 7.4 Why This Shape Fits the Repo + +- matches existing `VT=3` display path +- preserves fallback text login on VT2 +- reuses `redbear-sessiond` instead of replacing it +- does not assume a broader Linux-style session-manager stack than the repo currently has +- avoids dead-end graphical boot behavior by preserving text/debug fallback paths + +--- + +## 8. Component Specifications + +### 8.1 `redbear-greeterd` + +**Type:** root-owned orchestrator daemon + +**Responsibilities:** + +- own the login state machine, +- own greeter/UI lifecycle, +- talk to `redbear-authd`, +- start the user session via `redbear-session-launch`, +- monitor the session root process, +- return to greeter after logout/session crash. + +**Must not do:** + +- parse or verify passwords directly unless `redbear-authd` is intentionally collapsed into it, +- render the login UI, +- absorb generic session-manager policy. 
+
+### 8.2 `redbear-greeter-ui`
+
+**Type:** unprivileged Qt6/QML frontend
+
+**Responsibilities:**
+
+- render Red Bear background and icon,
+- collect username/password,
+- present Login / Shutdown / Reboot,
+- show bounded status (`Authenticating`, `Login failed`, `Starting session`).
+
+**Must not do:**
+
+- read `/etc/shadow`,
+- own power/device/session policy,
+- choose alternate desktop sessions.
+
+### 8.3 `redbear-authd`
+
+**Type:** privileged authentication daemon
+
+**Responsibilities:**
+
+- read local user data,
+- verify password hashes,
+- check lock/disable rules,
+- perform narrow privileged actions (`login`, optional `shutdown`, optional `reboot`),
+- spawn `redbear-session-launch` for a verified user.
+
+**Must not do:**
+
+- own the greeter UI,
+- own compositor startup policy,
+- become a general identity platform.
+
+`redbear-authd` is the **only** component in this plan allowed to read password-hash data
+(`/etc/shadow`-equivalent runtime content). Neither UI nor session launcher may touch it.
+
+### 8.4 `redbear-session-launch`
+
+**Type:** small bootstrap tool
+
+**Responsibilities:**
+
+- create/fix `XDG_RUNTIME_DIR`,
+- drop to target uid/gid and supplementary groups,
+- construct a minimal KDE/Wayland environment,
+- launch the user session bus,
+- exec `redbear-kde-session`.
+
+`redbear-session-launch` is intentionally thin. It must not duplicate KDE session policy already owned
+by `redbear-kde-session`.
+
+### 8.5 `redbear-sessiond`
+
+This plan does **not** replace `redbear-sessiond`.
+
+It remains responsible for:
+
+- `org.freedesktop.login1` subset for KWin,
+- session/seat compatibility surface,
+- bounded power/sleep integration already assigned in the D-Bus plan.
+
+---
+
+## 9. 
Protocols and Session Contracts + +### 9.1 UI ↔ Greeter Daemon Protocol + +Transport: + +- Unix socket at `/run/redbear-greeterd.sock` +- JSON messages, versioned + +Minimum message set: + +```json +{ "type": "hello", "version": 1 } +{ "type": "submit_login", "username": "alice", "password": "secret" } +{ "type": "request_shutdown" } +{ "type": "request_reboot" } +``` + +Example reply: + +```json +{ "type": "hello_ok", "background": "/usr/share/redbear/greeter/background.png", "icon": "/usr/share/redbear/greeter/icon.png", "session_name": "KDE on Wayland" } +``` + +### 9.2 Greeter Daemon ↔ Auth Daemon Protocol + +Transport: + +- Unix socket at `/run/redbear-authd.sock` +- JSON messages, versioned + +Minimum message set: + +```json +{ "type": "authenticate", "request_id": 17, "username": "alice", "password": "secret", "vt": 3 } +{ "type": "start_session", "request_id": 17, "username": "alice", "session": "kde-wayland" } +``` + +### 9.3 State Machine + +`redbear-greeterd` uses this state set: + +1. `Starting` +2. `GreeterReady` +3. `Authenticating` +4. `LaunchingSession` +5. `SessionRunning` +6. `ReturningToGreeter` +7. `PowerAction` +8. `FatalError` + +Rules: + +- one greeter UI process at a time, +- one session launch in flight, +- one supported session only: `kde-wayland`, +- greeter UI never survives into `SessionRunning`. + +### 9.4 Local Account Storage Contract + +Use a simple Unix-like model first: + +- `/etc/passwd` +- `/etc/shadow` +- `/etc/group` + +This plan explicitly rejects inventing a new account database format for v1. + +The local-account model is a **runtime contract**. Source-tree examples or provisioning helpers may live +elsewhere, but the greeter/auth path must interact only with installed runtime account files. 
+ +### 9.5 Session-Launch Environment + +`redbear-session-launch` should set a minimal explicit environment: + +- `HOME` +- `USER` +- `LOGNAME` +- `SHELL` +- `PATH=/usr/bin:/bin` +- `XDG_RUNTIME_DIR=/run/user/$UID` +- `XDG_SESSION_TYPE=wayland` +- `XDG_CURRENT_DESKTOP=KDE` +- `KDE_FULL_SESSION=true` +- `WAYLAND_DISPLAY=wayland-0` +- `XDG_SEAT=seat0` +- `XDG_VTNR=3` +- `LIBSEAT_BACKEND=seatd` +- `SEATD_SOCK=/run/seatd.sock` + +Preferred launch form: + +```text +dbus-run-session -- redbear-kde-session +``` + +If `dbus-run-session` proves unreliable on Red Bear, use the current `dbus-launch` pattern as a bounded +fallback. + +### 9.6 Branding Contract + +Stage the current assets at stable runtime paths: + +- `/usr/share/redbear/greeter/background.png` +- `/usr/share/redbear/greeter/icon.png` + +Use: + +- `Red Bear OS loading background.png` as the full-screen wallpaper +- `Red Bear OS icon.png` above the login form + +The greeter runtime must reference only the installed `/usr/share/redbear/greeter/*` paths. +`local/Assets/...` remains the source-of-truth location in the repo, not a runtime lookup path. + +### 9.7 Failure and Fallback Contract + +Greeter failure must never create a dead-end boot surface. + +Required behavior: + +- VT2 `getty` remains available as text recovery, +- debug `getty` remains available, +- greeter failures return control to a recoverable state, +- repeated greeter/UI restart failures must stop escalating after a bounded retry count, +- the system must prefer a reachable fallback console over an infinite graphical restart loop. + +--- + +## 10. Phased Implementation + +### Phase G0 — Scope Freeze and Wiring Baseline + +**Goal:** Freeze the architectural split and identify the tracked desktop profile(s) that will own the +greeter path. 
+ +| # | Task | Acceptance criteria | +|---|---|---| +| G0.1 | Freeze component boundaries | `sessiond`, `greeterd`, `authd`, `session-launch` responsibilities documented without overlap | +| G0.2 | Freeze single-session policy | Only `kde-wayland` is named as the supported graphical session | +| G0.3 | Freeze branding inputs | Runtime asset paths and source asset files documented | + +**Exit criteria:** + +- architecture split is documented, +- session policy is explicit, +- asset source of truth is explicit. + +### Phase G1 — Service Skeleton and Boot Wiring + +**Goal:** Add daemon/package skeletons and init wiring without claiming a usable login flow. + +| # | Task | Acceptance criteria | +|---|---|---| +| G1.1 | Create recipe skeletons | `redbear-greeter`, `redbear-authd`, `redbear-session-launch`, optional `redbear-login-protocol` build and stage | +| G1.2 | Add config fragment | A tracked config fragment wires `20_greeter.service` and supporting files | +| G1.3 | Replace direct display launch in the chosen profile | Desktop profile starts `redbear-greeterd` instead of directly starting `redbear-kde-session` | +| G1.4 | Keep text/debug recovery path | VT2 `getty` and debug `getty` still boot | + +**Exit criteria:** + +- packages build, +- boot wiring is in place, +- image still boots, +- fallback text surfaces remain reachable. + +### Phase G2 — Auth Foundation + +**Goal:** Prove the local account/authentication boundary independent of the full greeter UI. 
+ +| # | Task | Acceptance criteria | +|---|---|---| +| G2.1 | Implement passwd/shadow parsing | Local users can be parsed from the chosen account files | +| G2.2 | Implement password verification | Valid and invalid credentials are distinguished correctly in tests | +| G2.3 | Implement lock/disable rules | Locked/disabled users are rejected predictably | +| G2.4 | Implement session-spawn authorization boundary | Only `redbear-authd` can approve session launch | +| G2.5 | Implement bounded failure handling | Retry throttling / lockout policy is documented and covered by tests | + +**Exit criteria:** + +- auth parser tests pass, +- credential checks pass, +- negative cases pass, +- no UI process reads auth data, +- repeated auth failure behavior is bounded and explicit. + +### Phase G3 — Greeter UI and Daemon State Machine + +**Goal:** Bring up the graphical greeter surface and daemon orchestration. + +| # | Task | Acceptance criteria | +|---|---|---| +| G3.1 | Start greeter UI on VT3 | QEMU image reaches a Red Bear-branded graphical greeter surface | +| G3.2 | Implement UI/daemon socket protocol | UI can submit login and power requests | +| G3.3 | Implement daemon state machine | State transitions are test-covered for success and failure paths | +| G3.4 | Implement bounded login error UX | Invalid credentials return cleanly to `GreeterReady` | +| G3.5 | Implement failure fallback behavior | Greeter/UI restart failure yields reachable fallback behavior rather than infinite restart | + +**Exit criteria:** + +- greeter surface boots, +- UI/daemon protocol works, +- failure returns to the login screen, +- no session starts yet without auth success, +- fallback console path remains reachable under greeter failure. + +### Phase G4 — Session Handoff to KDE on Wayland + +**Goal:** Replace direct session startup with authenticated session launch. 
+ +| # | Task | Acceptance criteria | +|---|---|---| +| G4.1 | Implement `redbear-session-launch` env/bootstrap path | Session runs with correct uid/gid/groups/runtime dir/env | +| G4.2 | Implement greeter teardown before session launch | Greeter UI exits before KDE session becomes active | +| G4.3 | Implement session-monitor return path | Session exit returns to the greeter | +| G4.4 | Keep bounded D-Bus/sessiond compatibility intact | KWin still sees the required login1 subset | + +**Exit criteria:** + +- successful login reaches `redbear-kde-session`, +- session uses intended env/runtime dir, +- session exit returns to greeter, +- fallback VT2 login still works. + +### Phase G5 — Desktop Integration and Product Surface Hardening + +**Goal:** Move from “bounded login proof” to a product-quality Red Bear login surface. + +| # | Task | Acceptance criteria | +|---|---|---| +| G5.1 | Implement reboot/shutdown path | Greeter can trigger bounded power actions | +| G5.2 | Hardening | rate limiting, buffer clearing, socket permission checks, retry behavior | +| G5.3 | Packaging and profile cleanup | Target desktop profile wiring is canonical and documented | +| G5.4 | Validation tooling | scripted QEMU/runtime proof exists for greeter boot/login/logout loop | + +**Exit criteria:** + +- login loop is repeatable, +- power actions are bounded and explicit, +- hardening checks pass, +- documentation matches shipped surface. + +### Critical Path + +```text +G0 (scope) + → G1 (wiring) + → G2 (auth boundary) + → G3 (greeter surface) + → G4 (session handoff) + → G5 (product hardening) +``` + +--- + +## 11. 
Testing and Validation + +### 11.1 Unit and Component Tests + +| Component | Tests | +|---|---| +| `redbear-login-protocol` | message encoding/decoding, version checks | +| `redbear-authd` | passwd parsing, shadow parsing, hash verification, lockout logic | +| `redbear-session-launch` | env construction, runtime-dir creation, argument validation | +| `redbear-greeterd` | state transitions, socket protocol handling, session-monitor behavior | +| `redbear-greeter-ui` | smoke only; no auth logic in UI tests | + +### 11.2 Integration Checks + +The first bounded integration proofs should answer these questions in order: + +1. does the image boot to a graphical greeter surface on VT3? +2. does invalid login return to the greeter surface? +3. does valid login reach `redbear-kde-session`? +4. does session exit return to the greeter? +5. do VT2 and debug login remain available as recovery paths? +6. does greeter failure still leave a recoverable console path instead of looping forever? + +### 11.3 Suggested Validation Commands / Harnesses + +This plan expects a bounded QEMU harness similar in style to existing Red Bear runtime proofs. + +Expected future surfaces: + +- `local/scripts/test-greeter-qemu.sh` +- in-target checker such as `redbear-greeter-check` + +The exact script names are implementation details, but the proof style should match existing bounded +runtime validation patterns already used elsewhere in the repo. 
+ +### 11.4 Definition of Done + +This plan is only substantially complete when **all** of the following are true: + +- a Red Bear-branded graphical greeter boots on the tracked KDE desktop path, +- credentials are verified through a narrow privileged boundary, +- valid login reaches KDE on Wayland, +- invalid login returns cleanly to the greeter, +- session exit returns to the greeter, +- VT2 fallback and debug console remain available, +- greeter/UI failure does not trap the machine in an unrecoverable restart loop, +- the bounded login/logout proof repeats reliably on the intended target class. + +--- + +## 12. Risks and Mitigations + +| ID | Risk | Likelihood | Impact | Mitigation | +|---|---|---:|---:|---| +| R1 | `redbear-sessiond` login1 subset proves too thin for stable KWin session ownership | Medium | High | keep greeter plan explicitly dependent on D-Bus/sessiond validation; widen only the needed contract | +| R2 | Auth layer grows into a PAM replacement by accident | Medium | High | freeze v1 to local users + passwd/shadow only | +| R3 | Greeter UI becomes privileged by convenience | Medium | High | keep UI unprivileged and enforce daemon/auth socket boundary | +| R4 | VT/session handoff is flaky on real targets | Medium | High | keep VT2 fallback path and validate QEMU before broader claims | +| R5 | Profile ownership confusion (`redbear-kde` vs `redbear-full`) delays integration | High | Medium | keep profile naming a policy question separate from greeter architecture | +| R6 | Branding/assets are staged inconsistently | Low | Medium | stage stable runtime paths under `/usr/share/redbear/greeter/` | +| R7 | Session launch inherits too much ambient environment | Medium | Medium | start from a clean explicit environment in `redbear-session-launch` | +| R8 | Greeter restart policy creates boot loops | Medium | High | bound retries and prefer console fallback after repeated failures | + +--- + +## 13. 
Relationship to Other Plans + +| Document | Role relative to this plan | +|---|---| +| `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` | Parent desktop-path authority; this plan fills the graphical login boundary beneath it | +| `local/docs/DBUS-INTEGRATION-PLAN.md` | Parent session/D-Bus authority for `redbear-sessiond` and related service model | +| `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` | Current truth source for what the desktop stack actually builds/boots today | +| `local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` | Wayland/compositor subsystem plan beneath the desktop path | +| `docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md` | Repo-wide product/profile/workstream framing | + +This document does **not** replace any of the above. It fills a missing subsystem-planning gap: +the login/greeter boundary between a booted desktop substrate and a real KDE session surface. + +--- + +## 14. File and Recipe Inventory + +### 14.1 Existing Files This Plan Builds On + +- `config/redbear-full.toml` +- `config/redbear-greeter-services.toml` +- `local/recipes/system/redbear-sessiond/` +- `local/recipes/system/redbear-dbus-services/` +- `local/Assets/images/Red Bear OS loading background.png` +- `local/Assets/images/Red Bear OS icon.png` + +### 14.2 Proposed New Recipe Layout + +```text +local/recipes/system/ +├── redbear-authd/ +├── redbear-session-launch/ +└── redbear-greeter/ +``` + +Current implementation status: + +- `redbear-authd/` — implemented (experimental, target-side recipe build proven) +- `redbear-session-launch/` — implemented (experimental, target-side recipe build proven) +- `redbear-greeter/` — implemented as an experimental bounded surface; daemon, Qt/QML UI, compositor wrapper, staged assets, and bounded runtime checks now exist, while broader KDE runtime trust still remains open +- Current blocker after the greeter/UI packaging work: guest-side Qt shared-plugin loading on Red Bear still rejects platform plugins during metadata scan (`libqminimal.so`, 
`qwayland-org.kde.kwin.qpa.so`) even though the plugin files are present in the image and their on-disk ELF headers read correctly via non-Qt tools. This blocks the bounded graphical compositor proof below the greeter slice. +- `redbear-login-protocol/` — optional follow-up extraction, not required for the first bounded runtime proof + +### 14.3 Proposed New Runtime Files + +```text +/usr/bin/redbear-greeterd +/usr/bin/redbear-greeter-ui +/usr/bin/redbear-authd +/usr/bin/redbear-session-launch +/usr/share/redbear/greeter/background.png +/usr/share/redbear/greeter/icon.png +/run/redbear-greeterd.sock +/run/redbear-authd.sock +/usr/bin/redbear-greeter-check +``` + +Bounded validation helper currently landed: + +```text +local/scripts/test-greeter-qemu.sh +``` + +### 14.4 Proposed Config Fragment + +This plan expects a tracked config include fragment such as: + +```text +config/redbear-greeter-services.toml +``` + +That fragment should own: + +- package inclusions for greeter/auth/session-launch, +- `20_greeter.service`, +- any bounded init-service overrides needed to replace direct session startup. + +The greeter **recipe**, not the config fragment, should own staged runtime artifacts such as: + +- `/usr/bin/redbear-greeter-ui` +- `/usr/share/redbear/greeter/background.png` +- `/usr/share/redbear/greeter/icon.png` +- compositor/helper payloads that the greeter package installs under `/usr/share/redbear/greeter/` + +--- + +## 15. Open Questions + +1. Which tracked profile should own the canonical desktop greeter path first: + `redbear-kde`, `redbear-full`, or a unified future target? +2. Which password-hash scheme should Red Bear standardize on for v1 local users? +3. Should reboot/shutdown requests go through `redbear-authd` or a separate narrow power helper? +4. Is `dbus-run-session` reliable enough on Red Bear, or should the current `dbus-launch` path remain the first shipped session-bus strategy? +5. 
At what point should the project consider SDDM-class integration again, if ever? + +Current answer to (1): **`redbear-full` first**, with `redbear-live-full` inheriting that path for +live media. + +Current answer to (2): **traditional `/etc/shadow` SHA-512-crypt / SHA-256-crypt first** (`$6$` / `$5$`), +with narrower support preferred over premature multi-format sprawl. + +Free/libre policy note for (2): the current verifier path uses the pure-Rust `sha-crypt` crate, +which is licensed `MIT OR Apache-2.0`; for Red Bear policy purposes it is treated under the MIT +option, keeping the greeter/login stack within a free/open-source dependency surface. The intended +implementation direction remains **pure-Rust verification crates first**, not `crypt(3)` FFI. + +Current answer to (3): **through `redbear-authd` in the first cut**, to preserve one narrow privileged +boundary until runtime evidence justifies a separate helper. + +Current answer to (4): **`dbus-run-session` remains the preferred first shipped path**, with fallback +conservatism retained in validation/docs until broader runtime proof exists. 
+ +Current answer to (5): **not before the Red Bear-native greeter path is runtime-trusted and the +session/auth substrate is materially stronger than it is today.** diff --git a/local/docs/HARDWARE-3D-ASSESSMENT.md b/local/docs/HARDWARE-3D-ASSESSMENT.md index 64b619bd..31ec358f 100644 --- a/local/docs/HARDWARE-3D-ASSESSMENT.md +++ b/local/docs/HARDWARE-3D-ASSESSMENT.md @@ -42,7 +42,7 @@ GPU hardware (AMD RDNA / Intel Gen) | Component | Status | Lines | What's Implemented | |-----------|--------|-------|-------------------| | DRM/KMS modesetting | ✅ Code complete | ~500 | 16 KMS ioctls, CRTC/connector/encoder/plane | -| AMD Display Core | ✅ Compiles | ~1400 | DC init, CRTC programming, firmware loading, HPD | +| AMD display backend (bounded retained path) | ✅ Builds | ~2 C glue files + Rust FFI surface | Red Bear display glue (`amdgpu_redox_main.c`, `redox_stubs.c`) plus the Rust FFI consumer build; imported Linux AMD DC/TTM/core remain under compile triage | | Intel Display Driver | ✅ Compiles | ~800 | Display pipe, GGTT, forcewake | | GEM buffer management | ✅ Full | ~350 | create/close/mmap with DmaBuffer | | GEM scheme ioctls | ✅ Wired | ~100 | GEM_CREATE, GEM_CLOSE, GEM_MMAP | @@ -167,5 +167,5 @@ platform priority. - `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` — Phase 5 covers hardware GPU enablement - `local/docs/AMD-FIRST-INTEGRATION.md` — AMD-specific GPU driver details -- `local/docs/P2-AMD-GPU-DISPLAY.md` — Display driver code-complete status +- `local/docs/AMDGPU-DC-COMPILE-TRIAGE-PLAN.md` — AMD DC compile-triage and bounded source-set strategy - `docs/04-LINUX-DRIVER-COMPAT.md` — linux-kpi architecture reference diff --git a/local/docs/IOMMU-SPEC-REFERENCE.md b/local/docs/IOMMU-SPEC-REFERENCE.md index c7f39266..231bdb5c 100644 --- a/local/docs/IOMMU-SPEC-REFERENCE.md +++ b/local/docs/IOMMU-SPEC-REFERENCE.md @@ -2,7 +2,7 @@ **Purpose**: Implementation-ready hardware register and data structure reference for Red Bear OS IOMMU support. 
Based on AMD IOMMU Specification 48882 Rev 3.10 and Intel Virtualization Technology for Directed I/O (VT-d) Rev 5.0. -**Status**: The `iommu` daemon now builds in-tree, but hardware validation is still missing in the AMD-first integration plan (see `AMD-FIRST-INTEGRATION.md`). This document provides the register and data-structure reference for finishing AMD-Vi and Intel VT-d bring-up. +**Status**: The `iommu` daemon now builds in-tree, owns AMD-Vi runtime initialization, and also detects the presence of a kernel ACPI `DMAR` table so Intel VT-d runtime ownership can converge here instead of remaining conceptually stranded in `acpid`. Hardware validation is still missing in the AMD-first integration plan (see `AMD-FIRST-INTEGRATION.md`). This document provides the register and data-structure reference for finishing AMD-Vi and Intel VT-d bring-up. --- diff --git a/local/docs/IRQ-AND-LOWLEVEL-CONTROLLERS-ENHANCEMENT-PLAN.md b/local/docs/IRQ-AND-LOWLEVEL-CONTROLLERS-ENHANCEMENT-PLAN.md index 88c85892..ec5ef666 100644 --- a/local/docs/IRQ-AND-LOWLEVEL-CONTROLLERS-ENHANCEMENT-PLAN.md +++ b/local/docs/IRQ-AND-LOWLEVEL-CONTROLLERS-ENHANCEMENT-PLAN.md @@ -78,6 +78,9 @@ Red Bear OS already has a meaningful low-level controller and interrupt foundati including PCI scheme-entry parsing bounds, I/O BAR port conversion safety, and MSI-X BAR window helper validation. This should be treated as **source + host-test evidence**, not as runtime controller proof. +- `redox-driver-sys` fast PCI enumeration now preserves capability-chain data from config-space + bytes instead of returning empty capability lists, and exposes a quirk-aware interrupt-support + summary (`none` / `legacy` / `msi` / `msix`) for downstream policy convergence. - `redox-drm` already contains a shared interrupt abstraction with MSI-X-first and legacy-IRQ fallback paths for GPU drivers. 
- The AMD-Vi / Intel VT-d reference material and the in-tree `iommu` daemon establish a serious @@ -328,6 +331,9 @@ Open enhancement items: - the repo now has a bounded PS/2 runtime-proof path via `redbear-phase-ps2-check` and `local/scripts/test-ps2-qemu.sh --check`, which proves serio node presence and a successful handoff into the existing Phase 3 input-path checker inside a guest +- `ps2d` controller init now also drains stale controller output during probe and around the core + init/self-test path, which is the current bounded Red Bear-native equivalent of Linux i8042 flush + discipline before broader PS/2 suspend/resume work exists. ### USB xHCI controller interrupt path diff --git a/local/docs/LINUX-BORROWING-RUST-IMPLEMENTATION-PLAN.md b/local/docs/LINUX-BORROWING-RUST-IMPLEMENTATION-PLAN.md index cbc4609d..cef734ce 100644 --- a/local/docs/LINUX-BORROWING-RUST-IMPLEMENTATION-PLAN.md +++ b/local/docs/LINUX-BORROWING-RUST-IMPLEMENTATION-PLAN.md @@ -12,6 +12,43 @@ This document answers a specific Red Bear question: This plan is intentionally **Red Bear-native**. It does **not** propose importing Linux subsystem architecture into Red Bear. +## Current implementation status snapshot (2026-04-18) + +The software-only, bounded slices from this plan that are now implemented in code are: + +- **Phase A — PCI / IRQ substrate** +- shared PCI config-space parsing now preserves capability chains in `redox-driver-sys` +- shared quirk-aware interrupt support summary exists (`none` / `legacy` / `msi` / `msix`) +- `pcid` now consumes the shared PCI parser in its header path for interrupt-support reporting, + which starts the planned downstream convergence onto the shared substrate instead of keeping all + capability interpretation local. 
+- **Phase B — ACPI / IOMMU groundwork** + - `acpid` now has an explicit userspace sleep-target model for `S1` / `S3` / `S4` / `S5` + - `_S5` shutdown routes through that model, while non-`S5` targets remain groundwork-only + - `iommu` now detects kernel ACPI `DMAR` presence, establishing the Intel VT-d ownership seam +- **Phase C — PS/2 / USB / storage** + - `ps2d` now flushes stale controller output during probe and around core init/self-test + - `xhcid` now tracks active alternate settings and resolves endpoint descriptors through that map + - `usbscsid` now has a bounded `SYNCHRONIZE CACHE(10/16)` heuristic behind `needs_sync_cache` +- **Phase D — Wi-Fi / DRM shared-core** + - `redbear-wifictl` transport probing now uses the shared PCI parser and interrupt-support summary + - `redox-drm` now exposes queued shared hotplug/vblank events through a real scheme `EVENT_READ` surface + +The work that still remains is the larger **vendor/backend maturation and hardware-validation** side: + +- full ACPI sleep/resume implementation beyond groundwork +- full Intel VT-d runtime support beyond DMAR ownership discovery +- deeper PCI / `pcid` convergence on shared helpers +- broader PS/2 resume/wake policy +- broader USB architecture/runtime maturation beyond the bounded helper slices already implemented +- deeper Wi-Fi transport/helper extraction beyond probing +- Intel and AMD DRM backend maturation and real hardware validation + +This document should therefore be read as: + +- **implemented now** for the bounded shared-core and software-only slices listed above +- **still in progress** for backend maturation and hardware-backed acceptance phases + ## Hard rules 1. **Linux suspend/resume is reference-only.** Red Bear should study Linux ordering and edge cases, but implement its own suspend/resume support in the Red Bear architecture. 
@@ -285,6 +322,15 @@ Keep only: - unit tests for malformed capability chains and BAR layout - interrupt mode logged deterministically +**Current implementation progress (2026-04-18)** +- `redox-driver-sys` fast PCI enumeration now parses capability chains from config bytes in the + read-only path, so enumerated `PciDeviceInfo` records no longer default to empty capability + lists. +- `PciDeviceInfo` now exposes a quirk-aware interrupt support summary (`none`, `legacy`, `msi`, + `msix`) that can serve as the common policy input for future `pcid`/driver convergence. +- Host-runnable unit coverage exists for capability-chain parsing, malformed next-pointer handling, + and interrupt-support selection behavior. + ### Phase B — ACPI / suspend / IOMMU **Primary targets** @@ -304,6 +350,19 @@ Keep only: - explicit sleep phase machine exists - IOMMU ownership clarified and moved out of `acpid` +**Current implementation progress (2026-04-18)** +- `acpid` now has an explicit `SleepTarget` / `SleepPhase` model in userspace, covering `S1`, `S3`, + `S4`, and `S5` as named Red Bear sleep targets. +- The real shutdown path now routes through that target model, while non-`S5` targets are + recognized but reported as groundwork-only rather than silently ignored. +- Unit coverage exists for sleep-target parsing, AML sleep-object naming, and the current + Red Bear-native rule that only `S5` is treated as an implemented soft-off path today. +- This is still groundwork only: there is no claim of full suspend/resume or sleep eventing yet, + and Linux suspend sequencing remains reference material rather than imported structure. +- The `iommu` daemon now also detects the presence of a kernel ACPI `DMAR` table and reports that + Intel VT-d runtime ownership should converge there instead of remaining conceptually attached to + the old transitional `acpid` DMAR code. 
+ ### Phase C — PS/2 / USB / storage **Primary targets** @@ -321,6 +380,33 @@ Keep only: - xHCI and USB maturity proofs remain green - no Linux USB/input-core structure imported +**Current implementation progress (2026-04-18)** +- `xhcid` now tracks active alternate settings per interface and resolves endpoint descriptors using + that active-alternate map instead of flattening all interface descriptors in a configuration. +- Direct unit coverage exists for default-alternate endpoint selection and alternate-setting-aware + endpoint remapping, closing the most explicit in-tree USB interface-selection TODO without + importing Linux USB-core structure. +- `xhcid` now also preserves previously selected alternates on the same configuration and applies a + requested interface/alternate override before endpoint planning, so alternate-setting + reconfiguration no longer silently falls back to all-zero defaults. +- `xhcid` endpoint-direction lookup now also follows the active interface/alternate selection state + instead of reading from the first configuration/interface pair unconditionally. +- `xhcid` driver spawning now also follows the selected configuration and active alternate map + instead of hardcoding the first configuration and ignoring non-zero alternates. +- `xhcid` now also has a preserve-and-grow event-ring path in the IRQ reactor, so `EventRingFull` + recovery no longer drops unread event TRBs while resizing the primary event ring. +- `usbhubd` and `xhcid` now propagate USB 2 hub TT Think Time from the parent hub descriptor into + the xHCI Slot Context TT information bits using a bounded Linux-compatible encoding path. +- `xhcid` endpoint-context calculations are now protocol-speed-aware for SuperSpeedPlus, so + interval and ESIT-payload selection distinguish SSP paths from generic SuperSpeed using the + resolved port protocol speed rather than only endpoint companion presence. 
+- `usbscsid` now has a bounded native `SYNCHRONIZE CACHE(10/16)` heuristic gated by the existing + `needs_sync_cache` storage quirk, directly reflecting the planned Linux `sd.c` donor usage without + importing Linux SCSI midlayer structure. +- `ps2d` now performs an explicit controller-output flush during probe and at the key controller + reinitialization boundaries in `Ps2::init()`, matching the Linux `i8042_flush()` discipline in a + bounded Red Bear-native way without importing Linux input-core structure. + ### Phase D — Wi-Fi and GPU/DRM **Primary targets** @@ -337,6 +423,16 @@ Keep only: - DRM display-vs-render boundary remains explicit - no claim of full AMDGPU rewrite or Linux wireless-architecture import +**Current implementation progress (2026-04-18)** +- `redbear-wifictl` transport probing now uses the shared `redox-driver-sys` PCI parser and the + shared quirk-aware interrupt-support summary instead of relying only on local raw-config logic. +- This is a bounded helper extraction only: the native Wi-Fi control plane remains authoritative, + and there is still no import of Linux wireless subsystem structure. +- `redox-drm` now turns shared hotplug and vblank events into a queued scheme-visible + `EVENT_READ` surface for `card0`, with hotplug also reaching the matching connector handle. + That makes shared DRM event delivery observable without conflating it with render-fence + semantics. + ## 4. Subsystem-specific code guidelines ### ACPI / suspend diff --git a/local/docs/P2-AMD-GPU-DISPLAY.md b/local/docs/P2-AMD-GPU-DISPLAY.md deleted file mode 100644 index fe786abc..00000000 --- a/local/docs/P2-AMD-GPU-DISPLAY.md +++ /dev/null @@ -1,143 +0,0 @@ -# Historical Phase P2: AMD GPU Display Output - -## Status: Historical implementation milestone complete — hardware validation pending - -> **Planning authority note (2026-04-18):** this file is an AMD display implementation/status -> reference. 
For current GPU/DRM execution order, Intel/AMD parity criteria, and future task -> sequencing, use `local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md`. - -The original P2 implementation task was completed and compile-validated for its scoped deliverables. -This file is now a historical AMD display implementation/status reference rather than the canonical -GPU planning document. Hardware validation remains a separate milestone requiring physical AMD GPU -hardware. - -## Goal -Enable AMD GPU display output (modesetting) on Redox OS via a DRM scheme daemon -that ports the AMD Display Core (DC) from Linux kernel 7.0-rc7. - -## Architecture - -Userspace apps → scheme:drm → redox-drm daemon → AMD DC (C code, linux-kpi) → MMIO - -## Components - -### redox-drm (local/recipes/gpu/redox-drm/) -DRM scheme daemon. Registers scheme:drm/card0. -- PCI enumeration for AMD GPUs (vendor 0x1002) -- MMIO register mapping via redox-driver-sys -- KMS: connector detection, mode getting, CRTC programming -- GEM: buffer object create/mmap/close -- Dispatches to AMD driver backend - -### amdgpu C port (local/recipes/gpu/amdgpu/source/) -AMD GPU driver source extracted from Linux 7.0-rc7: -- drivers/gpu/drm/amd/ — full AMD driver (269k lines) -- drivers/gpu/drm/ttm/ — TTM memory manager -- include/drm/ — DRM core headers -- include/linux/ — Linux kernel headers (reference) - -### amdgpu build recipe (local/recipes/gpu/amdgpu/) -Compiles AMD DC display code against linux-kpi headers with -D__redox__: -- recipe.toml — custom build template -- redox_glue.h — type compatibility, function stubs, macro replacements -- redox_stubs.c — C implementations of Linux kernel API stubs -- amdgpu_redox_main.c — daemon entry point replacing module_init - -## Build Integration - -Config: config/redbear-desktop.toml (includes desktop.toml + Red Bear GPU packages) -- Includes redox-drm and amdgpu packages -- filesystem_size = 8196 (8GB, needs space for firmware blobs) - -pcid: local/config/pcid.d/amd_gpu.toml -- 
Auto-detects AMD GPU (vendor 0x1002, class 0x03) -- Launches redox-drm with PCI device location - -## Boot Sequence (P2) - -1. Kernel boots, initializes PCI subsystem -2. pcid detects AMD GPU (vendor 0x1002) -3. pcid-spawner launches: redox-drm $BUS $DEV $FUNC -4. redox-drm opens PCI device, verifies AMD GPU -5. redox-drm maps MMIO BAR0 (GPU registers) -6. redox-drm loads PSP firmware via scheme:firmware -7. redox-drm initializes AMD DC (Display Core) -8. AMD DC detects connectors, reads EDID -9. scheme:drm/card0 registered -10. Userspace can begin bounded display-side probing (for example `modetest`) once runtime validation is available; this is not yet a hardware-backed support claim by itself - -## Verification - -### Code Complete (P2 implementation task) -- [x] scheme:drm/card0 daemon compiles and registers scheme -- [x] KMS ioctl dispatch handles all 15 DRM ioctls -- [x] GEM buffer lifecycle: create/mmap/close with ownership tracking -- [x] FB lifecycle: ADDFB/RMFB with size validation, per-fd ownership -- [x] Page flip: one outstanding per CRTC, vblank-gated retirement -- [x] Firmware: Rust cache validates blob availability at startup; C code loads via request_firmware() from scheme:firmware at runtime -- [x] GTT page tables: free-list reuse, TLB-safe error rollback -- [x] Original implementation pass received repeated review of resource lifecycle, ownership, GTT, and page-flip behavior -- [x] All 4 Rust crates build with zero errors, zero warnings -- [x] C glue files pass gcc -fsyntax-only -- [x] Build symlinks and config files in place - -### Hardware Validation (requires physical AMD GPU) -- [ ] modetest -M amd shows connector info and modes -- [ ] modetest -M amd -s 0:1920x1080 sets mode and shows test pattern -- [ ] Works on real AMD hardware (RDNA2/RDNA3) - -Current bounded runtime harness: -- `redbear-drm-display-check` is now the in-guest bounded display checker. 
-- `local/scripts/test-amd-gpu.sh` now wraps the shared `local/scripts/test-drm-display-runtime.sh` harness. -- The checker now proves connector/mode enumeration directly and can perform a bounded direct modeset proof. -- A successful harness run is still display-only evidence, not render proof. - -## Key Files - -| File | Purpose | -|------|---------| -| local/recipes/gpu/redox-drm/ | DRM scheme daemon | -| local/recipes/gpu/amdgpu/ | Build recipe + integration glue | -| local/recipes/gpu/amdgpu/source/ | AMD driver C port source (from Linux 7.0-rc7) | -| config/redbear-desktop.toml | Build config | -| local/config/pcid.d/amd_gpu.toml | PCI auto-detection (AMD) | -| local/recipes/gpu/redox-drm/source/src/drivers/interrupt.rs | MSI-X/legacy interrupt abstraction | -| local/config/pcid.d/intel_gpu.toml | Intel GPU PCI auto-detection | -| local/patches/base/P0-pcid-config-endpoint.patch | pcid /config file endpoint | -| local/scripts/build-redbear.sh | Canonical build wrapper | -| local/scripts/test-amd-gpu.sh | Test script | - -## Dependencies (P1) - -| Crate | Status | Provides | -|-------|--------|----------| -| redox-driver-sys | ✅ | MmioRegion, PciDevice, IrqHandle, DmaBuffer | -| linux-kpi | ✅ | C headers, FFI stubs (kmalloc, mutex, spinlock...) 
| -| firmware-loader | ✅ | scheme:firmware daemon | - -## P1/P2 Changes Since Initial Implementation - -### pcid /config endpoint (T1) -- Added `Config { addr: PciAddress }` handle to pcid scheme -- Routes `/scheme/pci/{addr}/config` to raw PCI config space read/write -- Enables redox-driver-sys PciDevice to access config space for MSI-X, BAR parsing - -### MSI-X interrupt support (T2-T4) -- Created shared `InterruptHandle` enum in `redox-drm/src/drivers/interrupt.rs` -- Tries MSI-X first (find capability → parse → map table → mask_all → enable → request_vector) -- Falls back through MSI and then legacy IRQ, with `NO_MSIX`, `NO_MSI`, and `FORCE_LEGACY_IRQ` quirk gates applied before transport selection -- Both AMD and Intel drivers use `InterruptHandle::setup()` - -### Dynamic PCI device info (T6) -- Replaced hardcoded `redox_pci_find_amd_gpu()` stub with `redox_pci_set_device_info()` -- Rust side passes real PciDeviceInfo (vendor, device, revision, IRQ, BAR0/BAR2) to C via FFI -- C layer validates the struct is populated before `amdgpu_redox_init()` uses it - -### linux-kpi quirk consumption (current) -- `redox-drm` now also passes the real PCI BDF into the amdgpu C glue so linux-kpi quirk lookups resolve against the actual GPU, not a guessed location -- `amdgpu_redox_main.c` now calls `pci_get_quirk_flags()` / `pci_has_quirk()` in the live Redox init path -- firmware gating now stays at the Rust-side GPU driver boundary, while the AMD C backend logs quirk-driven IRQ expectations (`NO_MSI`, `NO_MSIX`, `FORCE_LEGACY`) via linux-kpi lookups on the real GPU BDF - -### Intel GPU support (T4-T5) -- Intel driver switched to shared `InterruptHandle` (MSI-X / MSI / legacy with quirk-aware fallback) -- Added `local/config/pcid.d/intel_gpu.toml` for auto-detection (vendor 0x8086, class 0x03) diff --git a/local/docs/QT6-PORT-STATUS.md b/local/docs/QT6-PORT-STATUS.md index dc2a10b2..1858929d 100644 --- a/local/docs/QT6-PORT-STATUS.md +++ b/local/docs/QT6-PORT-STATUS.md @@ 
-258,7 +258,7 @@ Plus: QML debug plugins, QtQuick/QML modules staged. Recent relibc implementation progress in this repo now also includes: -- source-visible `signalfd`, `timerfd`, `eventfd`, `open_memstream`, `F_DUPFD_CLOEXEC`, and `MSG_NOSIGNAL` +- source-visible plus strict Redox-target runtime-tested `signalfd`, `timerfd`, `eventfd`, `open_memstream`, `F_DUPFD_CLOEXEC`, and `MSG_NOSIGNAL` - a bounded `waitid()` path in relibc, replacing the old Qt-side waitid stub workaround - a bounded `eth0`-backed `net_if` / `ifaddrs` path in relibc - a minimal source-visible `resolv.h` surface in relibc @@ -307,7 +307,7 @@ Current downstream build proof in this repo now includes: - Non-udev shim (libudev stub for HAVE_UDEV=0) - Vendored Linux input.h selection for __redox__ - strtod_l() fallback - - timerfd fallback (tracks expiry without timerfd fd) +- timerfd path still warrants broader consumer-confidence review, but relibc now has strict Redox-target runtime proof for the bounded timerfd harness - Linux-only tool binaries skipped on Redox ### Phase 3 — KF6 Frameworks (✅ ALL 32 BUILT) diff --git a/local/docs/USB-IMPLEMENTATION-PLAN.md b/local/docs/USB-IMPLEMENTATION-PLAN.md index e1afcc86..3c1fb9e3 100644 --- a/local/docs/USB-IMPLEMENTATION-PLAN.md +++ b/local/docs/USB-IMPLEMENTATION-PLAN.md @@ -49,7 +49,8 @@ The Red Bear USB stack consists of: - native USB observability (`lsusb`, `usbctl`, `redbear-info`) - a low-level userspace client API through `xhcid_interface` - a hardware quirks system that applies USB device-specific workarounds at runtime -- three QEMU validation harnesses covering interrupt delivery, full stack, and storage autospawn +- four QEMU validation harnesses covering interrupt delivery, bounded device lifecycle hotplug, + full stack, and storage autospawn - an in-guest scheme-tree checker (`redbear-usb-check`) ### Red Bear xHCI Patch Layer @@ -104,12 +105,9 @@ source: Even with the Red Bear patch applied: - HID is still wired through the legacy 
mixed-stream `inputd` path -- SuperSpeedPlus differentiation requires Extended Port Status (not yet implemented) -- TTT (Think Time) in Slot Context hardcoded to 0 — needs parent hub descriptor propagation -- Composite devices and non-default alternate settings use first-match only - (`//TODO: USE ENDPOINTS FROM ALL INTERFACES`) -- `grow_event_ring()` swaps to a new ring but does not copy pending TRBs from the old one; under - sustained event-ring-full conditions this may lose in-flight events +- Any remaining USB composite/device-model issues now sit above the bounded helper fixes already + landed for active alternates, endpoint direction, real interface/alternate hub configuration, and + SSP-aware endpoint-context calculations. - ~57 TODO/FIXME comments remain across xHCI driver files - usbhubd: interrupt-driven change detection implemented; 1-second polling retained as fallback - usbscsid: `ReadCapacity16` now implemented with automatic fallback from `ReadCapacity10` @@ -168,6 +166,7 @@ Key files and their sizes: | Script | What it tests | Limitations | |---|---|---| +| `test-xhci-device-lifecycle-qemu.sh --check` | Bounded xHCI hotplug lifecycle proof: runtime attach → configure → driver spawn → detach for HID and storage devices | QEMU-only; monitor-driven hotplug; not a broad hardware stress test | | `test-usb-qemu.sh --check` | Full stack: xHCI interrupt mode, HID spawn, SCSI spawn, bounded sector-0 readback, BOS processing, crash errors | QEMU-only; log-grep based; no guest-side write proof | | `test-usb-storage-qemu.sh` | USB mass storage autospawn + sector-0 readback + crash patterns | No guest-side write proof yet; no multi-LUN; no UAS | | `test-xhci-irq-qemu.sh --check` | xHCI interrupt delivery mode (MSI/MSI-X/INTx) | No devices attached during check; single log grep | @@ -262,6 +261,12 @@ hardware; controller enumerates attached devices reliably across repeated boot c - USB 3 hub endpoint configuration stall handled - `endp_direction` off-by-one fixed 
(`checked_sub(1)`) - `cfg_idx` assigned after validation +- xHCI lifecycle gating prevents new I/O from entering while a port is detaching +- `attach_device()` no longer leaves a published partially-enumerated `PortState` on attach failure +- `detach_device()` now waits for in-flight lifecycle operations before removing the port state +- `configure_endpoints_once()` is transactional: endpoint state is staged locally, input-context + mutations are snapshotted, and rollback is attempted if `CONFIGURE_ENDPOINT` or + `SET_CONFIGURATION` fails - `CLEAR_FEATURE` uses correct USB endpoint address from descriptor - `usbhubd` status_change_buf sizing and bitmap parsing fixed - Hub interrupt EP1 status change detection replacing polling @@ -314,6 +319,9 @@ replug do not collapse all USB HID into one anonymous stream. - `usbscsid` SCSI layer: `plain::from_bytes().unwrap()` replaced with typed `ScsiError` and fallible `parse_bytes`/`parse_mut_bytes` helpers - `usbscsid` main.rs: fallible `run()` helper, event loop continues on individual failures - `ReadCapacity16` implemented with automatic fallback when `ReadCapacity10` returns max LBA (0xFFFFFFFF) +- `usbscsid` now issues bounded `SYNCHRONIZE CACHE(10/16)` commands when the runtime storage quirk + set includes `needs_sync_cache`, using Linux `sd.c` sync-cache behavior as a donor reference for + command selection and tolerant error handling. **Remaining** (all require hardware or design decisions): - Runtime I/O validation: prove stall recovery works under real device I/O (requires hardware) @@ -359,6 +367,7 @@ scope. 
- `test-usb-qemu.sh` — full USB stack validation harness (6 checks) - `test-usb-storage-qemu.sh` — USB mass storage autospawn check - `test-xhci-irq-qemu.sh` — xHCI interrupt delivery mode check +- `test-xhci-device-lifecycle-qemu.sh` — bounded xHCI attach/configure/detach hotplug proof - `USB-VALIDATION-RUNBOOK.md` — operator documentation with Paths A and B - `redbear-usb-check` — in-guest scheme-tree checker (now installed in image) - `lsusb` — full USB scheme walk with descriptor parsing and quirks integration @@ -367,7 +376,7 @@ scope. **Remaining** (all require hardware): - Add hardware-matrix coverage for target controllers and class families - Add USB storage data I/O validation (read/write to block device) -- Add hot-plug stress testing harness +- Add repeated hardware hot-plug stress testing beyond the bounded QEMU lifecycle slice **Exit criteria**: at least one profile can honestly claim a validated USB baseline for named controller/class scope; USB support language in docs matches real test evidence. @@ -613,24 +622,49 @@ zero `unwrap()`/`expect()` panics), interrupt-driven hub change detection, `Read for large disk support, and a USB quirk table expanded from 8 to 146 entries with 22 quirk flags mined from Linux 7.0. +Recent bounded maturity progress: + +- `xhcid` now tracks active alternate settings per interface in `PortState` and resolves endpoint + descriptors through that active-alternate map instead of flattening all interface descriptors + indiscriminately. +- Direct unit coverage now exists for both default-alternate endpoint selection and + alternate-setting-aware endpoint remapping. +- `xhcid` now also preserves previously selected alternates on the same configuration and applies a + requested interface/alternate override before endpoint planning, so alternate-setting + reconfiguration no longer silently falls back to all-zero defaults. 
+- `xhcid` endpoint-direction lookup now also follows the active interface/alternate selection state + instead of reading from the first configuration/interface pair unconditionally. +- `xhcid` driver spawning now also follows the selected configuration and active alternate map + instead of hardcoding the first configuration and ignoring non-zero alternates. +- `xhcid` now keeps per-port lifecycle state so detach blocks new transfer/configure/suspend/resume + work, waits for in-flight operations to drain, and removes the published port state only after + slot disable succeeds. +- `xhcid` endpoint configuration is now transactional: software endpoint bookkeeping stays staged + until `CONFIGURE_ENDPOINT` and optional `SET_CONFIGURATION` succeed, and the input context is + restored with an explicit rollback attempt on failure. +- the xHCI IRQ reactor now replaces the old `TODO: grow event ring` stub with a preserve-and-grow + path that copies unread event TRBs into a larger event ring and reprograms ERST registers instead + of dropping pending events during `EventRingFull` recovery. +- `usbhubd` now derives USB 2 hub TT Think Time from the hub descriptor using the same bounded + Linux-compatible encoding and passes it through `ConfigureEndpointsReq`, and `xhcid` now writes + that value into the Slot Context TT information bits for hub devices. +- xHCI endpoint-context calculations are now protocol-speed-aware for SuperSpeedPlus, so interval + and ESIT-payload selection use the resolved port protocol speed instead of relying only on + endpoint companion presence. + All validation is QEMU-only. No real hardware USB testing exists. 
-The remaining gaps fall into three categories: +The remaining gaps now fall into two categories: -**Still-open software work (implementable without hardware):** -- Composite-device endpoint selection across interfaces (xHCI scheme.rs — `//TODO: USE ENDPOINTS FROM ALL INTERFACES`) -- Non-default configuration and alternate-setting support (xHCI scheme.rs) -- SuperSpeedPlus differentiation via Extended Port Status -- TTT (Think Time) propagation from parent hub descriptor into Slot Context -- Event ring growth does not copy pending TRBs from old ring (may lose events under sustained load) - -**Architectural redesign (cross-cutting, not USB-internal):** +**Broader architectural work (cross-cutting, not a small bounded USB-only fix):** +- Any remaining USB composite/device-model issues now belong to wider device-model/design cleanup + rather than one more isolated helper patch. - HID producer modernization: per-device streams, hotplug add/remove (requires inputd redesign) - Userspace USB API: `libusb` WIP, no coherent native story **Hardware-dependent or design decisions:** - Real hardware validation: no controller tested outside QEMU -- Hot-plug stress testing +- Hot-plug stress testing beyond the new bounded QEMU lifecycle harness - Storage write validation (bounded sector-0 readback proof now exists in QEMU via `test-usb-storage-qemu.sh`, but guest-side write verification to the USB-backed block device is still open) - usbhubd 1-second polling fallback (only exercisable with real hub hardware) - Modern USB scope decision: device mode / USB-C / PD diff --git a/local/docs/USB-STORAGE-SPEED-AND-DEVICES.md b/local/docs/USB-STORAGE-SPEED-AND-DEVICES.md deleted file mode 100644 index 1ef66ac5..00000000 --- a/local/docs/USB-STORAGE-SPEED-AND-DEVICES.md +++ /dev/null @@ -1,365 +0,0 @@ -# Red Bear OS USB Storage, Speed, and Device Integration - -## Purpose - -This document covers USB subsystem areas that the main USB implementation plan -(`USB-IMPLEMENTATION-PLAN.md`) 
treats at a higher level: mass storage quality, filesystem -integration, device speed handling, backwards compatibility, and integrated USB device paths. - -It is a companion document, not a replacement. Read both together for the complete USB picture. - -## Current Headline - -USB mass storage is **present in the codebase but disabled**. The driver table entry for -`usbscsid` is commented out in `drivers.toml` with the note "#TODO: causes XHCI errors". HID -(keyboard/mouse) and hub handling are enabled and functional in QEMU. - -## USB Mass Storage - -### Architecture - -``` -USB device → xhcid (scheme:usb) → usbscsid → driver_block::DiskScheme → /scheme/disk.usb-* - → filesystem (redoxfs/ext4d) -``` - -| Layer | Component | Status | -|---|---|---| -| Transport | BOT (Bulk-Only Transport) | ✅ Implemented — CBW/CSW signatures validated, tag matching, stall recovery | -| Protocol | SCSI SBC | ⚠️ Partial — READ(16)/WRITE(16) only, missing READ(10)/WRITE(10) | -| Block device | driver_block::DiskScheme | ✅ Functional — registers as `/scheme/disk.usb-{scheme}+{port}-scsi` | -| Partitions | partitionlib (MBR/GPT) | ✅ Parsed on init — exposes `0p0`, `0p1`, etc. 
| -| Filesystems | redoxfs, ext4d | ✅ Can mount USB block devices via scheme path | - -### BOT Transport Quality - -The BOT transport in `usbscsid/src/protocol/bot.rs` is well-implemented: - -- **CBW handling**: Correct signature (`0x43425355`), per-command tag increment, direction bit, LUN field -- **CSW handling**: Signature validation, tag matching, residue tracking, short packet tolerance -- **Stall recovery**: `ClearFeature(ENDPOINT_HALT)` on both endpoints, `Bulk-Only Mass Storage Reset` - class request (0xFF) for full reset recovery, re-check for persistent stalls -- **Phase errors**: Detected and reported via `ProtocolError` - -### SCSI Command Completeness - -| Command | CDB Size | Status | Notes | -|---|---|---|---| -| INQUIRY | 6 | ✅ | Standard + vendor inquiry data | -| REQUEST SENSE | 6 | ✅ | Fixed-format sense data | -| READ CAPACITY(10) | 10 | ✅ | 32-bit LBA, used as first probe | -| READ CAPACITY(16) | 16 | ✅ | 64-bit LBA, auto-fallback from RC(10) max | -| READ(16) | 16 | ✅ | Primary read path | -| WRITE(16) | 16 | ✅ | Primary write path | -| MODE SENSE(6) | 6 | ✅ | Block descriptor fallback | -| MODE SENSE(10) | 10 | ✅ | Primary block size/count source | -| READ(10) | 10 | ❌ **Missing** | Required for older/simpler devices | -| WRITE(10) | 10 | ❌ **Missing** | Required for older/simpler devices | -| SYNCHRONIZE CACHE | 10 | ⚠️ Opcode only | Never issued | -| START STOP UNIT | 6 | ⚠️ Opcode only | Never issued | -| TEST UNIT READY | 6 | ⚠️ Opcode only | Never issued | -| REPORT LUNS | 12 | ❌ **Missing** | Needed for multi-LUN devices | -| MODE SELECT(6/10) | 6/10 | ❌ **Missing** | Needed for parameter negotiation | -| FORMAT UNIT | 6 | ❌ **Missing** | Rarely needed | - -**Critical gap**: READ(10)/WRITE(10) are not implemented. The daemon uses 16-byte CDBs exclusively. -Devices that only support 10-byte SCSI commands (some older USB flash drives, embedded firmware -devices) will fail. 
Adding READ(10)/WRITE(10) with automatic fallback (similar to how -READ CAPACITY(10)→(16) already works) is a concrete, bounded improvement. - -### LUN Support - -`get_max_lun()` reads the device's max LUN count via class-specific request, but the daemon -hardcodes `cbw.lun = 0` for all commands. Multi-LUN devices (card readers, multi-slot adapters) -will only expose the first LUN. Supporting multiple LUNs requires: - -1. Iterating from 0 to max_lun -2. Creating separate `UsbDisk` instances per LUN -3. Registering separate `DiskScheme` paths per LUN - -### UAS (USB Attached SCSI) - -UAS is not implemented. The protocol factory in `protocol/mod.rs` only matches protocol 0x50 (BOT). -Protocol 0x62 (UAS) is absent. UAS provides: - -- Multiple simultaneous command pipes (vs BOT's single serialize-execute-wait) -- Stream-based transfers for higher throughput -- Better error recovery semantics - -For USB 3.0 SuperSpeed devices, UAS can provide significantly higher throughput than BOT. - -### Transfer Size Limitations - -- BOT max transfer: 64KB per command (driver_interface.rs hard limit) -- Stream transfer chunk: 32KB per iteration (hardcoded in TransferStream) -- No scatter-gather: all data must fit in a single buffer (explicit TODO in scsi/mod.rs) - -### Why usbscsid is Disabled - -The comment says "#TODO: causes XHCI errors". This likely relates to: - -1. Bulk endpoint configuration issues in the xHCI driver -2. The 64KB transfer limit causing multi-block reads to fragment incorrectly -3. Missing endpoint stall handling during initial enumeration - -**Re-enabling usbscsid is a prerequisite for USB storage validation.** - -## USB Speed Handling - -### Speed Detection - -The xHCI driver detects device speed via PORTSC register bits 10–13. 
The `ProtocolSpeed` struct -(in `xhci/extended.rs`) classifies speeds: - -| Speed | Bitrate | Detection | Status | -|---|---|---|---| -| Low Speed | 1.5 Mbps | `is_lowspeed()` | ✅ Detected | -| Full Speed | 12 Mbps | `is_fullspeed()` | ✅ Detected | -| High Speed | 480 Mbps | `is_highspeed()` | ✅ Detected | -| SuperSpeed Gen1 x1 | 5 Gbps | `is_superspeed_gen1x1()` | ✅ Detected | -| SuperSpeedPlus Gen2 x1 | 10 Gbps | `is_superspeedplus_gen2x1()` | ✅ Detected | -| SuperSpeedPlus Gen1 x2 | 10 Gbps x2 | `is_superspeedplus_gen1x2()` | ✅ Detected | -| SuperSpeedPlus Gen2 x2 | 20 Gbps x2 | `is_superspeedplus_gen2x2()` | ✅ Detected | - -### Default Control Pipe Max Packet Size - -The driver sets the default control pipe max packet size based on speed: - -| Speed | Max Packet Size | Location | -|---|---|---| -| Low/Full Speed | 8 bytes | mod.rs:1128 | -| High Speed | 64 bytes | mod.rs:1131 | -| SuperSpeed | 512 bytes | mod.rs:1134 | - -### Transfer Type Support - -| Transfer Type | USB Role | Status | Notes | -|---|---|---|---| -| Control | Configuration, enumeration | ✅ Works | Endpoint 0 only | -| Bulk | Mass storage, network | ✅ Works | Used by usbscsid | -| Interrupt | HID, hub status | ✅ Works | Used by usbhubd, usbhidd | -| Isochronous | Audio, video | ❌ ENOSYS | `scheme.rs` explicitly returns `ENOSYS` | - -Isochronous transfers are required for USB audio devices, webcams, and streaming applications. -The driver returns `ENOSYS` (function not implemented) for all isochronous endpoint requests. - -## Backwards Compatibility - -### Transaction Translator (TT) Handling — STUBBED - -USB 1.x Low Speed and Full Speed devices connected behind USB 2.0 High Speed hubs require a -Transaction Translator (TT) to convert between USB 1.x and USB 2.0 protocols. The xHCI -specification handles TT internally in the controller, but the driver must provide correct -parent hub information in the Slot Context during device addressing. 
- -**Current state**: All TT-related fields are hardcoded: - -| Field | Value | Should Be | Location | -|---|---|---|---| -| `mtt` (Multi-TT) | `false` | Read from parent hub descriptor | mod.rs:1057 | -| `ttt` (TT Think Time) | `0` | Encoded from parent hub descriptor | mod.rs:1114 | -| `needs_parent_info` | `true` (forced) | Based on actual device speed topology | mod.rs:1070 | - -The TODOs at mod.rs:1066–1068 explicitly state the values need to be determined from actual -device speed and hub topology. Without correct TT information: - -- Low Speed devices (1.5 Mbps) behind USB 2.0 hubs may not enumerate correctly on real hardware -- Full Speed devices (12 Mbps) behind USB 2.0 hubs may fail during bulk transfers -- Multi-TT hubs with multiple LS/FS devices attached may have timing violations - -### USB 1.x Compatibility - -- **Low Speed (1.5 Mbps)**: Speed detection works. Default control pipe size correct (8 bytes). - TT handling stubbed — may fail on real hardware behind HS hubs. -- **Full Speed (12 Mbps)**: Speed detection works. Default control pipe size correct (8 bytes). - Same TT limitation. - -### USB 2.0 Compatibility - -- **High Speed (480 Mbps)**: Primary tested speed in QEMU. Bulk, Interrupt, Control all functional. - Max packet size 64 bytes correctly set. - -### USB 3.x Compatibility - -- **SuperSpeed (5 Gbps)**: Protocol speed detection works. BOS descriptor fetching implemented. - Max packet size 512 bytes correctly set. SuperSpeed Companion Descriptor parsed. -- **SuperSpeedPlus (10–20 Gbps)**: Protocol speed detection works. SuperSpeedPlus Isochronous - Companion Descriptor parsed. No functional testing. 
- -### Speed-Specific Gaps - -- No speed-specific timeout or retry tuning — all controller-level timeouts are 1-second hardcoded -- No burst transaction support for SuperSpeed bulk endpoints (burst field parsed but not used in - transfer scheduling) -- No streams support for USB 3.0 bulk endpoints (Stream ID capability parsed but not exercised) - -## Integrated USB Devices - -### Device Autospawn Flow - -``` -1. Device plugs in -2. xhcid detects port status change -3. xhcid resets port, enumerates device, reads config descriptor -4. spawn_drivers() iterates interfaces: - - For each interface with alternate_setting == 0: - - Match class code (+ optional subclass) against drivers.toml - - If match: spawn daemon with $SCHEME, $PORT, $IF_NUM/$IF_PROTO -5. Each spawned daemon opens its USB interface via xhcid_interface -6. Daemon registers its own scheme or connects to existing schemes -``` - -### Driver Table (`drivers.toml`) - -```toml -# Mass Storage — DISABLED (#TODO: causes XHCI errors) -#[[drivers]] -#name = "SCSI over USB" -#class = 8; subclass = 6 -#command = ["usbscsid", "$SCHEME", "$PORT", "$IF_PROTO"] - -[[drivers]] -name = "USB HUB"; class = 9; subclass = -1 -command = ["usbhubd", "$SCHEME", "$PORT", "$IF_NUM"] - -[[drivers]] -name = "USB HID"; class = 3; subclass = -1 -command = ["usbhidd", "$SCHEME", "$PORT", "$IF_NUM"] -``` - -### Supported Device Paths - -| Device Class | Daemon | Scheme Path | Integration | -|---|---|---|---| -| **Hub** (class 9) | `usbhubd` | Manages child ports via xhci scheme | Triggers nested device enumeration | -| **HID** (class 3) | `usbhidd` | Writes to `/scheme/input/producer` via inputd | Legacy display/input consumers read `/scheme/input` | -| **Mass Storage** (class 8) | `usbscsid` | Registers `disk.usb-{scheme}+{port}-scsi` | Filesystems mount via scheme path | - -### HID Integration Detail - -``` -USB keyboard/mouse → xhcid → usbhidd → inputd (scheme:input) → display/input consumer - ↑ - /scheme/input/producer (drivers 
write here) - /scheme/input/consumer (display server reads here) - /scheme/input/handle/{name} (per-device handles) -``` - -- `usbhidd` implements boot protocol HID (keyboard, mouse, scroll, button) -- Events: `orbclient::KeyEvent` for keyboards, `orbclient::MouseEvent`/`ButtonEvent`/`ScrollEvent` -- The `inputd` multiplexer collects from all input producers (USB HID, PS/2 via `ps2d`, etc.) - -### Storage Integration Detail - -``` -USB flash drive → xhcid → usbscsid → DiskScheme → /scheme/disk.usb-usb+1-scsi - ↓ - redoxfs/ext4d mount - ↓ - /scheme/file/{mount-point} -``` - -- `DiskScheme` from `driver-block` provides block I/O via scheme -- Partition table parsing via `partitionlib` (MBR + GPT) -- Partitions exposed as `0p0`, `0p1`, etc. under the disk scheme - -### Composite Device Handling - -Composite USB devices (e.g., keyboard+mouse combo, keyboard+trackpad) are partially supported: - -- xhcid iterates **all interfaces** in the first configuration -- Each interface matching a `drivers.toml` entry spawns its own daemon process -- Alternate settings (`alternate_setting != 0`) are explicitly skipped -- No vendor/product ID matching — class code only - -**What works**: A keyboard+mouse combo with two HID interfaces will spawn two `usbhidd` processes, -each handling one interface. Both produce input events through inputd. - -**What doesn't work**: Devices requiring alternate settings for full functionality. Devices needing -vendor-specific drivers. 
- -### Unsupported Device Classes - -These USB device classes have no driver in Red Bear OS: - -| Class | Name | Use Case | Blocker | -|---|---|---|---| -| 0x01 | Audio | USB headsets, speakers | Isochronous transfers not implemented (ENOSYS) | -| 0x0E | Video | Webcams | Isochronous transfers not implemented | -| 0x02 | CDC/ACM | USB serial, modems | No driver written | -| 0x0A | CDC-Data | USB networking | No driver written | -| 0x0B | Chip Card | Smart card readers | No driver written | -| 0x0D | Content Security | Conditional access | No driver written | -| 0x0F | Personal Healthcare | Medical devices | No driver written | -| 0x06 | Still Image | Cameras (PTP/MTP) | No driver written | -| 0x07 | Printer | USB printers | No driver written | -| 0x10 | Audio/Video | AV devices | Isochronous required | -| 0x11 | Billboard | USB-C alternate mode | No driver written | -| 0x12 | USB Type-C Bridge | USB-C muxes | No driver written | -| 0xDC | Diagnostic | USB debug | No driver written | -| 0xE0 | Wireless Controller | Bluetooth, Wi-Fi dongles | Separate (redbear-btusb) | -| 0xEF | Miscellaneous | Firmware update, etc. | No driver written | -| 0xFF | Vendor Specific | Custom devices | No driver written | - -## Implementation Priorities - -### Priority 1: Re-enable USB Mass Storage - -The most impactful single change. Requires diagnosing and fixing the "causes XHCI errors" issue. -Likely causes: - -1. Bulk endpoint configuration in scheme.rs — endpoint type mismatch during configuration -2. Transfer size handling — the 64KB limit may fragment BOT CBW/CSW sequences -3. Missing stall recovery during initial BOT reset - -### Priority 2: Add READ(10)/WRITE(10) - -Implement 10-byte CDB variants with automatic fallback. Pattern already exists for READ CAPACITY. 
-Required for device compatibility: - -```rust -// Proposed fallback pattern (matching existing RC10→RC16 pattern): -pub fn read(&mut self, lba: u64, buf: &mut [u8], protocol: &mut dyn Protocol) -> Result<()> { - if lba <= u32::MAX as u64 && buf.len() <= u32::MAX as usize { - // Try READ(10) first — wider device compatibility - let cmd = self.cmd_read10()?; - *cmd = cmds::Read10::new(lba as u32, ...); - ... - } - // Fall back to READ(16) for large addresses -} -``` - -### Priority 3: Fix Transaction Translator Handling - -Replace hardcoded TT values with actual parent hub descriptor data. This is required for real -hardware where LS/FS devices sit behind HS hubs. - -### Priority 4: Multi-LUN Support - -Iterate device LUNs and create separate disk scheme instances per LUN. Required for card readers -and multi-slot adapters. - -### Priority 5: Isochronous Transfers - -Implement the Isoch TRB path in scheme.rs to enable USB audio and video device classes. - -### Priority 6: UAS Transport - -Add USB Attached SCSI protocol support for SuperSpeed storage devices. Higher throughput than BOT -but requires stream ID support in the xHCI driver. - -## Summary - -USB mass storage exists in the codebase with a well-implemented BOT transport, proper SCSI command -set (with gaps in READ/WRITE(10)), and functional block device integration — but it is **disabled** -due to xHCI errors during device configuration. The most impactful work is diagnosing and fixing -that issue, then adding READ(10)/WRITE(10) for wider device compatibility. - -Speed handling covers the full range from Low Speed (1.5 Mbps) to SuperSpeedPlus (20 Gbps) at the -detection level, but TT handling is stubbed and isochronous transfers return ENOSYS. Backwards -compatibility for USB 1.x devices behind USB 2.0 hubs requires TT fix work. - -Device integration supports hubs and HID via autospawn. Composite devices get all interfaces -handled. No vendor/product matching exists. 
No audio, video, serial, or networking USB device -classes have drivers. diff --git a/local/docs/WAYLAND-IMPLEMENTATION-PLAN.md b/local/docs/WAYLAND-IMPLEMENTATION-PLAN.md new file mode 100644 index 00000000..c6c3f5a8 --- /dev/null +++ b/local/docs/WAYLAND-IMPLEMENTATION-PLAN.md @@ -0,0 +1,353 @@ +# Red Bear OS Wayland Implementation Plan + +**Version:** 1.0 (2026-04-19) +**Status:** Canonical Wayland subsystem plan +**Supersedes:** `docs/03-WAYLAND-ON-REDOX.md` as the active Wayland planning document + +## Purpose + +This is the single authoritative Red Bear Wayland subsystem plan. + +It replaces the planning role previously held by `docs/03-WAYLAND-ON-REDOX.md` and consolidates the +current Wayland story into one document that answers four questions clearly: + +1. what in the Wayland stack actually builds, +2. what has runtime proof, +3. what still blocks a trustworthy compositor/session claim, +4. and what work must happen next, in what order, to close those gaps. + +This plan is subordinate to the canonical desktop path in +`local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` and to the current build/runtime truth in +`local/docs/DESKTOP-STACK-CURRENT-STATUS.md`, but it is the canonical subsystem plan for the +Wayland layer beneath that desktop path. + +## Truth Statement + +Red Bear Wayland is **partially complete and still experimental**. 
+ +What is true today: + +- the base package stack is substantially build-visible: `libwayland`, `wayland-protocols`, Mesa + EGL/GBM/GLES2, Qt Wayland, libinput, seatd, and KWin-related package surfaces all build in some + form, +- the tracked Wayland validation profile, `redbear-wayland`, builds and boots in QEMU, +- the bounded validation path reaches compositor early init, xkbcommon initialization, and Redox EGL + platform selection, +- `qt6-wayland-smoke` is a real bounded client-side proof target, +- but there is still **no complete Wayland compositor session**, **no runtime-trusted input/session + path**, and **no hardware-accelerated Wayland proof**. + +This means Wayland is no longer blocked mainly by package absence. It is blocked by the gap between +**build-visible packaging** and **runtime-trusted compositor/session behavior**. + +## Scope + +This plan covers the Red Bear Wayland subsystem from protocol/runtime substrate up to a bounded +working compositor session, and then its handoff into the KWin desktop path. + +In scope: + +- `libwayland`, `wayland-protocols`, protocol generation, and residual patch reduction, +- the `redbear-wayland` validation profile, +- compositor runtime validation, +- evdevd / udev-shim / libinput / seatd integration as they affect Wayland, +- Mesa/GBM/EGL software-path proof and the Wayland-facing graphics runtime, +- KWin as the intended production Wayland compositor path, +- local overlay ownership decisions for Wayland components and validation harnesses. + +Out of scope: + +- full KDE Plasma session assembly beyond its Wayland-facing dependencies, +- hardware GPU render enablement strategy in detail (owned by the DRM plan), +- Wi-Fi, Bluetooth, USB, and low-level controller work except where they directly block Wayland + runtime trust. + +## Authority Chain + +Use the doc set in this order: + +1. `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` — top-level desktop sequencing authority +2. 
`local/docs/DESKTOP-STACK-CURRENT-STATUS.md` — current desktop/Wayland truth +3. `local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` — Wayland subsystem plan beneath the desktop path +4. `local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md` — GPU/DRM execution detail +5. `local/docs/QT6-PORT-STATUS.md` — Qt/KF6/KWin package-level build status + +The following are historical or reference-only after this plan: + +- `docs/05-KDE-PLASMA-ON-REDOX.md` — historical KDE rationale +- older WIP compositor notes such as the `smallvil` path — historical bounded validation references + +## Evidence Model + +This plan uses the same strict evidence classes as the canonical desktop path: + +| Class | Meaning | Safe to say | Not safe to say | +|---|---|---|---| +| **builds** | package compiles and stages | “builds” | “works” | +| **boots** | image reaches prompt or known runtime surface | “boots” | “desktop works” | +| **enumerates** | scheme/device node appears and answers bounded queries | “enumerates” | “usable end to end” | +| **usable** | bounded runtime path performs intended task | “usable for this path” | “broadly stable” | +| **validated** | repeated proof on intended target class | “validated” | “complete everywhere” | +| **experimental** | partial, scaffolded, or runtime-untrusted | “experimental” | “done” | + +Rules: + +- compile-only success is still only **builds**, +- QEMU-only success stays QEMU-bounded, +- a compositor that reaches early init but never completes a session is still **experimental**, +- KWin and Plasma build success does not imply Wayland session viability. 
+
+## Current State Assessment
+
+### Stable enough to rely on for planning
+
+| Area | Current state | Notes |
+|---|---|---|
+| `redbear-wayland` profile | builds, boots | bounded validation profile only |
+| `libwayland` | builds | still carries Redox-specific recipe/source rewriting and residual patching |
+| `wayland-protocols` | builds | protocol packaging is not the blocker |
+| Qt6 Wayland client path | builds, partial runtime | `qt6-wayland-smoke` is installed, runs in the bounded harness, and leaves runtime markers; visible in-compositor window proof is still open |
+| Mesa EGL + GBM + GLES2 | builds | software path via LLVMpipe proven in QEMU |
+| evdevd / udev-shim / firmware-loader / redox-drm | builds, boots, enumerates | runtime trust still bounded |
+| libinput | builds | udev disabled in recipe; runtime integration still open |
+| seatd | builds | runtime trust still open; lease path still unproven |
+| KWin reduced path | experimental build | honest reduced dependency path, but runtime/session proof missing |
+
+### What remains incomplete
+
+| Area | Current gap |
+|---|---|
+| Compositor runtime | no complete Wayland compositor session |
+| Input path | no end-to-end proof that evdevd → libinput → compositor is trustworthy |
+| Session path | no runtime-trusted seat/session proof for KWin path |
+| Hardware graphics | no hardware-accelerated Wayland proof |
+| KWin truthfulness | build is reduced and partially dependency-honest, but still not a runtime-ready session |
+| WIP ownership | upstream WIP recipes and local overlays are mixed; forward path is not always explicit |
+
+## Stability / Completeness Verdict
+
+### Stability
+
+Wayland is **not stable enough** for a broad support claim.
+ +Reason: + +- runtime proof is still limited to a bounded QEMU validation harness, +- the compositor path reaches early init but not a complete session, +- input/session integration is not yet runtime-trusted, +- the intended production path (KWin) is still runtime-incomplete. + +### Completeness + +Wayland is **build-substantially-complete but runtime-incomplete**. + +The stack is no longer missing its main package layers. It is missing: + +- complete compositor runtime proof, +- complete input/session integration proof, +- hardware-path proof, +- and a cleaner local ownership story for the forward path versus historical references. + +## Main Gaps and Blockers + +### G1. Runtime trust trails build success + +This is the biggest real blocker. + +Current examples: + +- `libwayland` builds, but runtime behavior is not yet trusted as a full compositor foundation, +- libinput builds, but its runtime path through evdevd/udev-shim is still open, +- seatd builds, but the compositor/session path still lacks runtime proof, +- `redox-drm` enumerates and supports bounded display tooling, but Wayland compositor runtime is not + yet trusted on top of it. + +### G2. No complete compositor session + +The bounded validation compositor path is still an **early-init harness**, not a working session. + +Current proof stops at: + +- launch surface present, +- xkbcommon init reached, +- Redox EGL platform selected, +- Qt smoke markers present. + +That is useful, but it is still not the same thing as: + +- a visible, durable Wayland session, +- a client that connects and stays usable, +- input routing proven through the compositor, +- or a trustworthy handoff into KWin session work. + +### G3. KWin remains the intended path, but it is still runtime-incomplete + +KWin is the forward compositor direction, not smallvil or COSMIC. 
+
+Current truth:
+
+- the recipe exists,
+- the reduced path is more honest than before,
+- but it still carries disabled features and incomplete runtime/session proof,
+- therefore it must not yet be described as a working compositor path.
+
+### G4. The input/session stack is build-visible but still operationally incomplete
+
+Key issues:
+
+- libinput is still built with udev disabled,
+- seatd runtime proof is still open,
+- compositor-side device discovery and hotplug behavior remain bounded or incomplete,
+- `seatd-redox` remains a live local TODO and not a closed runtime path.
+
+### G5. Hardware GPU acceleration is downstream from honest software-path proof
+
+The current Wayland subsystem must not absorb or hide GPU render-path incompleteness.
+
+Current truth:
+
+- software-path Mesa/GBM/EGL is the valid bounded proof path,
+- hardware acceleration remains blocked on shared GPU/DRM work outside the Wayland package layer,
+- therefore hardware claims must stay in the DRM plan, not be implied by Wayland package success.
+
+## Ownership and Forward Path
+
+### Red Bear-owned forward path
+
+The forward path is now:
+
+- `redbear-wayland` for bounded compositor/runtime validation,
+- `redbear-full` / `redbear-live-full` as the supported compile surface for the intended KWin Wayland desktop direction (`redbear-kde` references are historical/staging, not supported compile targets),
+- local overlay ownership for validation harnesses and any shipping-critical Wayland recipe deltas.
+
+### Historical or non-forward references
+
+These should not be treated as the forward path:
+
+- `smallvil` — historical bounded validation compositor reference only,
+- the generic upstream WIP compositor set (`wlroots`, `sway`, `hyprland`, etc.) — useful inputs, not
+  trusted Red Bear shipping surfaces,
+- `docs/03-WAYLAND-ON-REDOX.md` — retired as a planning document.
+
+## Implementation Plan
+
+This plan keeps Wayland aligned with the canonical desktop path, but narrows the work specifically to
+Wayland subsystem needs.
+ +### Wave 1 — Runtime substrate closure for Wayland consumers + +**Goal:** turn the Wayland substrate from build-visible into runtime-trusted. + +**Must prove:** + +1. `libwayland` runtime behavior against the current relibc event/fd surfaces, +2. evdevd → libinput → compositor-facing input viability, +3. udev-shim enumeration sufficient for current Wayland-facing consumers, +4. firmware-loader + `redox-drm` + bounded KMS/display evidence adequate for the validation path. + +**Acceptance criteria:** + +- bounded relibc/libwayland runtime smoke is repeatable, +- bounded input path reaches compositor-facing consumers without hand-wavy assumptions, +- bounded display path still passes the current runtime harness after the input/session wiring is + tightened, +- no current claim depends on a package merely compiling. + +### Wave 2 — Complete the bounded compositor validation path + +**Goal:** convert the current early-init harness into a real bounded software compositor proof. + +**What success means:** + +- compositor runs for a bounded interval without crashing, +- `WAYLAND_DISPLAY` is live, +- a client connects and survives, +- the current `qt6-wayland-smoke` path remains a visible bounded proof target, +- input is proven through the active compositor surface, not just through lower-layer scheme checks. + +**Important rule:** + +This wave is still a **validation compositor** wave, not a claim that KWin or Plasma is working. + +### Wave 3 — KWin runtime truthfulness + +**Goal:** turn the current reduced KWin build into an honest runtime target. + +**Required work:** + +- keep dependency honesty explicit, +- prove which remaining stubs/shims are still acceptable for bounded runtime work, +- establish one bounded KWin session proof before any Plasma support claim, +- keep disabled features and bounded providers visible in the support language. 
+ +**Acceptance criteria:** + +- KWin starts as the compositor on the tracked path, +- the runtime session survives for a bounded interval, +- session/login1/D-Bus surfaces needed by KWin are observable, +- support claims still remain profile-scoped and bounded. + +### Wave 4 — Ownership cleanup and stale-path retirement + +**Goal:** make the doc/recipe story match the real forward path. + +**Required work:** + +- retire old planning authority from historical Wayland docs, +- demote or remove stale historical compositor references from the active guidance path, +- make the WIP recipe guidance reflect current truth instead of older partial states, +- keep local overlay ownership explicit wherever Red Bear is still the effective shipping owner. + +**Acceptance criteria:** + +- one canonical Wayland subsystem plan exists, +- stale planning references are removed, +- historical references are clearly marked historical, +- no active doc suggests that smallvil or generic upstream WIP compositor recipes are the forward + Red Bear desktop path. + +## What This Plan Supersedes + +This plan supersedes the active planning role previously held by: + +- `docs/03-WAYLAND-ON-REDOX.md` + +It also reduces ambiguity in these adjacent surfaces: + +- `recipes/wip/AGENTS.md` Wayland status notes, +- `docs/02-GAP-ANALYSIS.md` Wayland references, +- current-status and canonical-plan references that still pointed to the old Wayland roadmap. + +## Docs To Keep vs. 
Retire + +### Keep + +- `local/docs/WAYLAND-IMPLEMENTATION-PLAN.md` — canonical Wayland subsystem plan +- `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` — current truth summary +- `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` — canonical desktop path +- `local/docs/DRM-MODERNIZATION-EXECUTION-PLAN.md` — GPU/DRM execution detail +- `local/docs/QT6-PORT-STATUS.md` — Qt/KF6/KWin package build status + +### Retire or demote + +- `docs/03-WAYLAND-ON-REDOX.md` — remove as an active planning document +- stale WIP Wayland status text that still implies `smallvil` is current or that package build status + equals runtime viability + +## Definition of Done + +Wayland can be called substantially complete for the current subsystem scope only when all of the +following are true: + +- the bounded Wayland runtime path completes a usable software compositor session, +- runtime input/session/device-enumeration behavior is trusted enough to support that claim, +- KWin has at least one honest bounded runtime proof path, +- current docs describe the same truth with no stale forward-path confusion, +- hardware acceleration remains either separately proven or explicitly outside the claim. + +## Current Bottom Line + +Red Bear Wayland is no longer blocked primarily by package absence. It is blocked by runtime trust, +compositor completion, session/input integration, and honest ownership of the forward path. + +That is the real work. This plan makes that explicit. 
diff --git a/local/docs/WIFI-IMPLEMENTATION-PLAN.md b/local/docs/WIFI-IMPLEMENTATION-PLAN.md index 496a8da2..c2f92f59 100644 --- a/local/docs/WIFI-IMPLEMENTATION-PLAN.md +++ b/local/docs/WIFI-IMPLEMENTATION-PLAN.md @@ -278,6 +278,14 @@ The best Red Bear Wi-Fi path is **native-first**: - Narrow `linux-kpi` glue only where useful (93 tests, 17 modules) - Native `smolnetd` / `netcfg` / `dhcpd` reused after association +Current bounded extraction progress: + +- `redbear-wifictl` transport probing now consumes the shared `redox-driver-sys` PCI parser + instead of relying only on ad hoc raw-config interpretation. +- Transport status now reports quirk-aware interrupt support (`none` / `legacy` / `msi` / `msix`) + from the shared substrate, which is the intended convergence direction for future GPU/Wi-Fi-only + donor usage under `linux-kpi`. + The codebase has 119 tests passing (93 linux-kpi + 8 redbear-iwlwifi + 18 redbear-wifictl), no production `unwrap()` in the Wi-Fi daemon request loop (startup uses `expect()`), atomic command handling, proper timer cancellation, honest timeout reporting, and real 802.11 frame parsing. The structural skeleton is solid. The next required step is **real hardware validation** with an diff --git a/local/docs/XHCID-DEVICE-IMPROVEMENT-PLAN.md b/local/docs/XHCID-DEVICE-IMPROVEMENT-PLAN.md new file mode 100644 index 00000000..c6935c72 --- /dev/null +++ b/local/docs/XHCID-DEVICE-IMPROVEMENT-PLAN.md @@ -0,0 +1,360 @@ +# xhcid Device-Level Improvement Plan + +## Purpose + +This document defines the implementation sequence for hardening `xhcid` at the device level in +Red Bear OS. + +It is a focused companion to `local/docs/USB-IMPLEMENTATION-PLAN.md`. The USB plan remains the +subsystem-wide authority; this document narrows scope to the `xhcid` device lifecycle, +configuration, teardown, PM behavior, enumerator robustness, and bounded proof coverage. 
+ +## Scope + +In scope: + +- `recipes/core/base/source/drivers/usb/xhcid/src/xhci/device_enumerator.rs` +- `recipes/core/base/source/drivers/usb/xhcid/src/xhci/mod.rs` +- `recipes/core/base/source/drivers/usb/xhcid/src/xhci/scheme.rs` +- `recipes/core/base/source/drivers/usb/xhcid/src/xhci/irq_reactor.rs` +- bounded QEMU validation scripts under `local/scripts/` +- canonical USB documentation under `local/docs/` + +Out of scope: + +- generic USB redesign +- unrelated class-driver feature work +- hardware-validation claims beyond what the repo can currently prove + +## Repo-Fit Note + +Technical implementation targets live in upstream-owned source under +`recipes/core/base/source/...`, but durable Red Bear preservation belongs in +`local/patches/base/`. This plan names the technical work locations, not a recommendation to leave + work stranded only in upstream-owned trees. + +## Current Audited Findings + +The current `xhcid` tree has already improved materially: + +- lifecycle gating exists through `PortLifecycle` and `PortOperationGuard` +- `configure_endpoints_once()` is now transactional relative to earlier behavior +- detach waits before removing published state +- a bounded QEMU lifecycle proof exists + +Remaining risks: + +- partial attach visibility still exists around publication timing +- detach can still depend on bounded-but-incomplete purge semantics +- suspend/resume is still mainly software gating +- rollback failure is not yet a fully hardened degraded-state path +- enumerator logic still relies on timing- and assumption-heavy behavior +- proof coverage is still QEMU-bounded and misses key interleavings + +## Design Invariants + +The implementation should satisfy these invariants: + +1. No half-attached device is publicly usable. +2. No new work is admitted after detach begins. +3. Detach always reaches a bounded terminal outcome. +4. Failed configure leaves either the old config intact or the device explicitly + degraded/reset-required. +5. 
PM transitions reflect actual usable state, not only software policy. +6. Enumerator behavior is bounded and diagnosable, not panic-driven. +7. Validation claims match what scripts actually prove. + +## Phase 1 — Proof-First Expansion + +### Goal + +Make the current blind spots reproducible before changing behavior. + +### Work + +- extend `test-xhci-device-lifecycle-qemu.sh` +- extend `test-usb-qemu.sh` +- extend `test-xhci-irq-qemu.sh` +- add bounded injection hooks in `xhcid` for configure-failure and attach/detach timing cases + +### Required Cases + +- repeated attach/detach +- detach during storage startup +- transfer-during-detach surrogate +- configure failure injection +- suspend/resume admission checks +- rapid event ordering cases + +### Per-File Focus + +#### `local/scripts/test-xhci-device-lifecycle-qemu.sh` + +- add repeated HID/storage attach-detach loops +- add detach-during-driver-start for storage +- add storage attach long enough to exercise startup/read activity before unplug +- require explicit attach-entered, attach-finished, detach-completed evidence + +#### `local/scripts/test-usb-qemu.sh` + +- separate boot progress from proof failure +- keep result lines distinct for xHCI init, HID spawn, SCSI spawn, bounded readback, and crash scan +- add repeated full-stack run mode or bounded loop count if needed for ordering-sensitive regressions + +#### `local/scripts/test-xhci-irq-qemu.sh` + +- verify interrupt-mode evidence still holds under actual attached-device pressure, not only empty-controller boot + +#### `xhci` test hooks + +- add bounded test-only failure hooks in `scheme.rs` / `mod.rs` for: + - fail after `CONFIGURE_ENDPOINT` + - fail after `SET_CONFIGURATION` + - optional delay before final attach commit +- current bounded implementation uses one-shot guest-side commands written to + `/tmp/xhcid-test-hook`, consumed by `xhcid` on the next matching lifecycle point + +### Exit Criteria + +- scripts are syntax-clean +- new cases fail 
meaningfully on current gaps +- failures identify the specific missed milestone + +## Phase 2 — Atomic Attach Publication + +### Goal + +Prevent half-built devices from becoming publicly reachable. + +### Work + +- refactor `Xhci::attach_device` +- split attach staging from published `PortState` +- narrow lifecycle exposure so scheme paths cannot reach a device before final commit +- make attach cleanup direct for prepublication failure + +### Key Targets + +- `xhci/mod.rs::Xhci::attach_device` +- `xhci/mod.rs::PortLifecycle::*` +- `xhci/device_enumerator.rs::DeviceEnumerator::run` + +### Per-File Focus + +#### `xhci/mod.rs` + +- stop inserting into `port_states` before all attach substeps complete +- keep slot, input context, EP0 ring, quirks, and descriptors in a private staging carrier +- commit published `PortState` in one final block +- keep prepublication cleanup separate from `detach_device()` where possible + +#### `xhci/device_enumerator.rs` + +- ensure duplicate connect handling still treats `EAGAIN` or equivalent as "already published" rather than "half-built staging state" + +### Exit Criteria + +- no public state before attach commit +- attach failure leaves no published device and no child driver + +## Phase 3 — Bounded Detach and Purge + +### Goal + +Make teardown bounded, dominant, and safe against stale completions. 
+ +### Work + +- bound `PortLifecycle::begin_detaching()` +- reject all new work immediately once detach starts +- purge or tombstone pending transfer/reactor state +- separate graceful drain from forced teardown +- preserve correct slot-disable/remove ordering +- ensure child-driver shutdown cannot wedge detach + +### Key Targets + +- `xhci/mod.rs` +- `xhci/irq_reactor.rs` +- transfer bookkeeping in `xhci/scheme.rs` + +### Per-File Focus + +#### `xhci/mod.rs` + +- add timeout or bounded wait to detach drain logic +- distinguish graceful drain from forced teardown +- keep `port_states.remove(...)` after terminal teardown outcome + +#### `xhci/irq_reactor.rs` + +- add per-port invalidation or tombstone behavior so stale completions cannot target removed state + +#### `xhci/scheme.rs` + +- ensure operation-entry helpers fail immediately once detach starts + +### Exit Criteria + +- detach cannot hang forever +- no stale completion can target removed device state +- unload-under-activity proof passes + +## Phase 4 — Configure Rollback Hardening + +### Goal + +Make configuration changes fully transactional and recoverable. 
+ +### Work + +- formalize stage/program/commit boundaries +- ensure snapshots cover all mutated controller-facing state +- promote rollback failure into explicit degraded-state handling +- define deterministic behavior for post-`SET_CONFIGURATION` failure +- keep alternate/config bookkeeping coherent after rollback +- quarantine or reset on unrecoverable ambiguity + +### Key Targets + +- `xhci/scheme.rs::configure_endpoints_once` +- `restore_configure_input_context` +- `configure_endpoints` +- `set_configuration` +- `set_interface` + +### Per-File Focus + +#### `xhci/scheme.rs` + +- keep endpoint/ring state staged until commit +- verify snapshots cover every mutated slot/endpoint field +- treat rollback failure as a first-class degraded state +- ensure post-failure descriptor and alternate bookkeeping still reflect live state + +### Exit Criteria + +- injected configure failure preserves old state or explicitly degrades/resets device +- no staged endpoint state leaks into live software state + +## Phase 5 — Real PM Sequencing + +### Goal + +Replace software-only PM gating with meaningful quiesce/resume semantics. + +### Work + +- define richer PM transition states +- quiesce before suspend +- tie resume to controller/device validity +- define PM interaction with detach +- define PM interaction with configure +- add bounded PM proof cases + +### Key Targets + +- `xhci/scheme.rs::suspend_device` +- `xhci/scheme.rs::resume_device` +- `xhci/scheme.rs::ensure_port_active` +- supporting helpers in `xhci/mod.rs` + +### Exit Criteria + +- suspend blocks new I/O only after quiesce starts +- resume only returns success from a genuinely usable state +- PM/detach/configure interleavings are deterministic + +## Phase 6 — Enumerator Cleanup and Timing Hardening + +### Goal + +Remove panic-style and magic-delay behavior from the enumerator path. 
+ +### Work + +- remove panic-class assumptions from `DeviceEnumerator::run` +- replace fixed sleeps with bounded readiness checks +- make duplicate/out-of-order event handling explicit +- align enumerator decisions with the new attach/detach state machine +- improve logging for reset/attach/detach milestones + +### Key Targets + +- `xhci/device_enumerator.rs` +- supporting interactions in `xhci/mod.rs` + +### Exit Criteria + +- no ordinary event path panics +- no unnecessary fixed sleep remains +- rapid event-order tests pass in QEMU + +## Phase 7 — Final Validation, Docs, and Preservation + +### Goal + +Close the loop with evidence, canonical docs, and durable patch carriers. + +### Work + +- rerun the full bounded proof matrix on a rebuilt image +- run source-level verification (`lsp_diagnostics`, `cargo check`, `cargo test`) +- update canonical docs: + - `local/docs/USB-IMPLEMENTATION-PLAN.md` + - `local/docs/USB-VALIDATION-RUNBOOK.md` +- refresh durable patch carriers under `local/patches/base/` +- delete only clearly stale, superseded docs after link sweep + +### Exit Criteria + +- all bounded USB/xHCI proofs pass on a fresh image +- changed files are diagnostics-clean +- canonical docs match actual proof scope +- patch carrier is refreshed and reapplicable + +## Validation Matrix + +Required final proofs: + +- `bash ./local/scripts/test-xhci-device-lifecycle-qemu.sh --check ` +- `bash ./local/scripts/test-usb-qemu.sh --check ` +- `bash ./local/scripts/test-xhci-irq-qemu.sh --check` +- `bash ./local/scripts/test-usb-maturity-qemu.sh ` + +Required source checks: + +- `lsp_diagnostics` on all changed files +- `cargo check` / `cargo test` for `xhcid` +- `cargo check` for any touched class daemon or helper crate + +## Commit Strategy + +1. proof/harness expansion +2. atomic attach publication +3. bounded detach and purge +4. configure rollback hardening +5. PM sequencing +6. enumerator cleanup +7. 
docs, patch preservation, stale-doc cleanup + +## Canonical Doc Authority + +Authoritative docs after cleanup: + +- `local/docs/USB-IMPLEMENTATION-PLAN.md` +- `local/docs/USB-VALIDATION-RUNBOOK.md` + +This xhcid plan is a focused implementation document beneath those subsystem-level authorities. + +## Completion Standard + +This work is complete only when: + +- all seven phases are done in order +- no changed-file diagnostics remain +- `xhcid` builds/tests cleanly +- bounded QEMU proof matrix passes on a rebuilt image +- canonical docs are synchronized +- durable patch carrier is refreshed +- remaining gaps, if any, are explicitly documented as future or hardware-only work diff --git a/local/patches/base/P1-xhcid-device-lifecycle.patch b/local/patches/base/P1-xhcid-device-lifecycle.patch new file mode 100644 index 00000000..aba95b91 --- /dev/null +++ b/local/patches/base/P1-xhcid-device-lifecycle.patch @@ -0,0 +1,2351 @@ +diff --git a/drivers/storage/usbscsid/src/main.rs b/drivers/storage/usbscsid/src/main.rs +index 5382d118..4130a5df 100644 +--- a/drivers/storage/usbscsid/src/main.rs ++++ b/drivers/storage/usbscsid/src/main.rs +@@ -1,20 +1,32 @@ + use std::collections::BTreeMap; + use std::env; ++use std::io::Write; + + use driver_block::{Disk, DiskScheme, ExecutorTrait}; +-use syscall::{Error, EIO}; +-use xhcid_interface::{ConfigureEndpointsReq, PortId, XhciClientHandle}; ++use syscall::{Error, EBADF, EBUSY, EIO}; ++use xhcid_interface::{ConfigureEndpointsReq, PortId, XhciClientHandle, XhciClientHandleError}; + + pub mod protocol; ++pub mod quirks; + pub mod scsi; + + use crate::protocol::Protocol; + use crate::scsi::Scsi; + ++fn is_startup_detach_error(err: &XhciClientHandleError) -> bool { ++ match err { ++ XhciClientHandleError::IoError(io_err) => matches!( ++ io_err.raw_os_error(), ++ Some(code) if code == EIO || code == EBADF || code == EBUSY ++ ), ++ _ => false, ++ } ++} ++ + fn main() { +- daemon::Daemon::new(daemon); ++ daemon(); + } +-fn daemon(daemon: 
daemon::Daemon) -> ! { ++fn daemon() -> ! { + let mut args = env::args().skip(1); + + const USAGE: &'static str = "usbscsid "; +@@ -67,17 +79,56 @@ fn daemon(daemon: daemon::Daemon) -> ! { + }) + .expect("Failed to find suitable configuration"); + +- handle +- .configure_endpoints(&ConfigureEndpointsReq { +- config_desc: configuration_value, +- interface_desc: Some(interface_num), +- alternate_setting: Some(alternate_setting), +- hub_ports: None, +- }) +- .expect("Failed to configure endpoints"); ++ println!( ++ "usbscsid: selected config {} iface {} alt {} endpoints {:?}", ++ configuration_value, ++ interface_num, ++ alternate_setting, ++ if_desc ++ .endpoints ++ .iter() ++ .map(|ep| ep.address) ++ .collect::>() ++ ); ++ let _ = std::io::stdout().flush(); ++ ++ if let Err(err) = handle.configure_endpoints(&ConfigureEndpointsReq { ++ config_desc: configuration_value, ++ interface_desc: None, ++ alternate_setting: None, ++ hub_ports: None, ++ hub_think_time: None, ++ }) { ++ if is_startup_detach_error(&err) { ++ eprintln!( ++ "usbscsid: device disappeared during endpoint configuration on port {}: {}", ++ port, err ++ ); ++ std::process::exit(0); ++ } + +- let mut protocol = protocol::setup(&handle, protocol, &desc, &conf_desc, &if_desc) ++ eprintln!( ++ "usbscsid: failed to configure endpoints on port {}: {}", ++ port, err ++ ); ++ std::process::exit(1); ++ } ++ ++ let storage_quirks = quirks::lookup_usb_storage_quirks(desc.vendor, desc.product); ++ ++ println!("usbscsid: setting up transport"); ++ let _ = std::io::stdout().flush(); ++ let mut protocol = protocol::setup( ++ &handle, ++ protocol, ++ &desc, ++ &conf_desc, ++ &if_desc, ++ storage_quirks, ++ ) + .expect("Failed to setup protocol"); ++ println!("usbscsid: transport ready"); ++ let _ = std::io::stdout().flush(); + + // TODO: Let all of the USB drivers fork or be managed externally, and xhcid won't have to keep + // track of all the drivers. +@@ -108,9 +159,6 @@ fn daemon(daemon: daemon::Daemon) -> ! 
{ + &driver_block::FuturesExecutor, + ); + +- // FIXME should this wait notifying readiness until the disk scheme is created? +- daemon.ready(); +- + //libredox::call::setrens(0, 0).expect("nvmed: failed to enter null namespace"); + + event_queue +diff --git a/drivers/usb/xhcid/src/xhci/mod.rs b/drivers/usb/xhcid/src/xhci/mod.rs +index f2143676..4d3bea40 100644 +--- a/drivers/usb/xhcid/src/xhci/mod.rs ++++ b/drivers/usb/xhcid/src/xhci/mod.rs +@@ -13,10 +13,10 @@ use std::collections::BTreeMap; + use std::convert::TryFrom; + use std::fs::File; + use std::sync::atomic::AtomicUsize; +-use std::sync::{Arc, Mutex}; ++use std::sync::{Arc, Condvar, Mutex}; + + use std::{mem, process, slice, thread}; +-use syscall::error::{Error, Result, EBADF, EBADMSG, EIO, ENOENT}; ++use syscall::error::{Error, Result, EBADF, EBADMSG, EBUSY, EIO, ENOENT}; + use syscall::{EAGAIN, PAGE_SIZE}; + + use chashmap::CHashMap; +@@ -150,7 +150,7 @@ impl Xhci { + trace!("Handling the transfer event TRB!"); + self::scheme::handle_transfer_event_trb("GET_DESC", &event_trb, &status_trb)?; + +- //self.event_handler_finished(); ++ self.event_handler_finished(); + Ok(()) + } + +@@ -311,6 +311,144 @@ struct PortState { + input_context: Mutex>>, + dev_desc: Option, + endpoint_states: BTreeMap, ++ quirks: crate::usb_quirks::UsbQuirkFlags, ++ pm_state: PortPmState, ++ lifecycle: Arc, ++} ++ ++#[derive(Clone, Copy, Debug, Eq, PartialEq)] ++pub(crate) enum PortLifecycleState { ++ Attaching, ++ Attached, ++ Detaching, ++} ++ ++struct PortLifecycleInner { ++ state: PortLifecycleState, ++ active_operations: usize, ++} ++ ++pub(crate) struct PortLifecycle { ++ inner: Mutex, ++ idle: Condvar, ++} ++ ++impl PortLifecycle { ++ pub(crate) fn new_attaching() -> Self { ++ Self { ++ inner: Mutex::new(PortLifecycleInner { ++ state: PortLifecycleState::Attaching, ++ active_operations: 1, ++ }), ++ idle: Condvar::new(), ++ } ++ } ++ ++ fn lock_inner(&self) -> std::sync::MutexGuard<'_, PortLifecycleInner> { ++ 
self.inner.lock().unwrap_or_else(|err| err.into_inner()) ++ } ++ ++ pub(crate) fn state(&self) -> PortLifecycleState { ++ self.lock_inner().state ++ } ++ ++ pub(crate) fn begin_operation(&self, allow_attaching: bool) -> Result<()> { ++ let mut inner = self.lock_inner(); ++ ++ let allowed = match inner.state { ++ PortLifecycleState::Attached => true, ++ PortLifecycleState::Attaching => allow_attaching, ++ PortLifecycleState::Detaching => false, ++ }; ++ ++ if !allowed { ++ return Err(Error::new(EBUSY)); ++ } ++ ++ inner.active_operations += 1; ++ Ok(()) ++ } ++ ++ pub(crate) fn finish_operation(&self) { ++ let mut inner = self.lock_inner(); ++ ++ if inner.active_operations == 0 { ++ return; ++ } ++ ++ inner.active_operations -= 1; ++ if inner.active_operations == 0 { ++ self.idle.notify_all(); ++ } ++ } ++ ++ pub(crate) fn finish_attach_success(&self) -> PortLifecycleState { ++ let mut inner = self.lock_inner(); ++ ++ if inner.state == PortLifecycleState::Attaching { ++ inner.state = PortLifecycleState::Attached; ++ } ++ ++ if inner.active_operations != 0 { ++ inner.active_operations -= 1; ++ } ++ if inner.active_operations == 0 { ++ self.idle.notify_all(); ++ } ++ ++ inner.state ++ } ++ ++ pub(crate) fn finish_attach_failure(&self) { ++ let mut inner = self.lock_inner(); ++ inner.state = PortLifecycleState::Detaching; ++ ++ if inner.active_operations != 0 { ++ inner.active_operations -= 1; ++ } ++ if inner.active_operations == 0 { ++ self.idle.notify_all(); ++ } ++ } ++ ++ pub(crate) fn begin_detaching(&self) { ++ let mut inner = self.lock_inner(); ++ inner.state = PortLifecycleState::Detaching; ++ ++ while inner.active_operations != 0 { ++ inner = self.idle.wait(inner).unwrap_or_else(|err| err.into_inner()); ++ } ++ } ++} ++ ++pub(crate) struct PortOperationGuard { ++ lifecycle: Arc, ++} ++ ++impl PortOperationGuard { ++ pub(crate) fn new(lifecycle: Arc) -> Self { ++ Self { lifecycle } ++ } ++} ++ ++impl Drop for PortOperationGuard { ++ fn drop(&mut self) { ++ 
self.lifecycle.finish_operation(); ++ } ++} ++ ++#[derive(Clone, Copy, Debug, Eq, PartialEq)] ++pub(crate) enum PortPmState { ++ Active, ++ Suspended, ++} ++impl PortPmState { ++ pub fn as_str(&self) -> &'static str { ++ match self { ++ Self::Active => "active", ++ Self::Suspended => "suspended", ++ } ++ } + } + + impl PortState { +@@ -615,29 +753,24 @@ impl Xhci { + route_string: 0, + }; + +- //Get the CCS and CSC flags +- let (ccs, csc, flags) = { ++ // Only queue ports that are actually connected at startup. A stale CSC bit on an ++ // otherwise disconnected port should not trigger a full attach attempt. ++ let (ccs, flags) = { + let mut ports = self.ports.lock().unwrap(); + let port = &mut ports[port_id.root_hub_port_index()]; + let flags = port.flags(); + let ccs = flags.contains(PortFlags::CCS); +- let csc = flags.contains(PortFlags::CSC); + +- (ccs, csc, flags) ++ (ccs, flags) + }; + + debug!("Port {} has flags {:?}", port_id, flags); + +- match (ccs, csc) { +- (false, false) => { // Nothing is connected, and there was no port status change +- //Do nothing +- } +- _ => { +- //Either something is connected, or nothing is connected and a port status change was asserted. 
+- self.device_enumerator_sender +- .send(DeviceEnumerationRequest { port_id }) +- .expect("Failed to generate the port enumeration request!"); +- } ++ if ccs { ++ info!("xhcid: queueing initial enumeration for port {} with flags {:?}", port_id, flags); ++ self.device_enumerator_sender ++ .send(DeviceEnumerationRequest { port_id }) ++ .expect("Failed to generate the port enumeration request!"); + } + } + } +@@ -757,7 +890,7 @@ impl Xhci { + + trace!("Slot is enabled!"); + self::scheme::handle_event_trb("ENABLE_SLOT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(event_trb.event_slot()) + } +@@ -768,7 +901,7 @@ impl Xhci { + .await; + + self::scheme::handle_event_trb("DISABLE_SLOT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(()) + } +@@ -798,6 +931,8 @@ impl Xhci { + return Err(syscall::Error::new(EAGAIN)); + } + ++ info!("xhcid: begin attach for port {}", port_id); ++ + let (data, state, speed, flags) = { + let port = &self.ports.lock().unwrap()[port_id.root_hub_port_index()]; + (port.read(), port.state(), port.speed(), port.flags()) +@@ -808,74 +943,111 @@ impl Xhci { + port_id, data, state, speed, flags + ); + +- if flags.contains(port::PortFlags::CCS) { +- let slot_ty = match self.supported_protocol(port_id) { +- Some(protocol) => protocol.proto_slot_ty(), +- None => { +- warn!("Failed to find supported protocol information for port"); +- 0 +- } +- }; +- +- debug!("Slot type: {}", slot_ty); +- debug!("Enabling slot."); +- let slot = match self.enable_port_slot(slot_ty).await { +- Ok(ok) => ok, +- Err(err) => { +- error!("Failed to enable slot for port {}: {}", port_id, err); +- return Err(err); +- } +- }; ++ if !flags.contains(port::PortFlags::CCS) { ++ warn!("Attempted to attach a device that didnt have CCS=1"); ++ return Ok(()); ++ } + +- debug!("Enabled port {}, which the xHC mapped to {}", port_id, slot); ++ let early_quirks = 
crate::usb_quirks::lookup_usb_quirks_early(port_id); ++ let slot_ty = match self.supported_protocol(port_id) { ++ Some(protocol) => protocol.proto_slot_ty(), ++ None => { ++ warn!("Failed to find supported protocol information for port {}", port_id); ++ 0 ++ } ++ }; + +- //TODO: get correct speed for child devices +- let protocol_speed = self +- .lookup_psiv(port_id, speed) +- .expect("Failed to retrieve speed ID"); ++ debug!("Slot type: {}", slot_ty); ++ debug!("Enabling slot."); ++ let slot = match self.enable_port_slot(slot_ty).await { ++ Ok(ok) => ok, ++ Err(err) => { ++ error!("Failed to enable slot for port {}: {}", port_id, err); ++ return Err(err); ++ } ++ }; + +- let mut input = unsafe { self.alloc_dma_zeroed::>()? }; ++ debug!("Enabled port {}, which the xHC mapped to {}", port_id, slot); ++ info!("xhcid: enabled slot {} for port {}", slot, port_id); + +- debug!("Attempting to address the device"); +- let mut ring = match self +- .address_device(&mut input, port_id, slot_ty, slot, protocol_speed, speed) +- .await +- { +- Ok(device_ring) => device_ring, +- Err(err) => { +- error!("Failed to address device for port {}: `{}`", port_id, err); +- return Err(err); ++ let protocol_speed = match self.lookup_psiv(port_id, speed) { ++ Some(protocol_speed) => protocol_speed, ++ None => { ++ let err = Error::new(EIO); ++ error!("Failed to retrieve speed ID for port {}", port_id); ++ if let Err(disable_err) = self.disable_port_slot(slot).await { ++ warn!( ++ "Failed to disable slot {} after speed lookup failure on port {}: {}", ++ slot, port_id, disable_err ++ ); + } +- }; +- +- debug!("Addressed device"); ++ return Err(err); ++ } ++ }; + +- // TODO: Should the descriptors be cached in PortState, or refetched? ++ let mut input = unsafe { self.alloc_dma_zeroed::>()? 
}; + +- let mut port_state = PortState { ++ debug!("Attempting to address the device"); ++ let ring = match self ++ .address_device( ++ &mut input, ++ port_id, ++ slot_ty, + slot, + protocol_speed, +- input_context: Mutex::new(input), +- dev_desc: None, +- cfg_idx: None, +- endpoint_states: std::iter::once(( +- 0, +- EndpointState { +- transfer: RingOrStreams::Ring(ring), +- driver_if_state: EndpIfState::Init, +- }, +- )) +- .collect::>(), +- }; +- self.port_states.insert(port_id, port_state); +- debug!("Got port states!"); ++ speed, ++ early_quirks, ++ ) ++ .await ++ { ++ Ok(device_ring) => device_ring, ++ Err(err) => { ++ error!("Failed to address device for port {}: `{}`", port_id, err); ++ if let Err(disable_err) = self.disable_port_slot(slot).await { ++ warn!( ++ "Failed to disable slot {} after address failure on port {}: {}", ++ slot, port_id, disable_err ++ ); ++ } ++ return Err(err); ++ } ++ }; ++ ++ debug!("Addressed device"); ++ info!("xhcid: addressed device on port {} slot {}", port_id, slot); ++ ++ let lifecycle = Arc::new(PortLifecycle::new_attaching()); ++ let port_state = PortState { ++ slot, ++ protocol_speed, ++ input_context: Mutex::new(input), ++ dev_desc: None, ++ cfg_idx: None, ++ endpoint_states: std::iter::once(( ++ 0, ++ EndpointState { ++ transfer: RingOrStreams::Ring(ring), ++ driver_if_state: EndpIfState::Init, ++ }, ++ )) ++ .collect::>(), ++ quirks: early_quirks, ++ pm_state: PortPmState::Active, ++ lifecycle: Arc::clone(&lifecycle), ++ }; ++ self.port_states.insert(port_id, port_state); ++ debug!("Got port states!"); + +- // Ensure correct packet size is used ++ let attach_result = async { + let dev_desc_8_byte = self.fetch_dev_desc_8_byte(port_id, slot).await?; ++ info!("xhcid: fetched 8-byte device descriptor for port {}", port_id); + { +- let mut port_state = self.port_states.get_mut(&port_id).unwrap(); ++ let mut port_state = self.port_states.get_mut(&port_id).ok_or(Error::new(ENOENT))?; + +- let mut input = 
port_state.input_context.lock().unwrap(); ++ let mut input = port_state ++ .input_context ++ .lock() ++ .unwrap_or_else(|err| err.into_inner()); + + self.update_max_packet_size(&mut *input, slot, dev_desc_8_byte) + .await?; +@@ -884,37 +1056,87 @@ impl Xhci { + debug!("Got the 8 byte dev descriptor: {:X?}", dev_desc_8_byte); + + let dev_desc = self.get_desc(port_id, slot).await?; ++ info!( ++ "xhcid: got descriptors for port {} vendor {:04x} product {:04x}", ++ port_id, ++ dev_desc.vendor, ++ dev_desc.product ++ ); ++ let quirks = early_quirks ++ | crate::usb_quirks::lookup_usb_quirks(dev_desc.vendor, dev_desc.product); + debug!("Got the full device descriptor!"); +- self.port_states.get_mut(&port_id).unwrap().dev_desc = Some(dev_desc); ++ { ++ let mut port_state = self.port_states.get_mut(&port_id).ok_or(Error::new(ENOENT))?; ++ port_state.quirks = quirks; ++ port_state.dev_desc = Some(dev_desc); ++ } + + debug!("Got the port states again!"); + { +- let mut port_state = self.port_states.get_mut(&port_id).unwrap(); ++ let mut port_state = self.port_states.get_mut(&port_id).ok_or(Error::new(ENOENT))?; + +- let mut input = port_state.input_context.lock().unwrap(); ++ let mut input = port_state ++ .input_context ++ .lock() ++ .unwrap_or_else(|err| err.into_inner()); + debug!("Got the input context!"); +- let dev_desc = port_state.dev_desc.as_ref().unwrap(); ++ let dev_desc = port_state.dev_desc.as_ref().ok_or(Error::new(EIO))?; + + self.update_default_control_pipe(&mut *input, slot, dev_desc) + .await?; + } + + debug!("Updated the default control pipe"); ++ Ok(()) ++ } ++ .await; ++ ++ match attach_result { ++ Ok(()) => { ++ if lifecycle.finish_attach_success() != PortLifecycleState::Attached { ++ warn!( ++ "attach for port {} completed after detach already started; skipping publication", ++ port_id ++ ); ++ return Err(Error::new(EBUSY)); ++ } + +- match self.spawn_drivers(port_id) { +- Ok(()) => (), +- Err(err) => { +- error!("Failed to spawn driver for port {}: 
`{}`", port_id, err) ++ match self.spawn_drivers(port_id) { ++ Ok(()) => (), ++ Err(err) => { ++ error!("Failed to spawn driver for port {}: `{}`", port_id, err) ++ } + } ++ info!("xhcid: finished attach for port {}", port_id); ++ Ok(()) ++ } ++ Err(err) => { ++ lifecycle.finish_attach_failure(); ++ if let Err(detach_err) = self.detach_device(port_id).await { ++ warn!( ++ "failed to clean up attach failure on port {}: {}", ++ port_id, detach_err ++ ); ++ } ++ Err(err) + } +- } else { +- warn!("Attempted to attach a device that didnt have CCS=1"); + } +- +- Ok(()) + } + + pub async fn detach_device(&self, port_id: PortId) -> Result { ++ let (slot, lifecycle) = match self.port_states.get(&port_id) { ++ Some(state) => (state.slot, Arc::clone(&state.lifecycle)), ++ None => { ++ debug!( ++ "Attempted to detach from port {}, which wasn't previously attached.", ++ port_id ++ ); ++ return Ok(false); ++ } ++ }; ++ ++ lifecycle.begin_detaching(); ++ + if let Some(children) = self.drivers.remove(&port_id) { + for mut child in children { + info!("killing driver process {} for port {}", child.id(), port_id); +@@ -962,20 +1184,20 @@ impl Xhci { + } + } + +- if let Some(state) = self.port_states.remove(&port_id) { +- debug!("disabling port slot {} for port {}", state.slot, port_id); +- let result = self.disable_port_slot(state.slot).await.and(Ok(true)); +- debug!( +- "disabled port slot {} for port {} with result: {:?}", +- state.slot, port_id, result +- ); +- result +- } else { +- debug!( +- "Attempted to detach from port {}, which wasn't previously attached.", +- port_id +- ); +- Ok(false) ++ debug!("disabling port slot {} for port {}", slot, port_id); ++ match self.disable_port_slot(slot).await { ++ Ok(()) => { ++ let _ = self.port_states.remove(&port_id); ++ debug!("disabled port slot {} for port {}", slot, port_id); ++ Ok(true) ++ } ++ Err(err) => { ++ warn!( ++ "failed to disable port slot {} for port {}: {}", ++ slot, port_id, err ++ ); ++ Err(err) ++ } + } + } + +@@ 
-1004,7 +1226,7 @@ impl Xhci { + .await; + + self::scheme::handle_event_trb("EVALUATE_CONTEXT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(()) + } +@@ -1039,7 +1261,7 @@ impl Xhci { + debug!("Completed the command to update the default control pipe"); + + self::scheme::handle_event_trb("EVALUATE_CONTEXT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(()) + } +@@ -1052,6 +1274,7 @@ impl Xhci { + slot: u8, + protocol_speed: &ProtocolSpeed, + speed: u8, ++ quirks: crate::usb_quirks::UsbQuirkFlags, + ) -> Result { + // Collect MTT, parent port number, parent slot ID + let mut mtt = false; +@@ -1162,11 +1385,16 @@ impl Xhci { + + let input_context_physical = input_context.physical(); + +- let (event_trb, _) = self +- .execute_command(|trb, cycle| { +- trb.address_device(slot, input_context_physical, false, cycle) +- }) +- .await; ++ let address_timeout = if quirks.contains(crate::usb_quirks::UsbQuirkFlags::SHORT_SET_ADDR_TIMEOUT) ++ { ++ Timeout::from_millis(100) ++ } else { ++ Timeout::from_secs(1) ++ }; ++ ++ let (event_trb, _) = self.execute_command_with_timeout(address_timeout, |trb, cycle| { ++ trb.address_device(slot, input_context_physical, false, cycle) ++ })?; + + if event_trb.completion_code() != TrbCompletionCode::Success as u8 { + error!( +@@ -1175,10 +1403,10 @@ impl Xhci { + port, + event_trb.completion_code() + ); +- //self.event_handler_finished(); ++ self.event_handler_finished(); + return Err(Error::new(EIO)); + } +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(ring) + } +@@ -1281,6 +1509,12 @@ impl Xhci { + ifdesc.sub_class, + ifdesc.protocol, + ); ++ match driver.name.as_str() { ++ "USB HID" => info!("USB HID driver spawned"), ++ "SCSI over USB" => info!("USB SCSI driver spawned"), ++ "USB HUB" => info!("USB HUB driver spawned"), ++ _ => {} ++ } + let (command, args) = 
driver.command.split_first().ok_or(Error::new(EBADMSG))?; + + let command = if command.starts_with('/') { +diff --git a/drivers/usb/xhcid/src/xhci/scheme.rs b/drivers/usb/xhcid/src/xhci/scheme.rs +index f2d439a4..53770407 100644 +--- a/drivers/usb/xhcid/src/xhci/scheme.rs ++++ b/drivers/usb/xhcid/src/xhci/scheme.rs +@@ -18,12 +18,15 @@ + //! port/endpoints//data + use std::convert::TryFrom; + use std::io::prelude::*; ++use std::io::Write; + use std::ops::Deref; ++use std::sync::Arc; + use std::sync::atomic; + use std::{cmp, fmt, io, mem, str}; + + use common::dma::Dma; + use futures::executor::block_on; ++use futures::FutureExt; + use log::{debug, error, info, trace, warn}; + use redox_scheme::scheme::SchemeSync; + use smallvec::SmallVec; +@@ -32,16 +35,16 @@ use common::io::Io; + use redox_scheme::{CallerCtx, OpenResult}; + use syscall::schemev2::NewFdFlags; + use syscall::{ +- Error, Result, Stat, EACCES, EBADF, EBADFD, EBADMSG, EINVAL, EIO, EISDIR, ENOENT, ENOSYS, +- ENOTDIR, EOPNOTSUPP, EPROTO, ESPIPE, MODE_CHR, MODE_DIR, MODE_FILE, O_DIRECTORY, O_RDWR, +- O_STAT, O_WRONLY, SEEK_CUR, SEEK_END, SEEK_SET, ++ Error, Result, Stat, EACCES, EBADF, EBADFD, EBADMSG, EBUSY, EINVAL, EIO, EISDIR, ENOENT, ++ ENOSYS, ENOTDIR, EOPNOTSUPP, EPROTO, ESPIPE, MODE_CHR, MODE_DIR, MODE_FILE, O_DIRECTORY, ++ O_RDWR, O_STAT, O_WRONLY, SEEK_CUR, SEEK_END, SEEK_SET, + }; + + use super::{port, usb}; + use super::{EndpointState, PortId, Xhci}; + + use super::context::{ +- SlotState, StreamContextArray, StreamContextType, CONTEXT_32, CONTEXT_64, ++ EndpointContext, SlotState, StreamContextArray, StreamContextType, CONTEXT_32, CONTEXT_64, + SLOT_CONTEXT_STATE_MASK, SLOT_CONTEXT_STATE_SHIFT, + }; + use super::extended::ProtocolSpeed; +@@ -60,10 +63,16 @@ lazy_static! 
{ + .expect("Failed to create the regex for the port/attach scheme."); + static ref REGEX_PORT_DETACH: Regex = Regex::new(r"^port([\d\.]+)/detach$") + .expect("Failed to create the regex for the port/detach scheme."); ++ static ref REGEX_PORT_SUSPEND: Regex = Regex::new(r"^port([\d\.]+)/suspend$") ++ .expect("Failed to create the regex for the port/suspend scheme."); ++ static ref REGEX_PORT_RESUME: Regex = Regex::new(r"^port([\d\.]+)/resume$") ++ .expect("Failed to create the regex for the port/resume scheme."); + static ref REGEX_PORT_DESCRIPTORS: Regex = Regex::new(r"^port([\d\.]+)/descriptors$") + .expect("Failed to create the regex for the port/descriptors"); + static ref REGEX_PORT_STATE: Regex = Regex::new(r"^port([\d\.]+)/state$") + .expect("Failed to create the regex for the port/state scheme"); ++ static ref REGEX_PORT_PM_STATE: Regex = Regex::new(r"^port([\d\.]+)/pm_state$") ++ .expect("Failed to create the regex for the port/pm_state scheme"); + static ref REGEX_PORT_REQUEST: Regex = Regex::new(r"^port([\d\.]+)/request$") + .expect("Failed to create the regex for the port/request scheme"); + static ref REGEX_PORT_ENDPOINTS: Regex = Regex::new(r"^port([\d\.]+)/endpoints$") +@@ -137,12 +146,15 @@ pub enum Handle { + Port(PortId, Vec), // port, contents + PortDesc(PortId, Vec), // port, contents + PortState(PortId), // port ++ PortPmState(PortId), // port + PortReq(PortId, PortReqState), // port, state + Endpoints(PortId, Vec), // port, contents + Endpoint(PortId, u8, EndpointHandleTy), // port, endpoint, state + ConfigureEndpoints(PortId), // port + AttachDevice(PortId), // port + DetachDevice(PortId), // port ++ SuspendDevice(PortId), // port ++ ResumeDevice(PortId), // port + SchemeRoot, + } + +@@ -172,6 +184,8 @@ enum SchemeParameters { + PortDesc(PortId), // port number + /// /port/state + PortState(PortId), // port number ++ /// /port/pm_state ++ PortPmState(PortId), // port number + /// /port/request + PortReq(PortId), // port number + /// 
/port/endpoints +@@ -187,6 +201,10 @@ enum SchemeParameters { + AttachDevice(PortId), // port number + /// /port/detach + DetachDevice(PortId), // port number ++ /// /port/suspend ++ SuspendDevice(PortId), // port number ++ /// /port/resume ++ ResumeDevice(PortId), // port number + } + + impl Handle { +@@ -209,6 +227,9 @@ impl Handle { + Handle::PortState(port_num) => { + format!("port{}/state", port_num) + } ++ Handle::PortPmState(port_num) => { ++ format!("port{}/pm_state", port_num) ++ } + Handle::PortReq(port_num, _) => { + format!("port{}/request", port_num) + } +@@ -235,6 +256,12 @@ impl Handle { + Handle::DetachDevice(port_num) => { + format!("port{}/detach", port_num) + } ++ Handle::SuspendDevice(port_num) => { ++ format!("port{}/suspend", port_num) ++ } ++ Handle::ResumeDevice(port_num) => { ++ format!("port{}/resume", port_num) ++ } + Handle::SchemeRoot => String::from(""), + } + } +@@ -258,10 +285,13 @@ impl Handle { + &Handle::PortReq(_, PortReqState::Tmp) => unreachable!(), + &Handle::PortReq(_, PortReqState::TmpSetup(_)) => unreachable!(), + &Handle::PortState(_) => HandleType::Character, ++ &Handle::PortPmState(_) => HandleType::Character, + &Handle::PortReq(_, _) => HandleType::Character, + &Handle::ConfigureEndpoints(_) => HandleType::Character, + &Handle::AttachDevice(_) => HandleType::Character, + &Handle::DetachDevice(_) => HandleType::Character, ++ &Handle::SuspendDevice(_) => HandleType::Character, ++ &Handle::ResumeDevice(_) => HandleType::Character, + &Handle::Endpoint(_, _, ref st) => match st { + EndpointHandleTy::Data => HandleType::Character, + EndpointHandleTy::Ctl => HandleType::Character, +@@ -289,10 +319,13 @@ impl Handle { + &Handle::PortReq(_, PortReqState::Tmp) => None, + &Handle::PortReq(_, PortReqState::TmpSetup(_)) => None, + &Handle::PortState(_) => None, ++ &Handle::PortPmState(_) => None, + &Handle::PortReq(_, _) => None, + &Handle::ConfigureEndpoints(_) => None, + &Handle::AttachDevice(_) => None, + &Handle::DetachDevice(_) 
=> None, ++ &Handle::SuspendDevice(_) => None, ++ &Handle::ResumeDevice(_) => None, + &Handle::Endpoint(_, _, ref st) => match st { + EndpointHandleTy::Data => None, + EndpointHandleTy::Ctl => None, +@@ -383,6 +416,14 @@ impl SchemeParameters { + let port_num = get_port_id_from_regex(®EX_PORT_DETACH, scheme, 0)?; + + Ok(Self::DetachDevice(port_num)) ++ } else if REGEX_PORT_SUSPEND.is_match(scheme) { ++ let port_num = get_port_id_from_regex(®EX_PORT_SUSPEND, scheme, 0)?; ++ ++ Ok(Self::SuspendDevice(port_num)) ++ } else if REGEX_PORT_RESUME.is_match(scheme) { ++ let port_num = get_port_id_from_regex(®EX_PORT_RESUME, scheme, 0)?; ++ ++ Ok(Self::ResumeDevice(port_num)) + } else if REGEX_PORT_DESCRIPTORS.is_match(scheme) { + let port_num = get_port_id_from_regex(®EX_PORT_DESCRIPTORS, scheme, 0)?; + +@@ -391,6 +432,10 @@ impl SchemeParameters { + let port_num = get_port_id_from_regex(®EX_PORT_STATE, scheme, 0)?; + + Ok(Self::PortState(port_num)) ++ } else if REGEX_PORT_PM_STATE.is_match(scheme) { ++ let port_num = get_port_id_from_regex(®EX_PORT_PM_STATE, scheme, 0)?; ++ ++ Ok(Self::PortPmState(port_num)) + } else if REGEX_PORT_REQUEST.is_match(scheme) { + let port_num = get_port_id_from_regex(®EX_PORT_REQUEST, scheme, 0)?; + +@@ -556,6 +601,47 @@ impl AnyDescriptor { + } + + impl Xhci { ++ fn begin_port_operation( ++ &self, ++ port: PortId, ++ allow_attaching: bool, ++ require_active_pm: bool, ++ ) -> Result { ++ let lifecycle = { ++ let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; ++ Arc::clone(&port_state.lifecycle) ++ }; ++ ++ lifecycle.begin_operation(allow_attaching)?; ++ let guard = super::PortOperationGuard::new(lifecycle); ++ ++ if require_active_pm { ++ let pm_state = self ++ .port_states ++ .get(&port) ++ .ok_or(Error::new(EBADFD))? 
++ .pm_state; ++ if pm_state != super::PortPmState::Active { ++ drop(guard); ++ return Err(Error::new(EBUSY)); ++ } ++ } ++ ++ Ok(guard) ++ } ++ ++ fn begin_transfer_operation(&self, port: PortId) -> Result { ++ self.begin_port_operation(port, true, true) ++ } ++ ++ fn begin_routable_operation(&self, port: PortId) -> Result { ++ self.begin_port_operation(port, false, true) ++ } ++ ++ fn begin_attached_operation(&self, port: PortId) -> Result { ++ self.begin_port_operation(port, false, false) ++ } ++ + async fn new_if_desc( + &self, + port_id: PortId, +@@ -564,15 +650,22 @@ impl Xhci { + endps: impl IntoIterator, + hid_descs: impl IntoIterator, + lang_id: u16, ++ quirks: crate::usb_quirks::UsbQuirkFlags, + ) -> Result { + Ok(IfDesc { + alternate_setting: desc.alternate_setting, + class: desc.class, + interface_str: if desc.interface_str > 0 { +- Some( ++ if quirks.contains(crate::usb_quirks::UsbQuirkFlags::BAD_DESCRIPTOR) { + self.fetch_string_desc(port_id, slot, desc.interface_str, lang_id) +- .await?, +- ) ++ .await ++ .ok() ++ } else { ++ Some( ++ self.fetch_string_desc(port_id, slot, desc.interface_str, lang_id) ++ .await?, ++ ) ++ } + } else { + None + }, +@@ -590,10 +683,9 @@ impl Xhci { + /// # Locking + /// This function will lock `Xhci::cmd` and `Xhci::dbs`. + pub async fn execute_command(&self, f: F) -> (Trb, Trb) { +- //TODO: find out why this bit is set earlier! 
+ if self.interrupt_is_pending(0) { + debug!("The EHB bit is already set!"); +- //self.force_clear_interrupt(0); ++ self.force_clear_interrupt(0); + } + + let next_event = { +@@ -628,6 +720,54 @@ impl Xhci { + + (event_trb, command_trb) + } ++ pub fn execute_command_with_timeout( ++ &self, ++ timeout: common::timeout::Timeout, ++ f: F, ++ ) -> Result<(Trb, Trb)> { ++ if self.interrupt_is_pending(0) { ++ debug!("The EHB bit is already set!"); ++ self.force_clear_interrupt(0); ++ } ++ ++ let next_event = { ++ let mut command_ring = self.cmd.lock().unwrap(); ++ let (cmd_index, cycle) = (command_ring.next_index(), command_ring.cycle); ++ ++ debug!("Sending command with cycle bit {}", cycle as u8); ++ ++ { ++ let command_trb = &mut command_ring.trbs[cmd_index]; ++ f(command_trb, cycle); ++ } ++ ++ let command_trb = &command_ring.trbs[cmd_index]; ++ self.next_command_completion_event_trb( ++ &*command_ring, ++ command_trb, ++ EventDoorbell::new(self, 0, 0), ++ ) ++ }; ++ ++ let mut next_event = Box::pin(next_event); ++ ++ loop { ++ if let Some(trbs) = next_event.as_mut().now_or_never() { ++ let event_trb = trbs.event_trb; ++ let command_trb = trbs.src_trb.ok_or(Error::new(EIO))?; ++ ++ assert_eq!( ++ event_trb.trb_type(), ++ TrbType::CommandCompletion as u8, ++ "The IRQ reactor (or the xHC) gave an invalid event TRB" ++ ); ++ ++ return Ok((event_trb, command_trb)); ++ } ++ ++ timeout.run().map_err(|()| Error::new(EIO))?; ++ } ++ } + pub async fn execute_control_transfer( + &self, + port_num: PortId, +@@ -639,6 +779,9 @@ impl Xhci { + where + D: FnMut(&mut Trb, bool) -> ControlFlow, + { ++ let _op = self.begin_transfer_operation(port_num)?; ++ self.ensure_port_active(port_num)?; ++ + let future = { + let mut port_state = self.port_state_mut(port_num)?; + let slot = port_state.slot; +@@ -690,7 +833,21 @@ impl Xhci { + + handle_transfer_event_trb("CONTROL_TRANSFER", &event_trb, &status_trb)?; + +- //self.event_handler_finished(); ++ let delay_ctrl_msg = self ++ .port_states 
++ .get(&port_num) ++ .map(|port_state| { ++ port_state ++ .quirks ++ .contains(crate::usb_quirks::UsbQuirkFlags::DELAY_CTRL_MSG) ++ }) ++ .unwrap_or(false); ++ ++ if delay_ctrl_msg { ++ std::thread::sleep(std::time::Duration::from_millis(20)); ++ } ++ ++ self.event_handler_finished(); + + Ok(event_trb) + } +@@ -709,6 +866,9 @@ impl Xhci { + where + D: FnMut(&mut Trb, bool) -> ControlFlow, + { ++ let _op = self.begin_transfer_operation(port_num)?; ++ self.ensure_port_active(port_num)?; ++ + let endp_idx = endp_num.checked_sub(1).ok_or(Error::new(EIO))?; + let mut port_state = self.port_state_mut(port_num)?; + +@@ -785,7 +945,31 @@ impl Xhci { + let event_trb = trbs.event_trb; + let transfer_trb = trbs.src_trb.ok_or(Error::new(EIO))?; + +- handle_transfer_event_trb("EXECUTE_TRANSFER", &event_trb, &transfer_trb)?; ++ if let Err(err) = handle_transfer_event_trb("EXECUTE_TRANSFER", &event_trb, &transfer_trb) ++ { ++ let need_reset = self ++ .port_states ++ .get(&port_num) ++ .map(|port_state| { ++ port_state ++ .quirks ++ .contains(crate::usb_quirks::UsbQuirkFlags::NEED_RESET) ++ }) ++ .unwrap_or(false); ++ ++ if need_reset { ++ if let Err(reset_err) = self.reset_device_slot(port_num).await { ++ error!( ++ "EXECUTE_TRANSFER reset recovery failed for port {}: {}", ++ port_num, reset_err ++ ); ++ } ++ } ++ ++ self.event_handler_finished(); ++ ++ return Err(err); ++ } + + // FIXME: EDTLA if event data was set + if event_trb.completion_code() != TrbCompletionCode::ShortPacket as u8 +@@ -798,6 +982,8 @@ impl Xhci { + // TODO: Handle event data + trace!("EVENT DATA: {:?}", event_trb.event_data()); + ++ self.event_handler_finished(); ++ + Ok(event_trb) + } + async fn device_req_no_data(&self, port: PortId, req: usb::Setup) -> Result<()> { +@@ -857,10 +1043,27 @@ impl Xhci { + trb.reset_endpoint(slot, endp_num_xhc, tsp, cycle); + }) + .await; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + handle_event_trb("RESET_ENDPOINT", &event_trb, &command_trb) + 
} ++ async fn reset_device_slot(&self, port_num: PortId) -> Result<()> { ++ let slot = self ++ .port_states ++ .get(&port_num) ++ .ok_or(Error::new(EBADF))? ++ .slot; ++ ++ let (event_trb, command_trb) = self ++ .execute_command(|trb, cycle| { ++ trb.reset_device(slot, cycle); ++ }) ++ .await; ++ ++ self.event_handler_finished(); ++ ++ handle_event_trb("RESET_DEVICE", &event_trb, &command_trb) ++ } + + fn endp_ctx_interval(speed_id: &ProtocolSpeed, endp_desc: &EndpDesc) -> u8 { + /// Logarithmic (base 2) 125 µs periods per millisecond. +@@ -949,35 +1152,65 @@ impl Xhci { + self.port_states.get_mut(&port).ok_or(Error::new(EBADF)) + } + ++ fn restore_configure_input_context( ++ &self, ++ port: PortId, ++ snapshot: ConfigureContextSnapshot, ++ endpoint_snapshots: &[(usize, EndpointContextSnapshot)], ++ ) -> Result { ++ let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; ++ let mut input_context = port_state ++ .input_context ++ .lock() ++ .unwrap_or_else(|err| err.into_inner()); ++ ++ input_context.add_context.write(snapshot.add_context); ++ input_context.drop_context.write(snapshot.drop_context); ++ input_context.control.write(snapshot.control); ++ input_context.device.slot.a.write(snapshot.slot_a); ++ input_context.device.slot.b.write(snapshot.slot_b); ++ input_context.device.slot.c.write(snapshot.slot_c); ++ ++ for (endp_i, endp_snapshot) in endpoint_snapshots { ++ input_context.device.endpoints[*endp_i].a.write(endp_snapshot.a); ++ input_context.device.endpoints[*endp_i].b.write(endp_snapshot.b); ++ input_context.device.endpoints[*endp_i].trl.write(endp_snapshot.trl); ++ input_context.device.endpoints[*endp_i].trh.write(endp_snapshot.trh); ++ input_context.device.endpoints[*endp_i].c.write(endp_snapshot.c); ++ } ++ ++ Ok(input_context.physical()) ++ } ++ + async fn configure_endpoints_once( + &self, + port: PortId, + req: &ConfigureEndpointsReq, + ) -> Result<()> { +- let (endp_desc_count, new_context_entries, configuration_value) = { +- let 
mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(EBADFD))?; +- +- port_state.cfg_idx = Some(req.config_desc); ++ let (dev_desc, endpoint_descs, new_context_entries, configuration_value) = { ++ let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; ++ let dev_desc = port_state.dev_desc.as_ref().ok_or(Error::new(EBADFD))?.clone(); + +- let config_desc = port_state +- .dev_desc +- .as_ref() +- .unwrap() ++ let config_desc = dev_desc + .config_descs + .iter() + .find(|desc| desc.configuration_value == req.config_desc) + .ok_or(Error::new(EBADFD))?; ++ let configuration_value = config_desc.configuration_value; + +- //TODO: USE ENDPOINTS FROM ALL INTERFACES +- let mut endp_desc_count = 0; +- let mut new_context_entries = 1; +- for if_desc in config_desc.interface_descs.iter() { +- for endpoint in if_desc.endpoints.iter() { +- endp_desc_count += 1; +- let entry = Self::endp_num_to_dci(endp_desc_count, endpoint); +- if entry > new_context_entries { +- new_context_entries = entry; +- } ++ let endpoint_descs = config_desc ++ .interface_descs ++ .iter() ++ .flat_map(|if_desc| if_desc.endpoints.iter().copied()) ++ .collect::>(); ++ ++ let endp_desc_count = endpoint_descs.len(); ++ let mut new_context_entries = 1u8; ++ for (endp_idx, endpoint) in endpoint_descs.iter().enumerate() { ++ let endp_num = endp_idx as u8 + 1; ++ let entry = Self::endp_num_to_dci(endp_num, endpoint); ++ if entry > new_context_entries { ++ new_context_entries = entry; + } + } + new_context_entries += 1; +@@ -988,11 +1221,13 @@ impl Xhci { + } + + ( +- endp_desc_count, ++ dev_desc, ++ endpoint_descs, + new_context_entries, +- config_desc.configuration_value, ++ configuration_value, + ) + }; ++ let endp_desc_count = endpoint_descs.len(); + let lec = self.cap.lec(); + let log_max_psa_size = self.cap.max_psa_size(); + +@@ -1002,9 +1237,160 @@ impl Xhci { + Error::new(EIO) + })?; + ++ let mut endpoint_programs = Vec::with_capacity(endp_desc_count as usize); ++ let mut 
staged_endpoint_states = Vec::with_capacity(endp_desc_count as usize); ++ + { ++ for (endp_idx, endp_desc) in endpoint_descs.iter().enumerate() { ++ let endp_num = endp_idx as u8 + 1; ++ ++ let endp_num_xhc = Self::endp_num_to_dci(endp_num, endp_desc); ++ let usb_log_max_streams = endp_desc.log_max_streams(); ++ ++ let primary_streams = if let Some(log_max_streams) = usb_log_max_streams { ++ if log_max_psa_size != 0 { ++ cmp::min(u8::from(log_max_streams), log_max_psa_size + 1) - 1 ++ } else { ++ 0 ++ } ++ } else { ++ 0 ++ }; ++ let linear_stream_array = primary_streams != 0; ++ ++ let mult = endp_desc.isoch_mult(lec); ++ ++ let max_packet_size = Self::endp_ctx_max_packet_size(endp_desc); ++ let max_burst_size = Self::endp_ctx_max_burst(speed_id, &dev_desc, endp_desc); ++ ++ let max_esit_payload = Self::endp_ctx_max_esit_payload( ++ speed_id, ++ &dev_desc, ++ endp_desc, ++ max_packet_size, ++ max_burst_size, ++ ); ++ let max_esit_payload_lo = max_esit_payload as u16; ++ let max_esit_payload_hi = ((max_esit_payload & 0x00FF_0000) >> 16) as u8; ++ ++ let interval = Self::endp_ctx_interval(speed_id, endp_desc); ++ ++ let max_error_count = 3; ++ let ep_ty = endp_desc.xhci_ep_type()?; ++ let host_initiate_disable = false; ++ ++ let avg_trb_len: u16 = match endp_desc.ty() { ++ EndpointTy::Ctrl => { ++ warn!("trying to use control endpoint"); ++ return Err(Error::new(EIO)); ++ } ++ EndpointTy::Bulk | EndpointTy::Isoch => 3072, ++ EndpointTy::Interrupt => 1024, ++ }; ++ ++ assert_eq!(ep_ty & 0x7, ep_ty); ++ assert_eq!(mult & 0x3, mult); ++ assert_eq!(max_error_count & 0x3, max_error_count); ++ assert_ne!(ep_ty, 0); ++ ++ let ring_ptr = if usb_log_max_streams.is_some() { ++ let mut array = ++ StreamContextArray::new::(self.cap.ac64(), 1 << (primary_streams + 1))?; ++ ++ array.add_ring::(self.cap.ac64(), 1, true)?; ++ let array_ptr = array.register(); ++ ++ assert_eq!( ++ array_ptr & 0xFFFF_FFFF_FFFF_FF81, ++ array_ptr, ++ "stream ctx ptr not aligned to 16 bytes" ++ ); ++ ++ 
staged_endpoint_states.push(( ++ endp_num, ++ EndpointState { ++ transfer: super::RingOrStreams::Streams(array), ++ driver_if_state: EndpIfState::Init, ++ }, ++ )); ++ ++ array_ptr ++ } else { ++ let ring = Ring::new::(self.cap.ac64(), 16, true)?; ++ let ring_ptr = ring.register(); ++ ++ assert_eq!( ++ ring_ptr & 0xFFFF_FFFF_FFFF_FF81, ++ ring_ptr, ++ "ring pointer not aligned to 16 bytes" ++ ); ++ ++ staged_endpoint_states.push(( ++ endp_num, ++ EndpointState { ++ transfer: super::RingOrStreams::Ring(ring), ++ driver_if_state: EndpIfState::Init, ++ }, ++ )); ++ ++ ring_ptr ++ }; ++ assert_eq!(primary_streams & 0x1F, primary_streams); ++ ++ endpoint_programs.push(EndpointProgram { ++ endp_num, ++ endp_num_xhc, ++ a: u32::from(mult) << 8 ++ | u32::from(primary_streams) << 10 ++ | u32::from(linear_stream_array) << 15 ++ | u32::from(interval) << 16 ++ | u32::from(max_esit_payload_hi) << 24, ++ b: max_error_count << 1 ++ | u32::from(ep_ty) << 3 ++ | u32::from(host_initiate_disable) << 7 ++ | u32::from(max_burst_size) << 8 ++ | u32::from(max_packet_size) << 16, ++ trl: ring_ptr as u32, ++ trh: (ring_ptr >> 32) as u32, ++ c: u32::from(avg_trb_len) | (u32::from(max_esit_payload_lo) << 16), ++ }); ++ ++ log::debug!("staged endpoint {}", endp_num); ++ } ++ } ++ ++ let (configure_snapshot, endpoint_snapshots, input_context_physical) = { + let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; +- let mut input_context = port_state.input_context.lock().unwrap(); ++ let mut input_context = port_state ++ .input_context ++ .lock() ++ .unwrap_or_else(|err| err.into_inner()); ++ ++ let configure_snapshot = ConfigureContextSnapshot { ++ add_context: input_context.add_context.read(), ++ drop_context: input_context.drop_context.read(), ++ control: input_context.control.read(), ++ slot_a: input_context.device.slot.a.read(), ++ slot_b: input_context.device.slot.b.read(), ++ slot_c: input_context.device.slot.c.read(), ++ }; ++ ++ let endpoint_snapshots = 
endpoint_programs ++ .iter() ++ .map(|program| { ++ let endp_i = program.endp_num_xhc as usize - 1; ++ ( ++ endp_i, ++ EndpointContextSnapshot::capture_values( ++ input_context.device.endpoints[endp_i].a.read(), ++ input_context.device.endpoints[endp_i].b.read(), ++ input_context.device.endpoints[endp_i].trl.read(), ++ input_context.device.endpoints[endp_i].trh.read(), ++ input_context.device.endpoints[endp_i].c.read(), ++ ), ++ ) ++ }) ++ .collect::>(); + + // Configure the slot context as well, which holds the last index of the endp descs. + input_context.add_context.write(1); +@@ -1015,25 +1401,26 @@ impl Xhci { + + const HUB_PORTS_MASK: u32 = 0xFF00_0000; + const HUB_PORTS_SHIFT: u8 = 24; ++ let mut current_slot_c = input_context.device.slot.c.read(); + + let mut current_slot_a = input_context.device.slot.a.read(); + let mut current_slot_b = input_context.device.slot.b.read(); + +- // Set context entries + current_slot_a &= !CONTEXT_ENTRIES_MASK; + current_slot_a |= + (u32::from(new_context_entries) << CONTEXT_ENTRIES_SHIFT) & CONTEXT_ENTRIES_MASK; + +- // Set hub data + current_slot_a &= !(1 << 26); + current_slot_b &= !HUB_PORTS_MASK; + if let Some(hub_ports) = req.hub_ports { + current_slot_a |= 1 << 26; + current_slot_b |= (u32::from(hub_ports) << HUB_PORTS_SHIFT) & HUB_PORTS_MASK; + } ++ current_slot_c = apply_hub_tt_info(current_slot_c, req); + + input_context.device.slot.a.write(current_slot_a); + input_context.device.slot.b.write(current_slot_b); ++ input_context.device.slot.c.write(current_slot_c); + + let control = if self.op.lock().unwrap().cie() { + (u32::from(req.alternate_setting.unwrap_or(0)) << 16) +@@ -1043,174 +1430,132 @@ impl Xhci { + 0 + }; + input_context.control.write(control); +- } +- +- for endp_idx in 0..endp_desc_count as u8 { +- let endp_num = endp_idx + 1; +- +- let mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(EBADFD))?; +- let dev_desc = port_state.dev_desc.as_ref().unwrap(); +- let endp_desc = 
port_state.get_endp_desc(endp_idx).ok_or_else(|| { +- warn!("failed to find endpoint {}", endp_idx); +- Error::new(EIO) +- })?; + +- let endp_num_xhc = Self::endp_num_to_dci(endp_num, endp_desc); +- +- let usb_log_max_streams = endp_desc.log_max_streams(); +- +- // TODO: Secondary streams. +- let primary_streams = if let Some(log_max_streams) = usb_log_max_streams { +- // TODO: Can streams-capable be configured to not use streams? +- if log_max_psa_size != 0 { +- cmp::min(u8::from(log_max_streams), log_max_psa_size + 1) - 1 +- } else { +- 0 +- } +- } else { +- 0 +- }; +- let linear_stream_array = if primary_streams != 0 { true } else { false }; ++ for program in &endpoint_programs { ++ let endp_i = program.endp_num_xhc as usize - 1; ++ input_context.add_context.writef(1 << program.endp_num_xhc, true); ++ input_context.device.endpoints[endp_i].a.write(program.a); ++ input_context.device.endpoints[endp_i].b.write(program.b); ++ input_context.device.endpoints[endp_i].trl.write(program.trl); ++ input_context.device.endpoints[endp_i].trh.write(program.trh); ++ input_context.device.endpoints[endp_i].c.write(program.c); ++ } + +- // TODO: Interval related fields +- // TODO: Max ESIT payload size. 
++ (configure_snapshot, endpoint_snapshots, input_context.physical()) ++ }; + +- let mult = endp_desc.isoch_mult(lec); ++ let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; ++ let slot = port_state.slot; + +- let max_packet_size = Self::endp_ctx_max_packet_size(endp_desc); +- let max_burst_size = Self::endp_ctx_max_burst(speed_id, dev_desc, endp_desc); ++ let (event_trb, command_trb) = self ++ .execute_command(|trb, cycle| trb.configure_endpoint(slot, input_context_physical, cycle)) ++ .await; + +- let max_esit_payload = Self::endp_ctx_max_esit_payload( +- speed_id, +- dev_desc, +- endp_desc, +- max_packet_size, +- max_burst_size, +- ); +- let max_esit_payload_lo = max_esit_payload as u16; +- let max_esit_payload_hi = ((max_esit_payload & 0x00FF_0000) >> 16) as u8; +- +- let interval = Self::endp_ctx_interval(speed_id, endp_desc); +- +- let max_error_count = 3; +- let ep_ty = endp_desc.xhci_ep_type()?; +- let host_initiate_disable = false; +- +- // TODO: Maybe this value is out of scope for xhcid, because the actual usb device +- // driver probably knows better. The spec says that the initial value should be 8 bytes +- // for control, 1KiB for interrupt and 3KiB for bulk and isoch. +- let avg_trb_len: u16 = match endp_desc.ty() { +- EndpointTy::Ctrl => { +- warn!("trying to use control endpoint"); +- return Err(Error::new(EIO)); // only endpoint zero is of type control, and is configured separately with the address device command. 
++ self.event_handler_finished(); ++ ++ if let Err(err) = handle_event_trb("CONFIGURE_ENDPOINT", &event_trb, &command_trb) { ++ let rollback_input_context_physical = match self.restore_configure_input_context( ++ port, ++ configure_snapshot, ++ &endpoint_snapshots, ++ ) { ++ Ok(physical) => physical, ++ Err(restore_err) => { ++ warn!( ++ "failed to restore configure input context after CONFIGURE_ENDPOINT failure: {:?}", ++ restore_err ++ ); ++ return Err(err); + } +- EndpointTy::Bulk | EndpointTy::Isoch => 3072, // 3 KiB +- EndpointTy::Interrupt => 1024, // 1 KiB + }; + +- assert_eq!(ep_ty & 0x7, ep_ty); +- assert_eq!(mult & 0x3, mult); +- assert_eq!(max_error_count & 0x3, max_error_count); +- assert_ne!(ep_ty, 0); // 0 means invalid. +- +- let ring_ptr = if usb_log_max_streams.is_some() { +- let mut array = +- StreamContextArray::new::(self.cap.ac64(), 1 << (primary_streams + 1))?; ++ let (rollback_event_trb, rollback_command_trb) = self ++ .execute_command(|trb, cycle| { ++ trb.configure_endpoint(slot, rollback_input_context_physical, cycle) ++ }) ++ .await; + +- // TODO: Use as many stream rings as needed. 
+- array.add_ring::(self.cap.ac64(), 1, true)?; +- let array_ptr = array.register(); ++ self.event_handler_finished(); + +- assert_eq!( +- array_ptr & 0xFFFF_FFFF_FFFF_FF81, +- array_ptr, +- "stream ctx ptr not aligned to 16 bytes" +- ); +- port_state.endpoint_states.insert( +- endp_num, +- EndpointState { +- transfer: super::RingOrStreams::Streams(array), +- driver_if_state: EndpIfState::Init, +- }, ++ if let Err(rollback_err) = ++ handle_event_trb("CONFIGURE_ENDPOINT_ROLLBACK", &rollback_event_trb, &rollback_command_trb) ++ { ++ warn!( ++ "failed to roll back CONFIGURE_ENDPOINT after failure {:?}: {:?}", ++ err, ++ rollback_err + ); ++ } + +- array_ptr +- } else { +- let ring = Ring::new::(self.cap.ac64(), 16, true)?; +- let ring_ptr = ring.register(); +- +- assert_eq!( +- ring_ptr & 0xFFFF_FFFF_FFFF_FF81, +- ring_ptr, +- "ring pointer not aligned to 16 bytes" +- ); +- port_state.endpoint_states.insert( +- endp_num, +- EndpointState { +- transfer: super::RingOrStreams::Ring(ring), +- driver_if_state: EndpIfState::Init, +- }, +- ); +- ring_ptr +- }; +- assert_eq!(primary_streams & 0x1F, primary_streams); +- +- let mut input_context = port_state.input_context.lock().unwrap(); +- input_context.add_context.writef(1 << endp_num_xhc, true); +- +- let endp_i = endp_num_xhc as usize - 1; +- input_context.device.endpoints[endp_i].a.write( +- u32::from(mult) << 8 +- | u32::from(primary_streams) << 10 +- | u32::from(linear_stream_array) << 15 +- | u32::from(interval) << 16 +- | u32::from(max_esit_payload_hi) << 24, +- ); +- input_context.device.endpoints[endp_i].b.write( +- max_error_count << 1 +- | u32::from(ep_ty) << 3 +- | u32::from(host_initiate_disable) << 7 +- | u32::from(max_burst_size) << 8 +- | u32::from(max_packet_size) << 16, +- ); ++ return Err(err); ++ } + +- input_context.device.endpoints[endp_i] +- .trl +- .write(ring_ptr as u32); +- input_context.device.endpoints[endp_i] +- .trh +- .write((ring_ptr >> 32) as u32); ++ // Tell the device about this 
configuration. ++ let skip_set_configuration = self ++ .port_states ++ .get(&port) ++ .map(|port_state| { ++ port_state ++ .quirks ++ .contains(crate::usb_quirks::UsbQuirkFlags::NO_SET_CONFIG) ++ }) ++ .unwrap_or(false); ++ ++ if !skip_set_configuration { ++ if let Err(err) = self.set_configuration(port, configuration_value).await { ++ let rollback_input_context_physical = match self.restore_configure_input_context( ++ port, ++ configure_snapshot, ++ &endpoint_snapshots, ++ ) { ++ Ok(physical) => physical, ++ Err(restore_err) => { ++ warn!( ++ "failed to restore configure input context after set_configuration failure: {:?}", ++ restore_err ++ ); ++ return Err(err); ++ } ++ }; + +- input_context.device.endpoints[endp_i] +- .c +- .write(u32::from(avg_trb_len) | (u32::from(max_esit_payload_lo) << 16)); ++ let (rollback_event_trb, rollback_command_trb) = self ++ .execute_command(|trb, cycle| { ++ trb.configure_endpoint(slot, rollback_input_context_physical, cycle) ++ }) ++ .await; ++ ++ self.event_handler_finished(); ++ ++ if let Err(rollback_err) = handle_event_trb( ++ "CONFIGURE_ENDPOINT_ROLLBACK", ++ &rollback_event_trb, ++ &rollback_command_trb, ++ ) { ++ warn!( ++ "failed to roll back CONFIGURE_ENDPOINT after set_configuration failure {:?}: {:?}", ++ err, ++ rollback_err ++ ); ++ } + +- log::debug!("initialized endpoint {}", endp_num); ++ return Err(err); ++ } + } + + { +- let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; +- let slot = port_state.slot; +- let input_context_physical = port_state.input_context.lock().unwrap().physical(); +- +- let (event_trb, command_trb) = self +- .execute_command(|trb, cycle| { +- trb.configure_endpoint(slot, input_context_physical, cycle) +- }) +- .await; +- +- //self.event_handler_finished(); +- +- handle_event_trb("CONFIGURE_ENDPOINT", &event_trb, &command_trb)?; ++ let mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(EBADFD))?; ++ port_state.cfg_idx = Some(configuration_value); ++ 
port_state.endpoint_states.retain(|endp_num, _| *endp_num == 0); ++ for (endp_num, endpoint_state) in staged_endpoint_states { ++ port_state.endpoint_states.insert(endp_num, endpoint_state); ++ } + } + +- // Tell the device about this configuration. +- self.set_configuration(port, configuration_value).await?; +- + Ok(()) + } + + async fn configure_endpoints(&self, port: PortId, json_buf: &[u8]) -> Result<()> { ++ let _op = self.begin_routable_operation(port)?; + let mut req: ConfigureEndpointsReq = + serde_json::from_slice(json_buf).or(Err(Error::new(EBADMSG)))?; + +@@ -1234,8 +1579,20 @@ impl Xhci { + + if let Some(interface_num) = req.interface_desc { + if let Some(alternate_setting) = req.alternate_setting { +- self.set_interface(port, interface_num, alternate_setting) +- .await?; ++ let skip_set_interface = self ++ .port_states ++ .get(&port) ++ .map(|port_state| { ++ port_state ++ .quirks ++ .contains(crate::usb_quirks::UsbQuirkFlags::NO_SET_INTF) ++ }) ++ .unwrap_or(false); ++ ++ if !skip_set_interface { ++ self.set_interface(port, interface_num, alternate_setting) ++ .await?; ++ } + } + } + +@@ -1432,7 +1789,7 @@ impl Xhci { + }, + ) + .await?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + let bytes_transferred = dma_buf + .as_ref() +@@ -1453,52 +1810,109 @@ impl Xhci { + let raw_dd = self.fetch_dev_desc(port_id, slot).await?; + log::debug!("port {} slot {} desc {:X?}", port_id, slot, raw_dd); + ++ let vendor = raw_dd.vendor; ++ let product = raw_dd.product; ++ let quirks = crate::usb_quirks::lookup_usb_quirks(vendor, product); ++ if !quirks.is_empty() { ++ log::info!( ++ "port {}: USB quirks for {:04x}:{:04x}: {:?}", ++ port_id, vendor, product, quirks ++ ); ++ } ++ + // Only fetch language IDs if we need to. Some devices will fail to return this descriptor + //TODO: also check configurations and interfaces for defined strings? 
++ let bad_descriptor = quirks.contains(crate::usb_quirks::UsbQuirkFlags::BAD_DESCRIPTOR); ++ + let lang_id = +- if raw_dd.manufacturer_str > 0 || raw_dd.product_str > 0 || raw_dd.serial_str > 0 { +- let lang_ids = self.fetch_lang_ids_desc(port_id, slot).await?; +- // Prefer US English, but fall back to first language ID, or zero +- let en_us_id = 0x409; +- if lang_ids.contains(&en_us_id) { +- en_us_id +- } else { +- match lang_ids.first() { +- Some(some) => *some, +- None => 0, ++ if !quirks.contains(crate::usb_quirks::UsbQuirkFlags::NO_STRING_FETCH) ++ && (raw_dd.manufacturer_str > 0 ++ || raw_dd.product_str > 0 ++ || raw_dd.serial_str > 0) ++ { ++ match self.fetch_lang_ids_desc(port_id, slot).await { ++ Ok(lang_ids) => { ++ // Prefer US English, but fall back to first language ID, or zero ++ let en_us_id = 0x409; ++ if lang_ids.contains(&en_us_id) { ++ en_us_id ++ } else { ++ match lang_ids.first() { ++ Some(some) => *some, ++ None => 0, ++ } ++ } ++ } ++ Err(err) if bad_descriptor => { ++ log::warn!( ++ "port {} slot {}: failed to fetch language IDs with BAD_DESCRIPTOR set: {}", ++ port_id, ++ slot, ++ err ++ ); ++ 0 + } ++ Err(err) => return Err(err), + } + } else { + 0 + }; + log::debug!("port {} using language ID 0x{:04x}", port_id, lang_id); + +- let (manufacturer_str, product_str, serial_str) = ( +- if raw_dd.manufacturer_str > 0 { +- Some( +- self.fetch_string_desc(port_id, slot, raw_dd.manufacturer_str, lang_id) +- .await?, +- ) +- } else { +- None +- }, +- if raw_dd.product_str > 0 { +- Some( +- self.fetch_string_desc(port_id, slot, raw_dd.product_str, lang_id) +- .await?, +- ) ++ let (manufacturer_str, product_str, serial_str) = ++ if quirks.contains(crate::usb_quirks::UsbQuirkFlags::NO_STRING_FETCH) { ++ (None, None, None) + } else { +- None +- }, +- if raw_dd.serial_str > 0 { +- Some( +- self.fetch_string_desc(port_id, slot, raw_dd.serial_str, lang_id) +- .await?, ++ ( ++ if raw_dd.manufacturer_str > 0 { ++ if bad_descriptor { ++ 
self.fetch_string_desc(port_id, slot, raw_dd.manufacturer_str, lang_id) ++ .await ++ .ok() ++ } else { ++ Some( ++ self.fetch_string_desc( ++ port_id, ++ slot, ++ raw_dd.manufacturer_str, ++ lang_id, ++ ) ++ .await?, ++ ) ++ } ++ } else { ++ None ++ }, ++ if raw_dd.product_str > 0 { ++ if bad_descriptor { ++ self.fetch_string_desc(port_id, slot, raw_dd.product_str, lang_id) ++ .await ++ .ok() ++ } else { ++ Some( ++ self.fetch_string_desc(port_id, slot, raw_dd.product_str, lang_id) ++ .await?, ++ ) ++ } ++ } else { ++ None ++ }, ++ if raw_dd.serial_str > 0 { ++ if bad_descriptor { ++ self.fetch_string_desc(port_id, slot, raw_dd.serial_str, lang_id) ++ .await ++ .ok() ++ } else { ++ Some( ++ self.fetch_string_desc(port_id, slot, raw_dd.serial_str, lang_id) ++ .await?, ++ ) ++ } ++ } else { ++ None ++ }, + ) +- } else { +- None +- }, +- ); ++ }; + log::debug!( + "manufacturer {:?} product {:?} serial {:?}", + manufacturer_str, +@@ -1508,14 +1922,39 @@ impl Xhci { + + //TODO let (bos_desc, bos_data) = self.fetch_bos_desc(port_id, slot).await?; + +- let supports_superspeed = false; +- //TODO usb::bos_capability_descs(bos_desc, &bos_data).any(|desc| desc.is_superspeed()); +- let supports_superspeedplus = false; +- //TODO usb::bos_capability_descs(bos_desc, &bos_data).any(|desc| desc.is_superspeedplus()); ++ let (supports_superspeed, supports_superspeedplus) = ++ if quirks.contains(crate::usb_quirks::UsbQuirkFlags::NO_BOS) { ++ (false, false) ++ } else { ++ match self.fetch_bos_desc(port_id, slot).await { ++ Ok((bos_desc, bos_data)) => ( ++ usb::bos_capability_descs(bos_desc, &bos_data) ++ .any(|desc| desc.is_superspeed()), ++ usb::bos_capability_descs(bos_desc, &bos_data) ++ .any(|desc| desc.is_superspeedplus()), ++ ), ++ Err(err) => { ++ log::debug!( ++ "port {} slot {}: failed to fetch BOS descriptor: {}", ++ port_id, ++ slot, ++ err ++ ); ++ (false, false) ++ } ++ } ++ }; + + let mut config_descs = SmallVec::new(); + +- for index in 0..raw_dd.configurations { ++ let 
configuration_indices: Vec = ++ if quirks.contains(crate::usb_quirks::UsbQuirkFlags::FORCE_ONE_CONFIG) { ++ vec![0] ++ } else { ++ (0..raw_dd.configurations).collect() ++ }; ++ ++ for index in configuration_indices { + debug!("Fetching the config descriptor at index {}", index); + let (desc, data) = self.fetch_config_desc(port_id, slot, index).await?; + log::debug!( +@@ -1541,6 +1980,12 @@ impl Xhci { + let mut iter = descriptors.into_iter().peekable(); + + while let Some(item) = iter.next() { ++ if quirks.contains(crate::usb_quirks::UsbQuirkFlags::HONOR_BNUMINTERFACES) ++ && interface_descs.len() >= desc.interfaces as usize ++ { ++ break; ++ } ++ + if let AnyDescriptor::Interface(idesc) = item { + let mut endpoints = SmallVec::<[EndpDesc; 4]>::new(); + let mut hid_descs = SmallVec::<[HidDesc; 1]>::new(); +@@ -1554,6 +1999,9 @@ impl Xhci { + } + Some(unexpected) => { + log::warn!("expected endpoint, got {:X?}", unexpected); ++ if bad_descriptor { ++ continue; ++ } + break; + } + None => break, +@@ -1578,8 +2026,16 @@ impl Xhci { + } + + interface_descs.push( +- self.new_if_desc(port_id, slot, idesc, endpoints, hid_descs, lang_id) +- .await?, ++ self.new_if_desc( ++ port_id, ++ slot, ++ idesc, ++ endpoints, ++ hid_descs, ++ lang_id, ++ quirks, ++ ) ++ .await?, + ); + } else { + log::warn!("expected interface, got {:?}", item); +@@ -1590,11 +2046,20 @@ impl Xhci { + + config_descs.push(ConfDesc { + kind: desc.kind, +- configuration: if desc.configuration_str > 0 { +- Some( ++ configuration: if quirks.contains(crate::usb_quirks::UsbQuirkFlags::NO_STRING_FETCH) ++ { ++ None ++ } else if desc.configuration_str > 0 { ++ if bad_descriptor { + self.fetch_string_desc(port_id, slot, desc.configuration_str, lang_id) +- .await?, +- ) ++ .await ++ .ok() ++ } else { ++ Some( ++ self.fetch_string_desc(port_id, slot, desc.configuration_str, lang_id) ++ .await?, ++ ) ++ } + } else { + None + }, +@@ -1856,7 +2321,7 @@ impl Xhci { + if (flags & O_DIRECTORY != 0) || (flags & O_STAT != 
0) { + let mut contents = Vec::new(); + +- write!(contents, "descriptors\nendpoints\n").unwrap(); ++ write!(contents, "descriptors\nendpoints\npm_state\nsuspend\nresume\n").unwrap(); + + if self.slot_state( + self.port_states +@@ -1893,6 +2358,14 @@ impl Xhci { + Ok(Handle::PortState(port_num)) + } + ++ fn open_handle_port_pm_state(&self, port_num: PortId, flags: usize) -> Result { ++ if flags & O_DIRECTORY != 0 && flags & O_STAT == 0 { ++ return Err(Error::new(ENOTDIR)); ++ } ++ ++ Ok(Handle::PortPmState(port_num)) ++ } ++ + /// implements open() for /port/endpoints + /// + /// # Arguments +@@ -2087,6 +2560,30 @@ impl Xhci { + Ok(Handle::DetachDevice(port_num)) + } + ++ fn open_handle_suspend_device(&self, port_num: PortId, flags: usize) -> Result { ++ if flags & O_DIRECTORY != 0 && flags & O_STAT == 0 { ++ return Err(Error::new(ENOTDIR)); ++ } ++ ++ if flags & O_RDWR != O_WRONLY && flags & O_STAT == 0 { ++ return Err(Error::new(EACCES)); ++ } ++ ++ Ok(Handle::SuspendDevice(port_num)) ++ } ++ ++ fn open_handle_resume_device(&self, port_num: PortId, flags: usize) -> Result { ++ if flags & O_DIRECTORY != 0 && flags & O_STAT == 0 { ++ return Err(Error::new(ENOTDIR)); ++ } ++ ++ if flags & O_RDWR != O_WRONLY && flags & O_STAT == 0 { ++ return Err(Error::new(EACCES)); ++ } ++ ++ Ok(Handle::ResumeDevice(port_num)) ++ } ++ + /// implements open() for /port/request + /// + /// # Arguments +@@ -2155,6 +2652,9 @@ impl SchemeSync for &Xhci { + SchemeParameters::PortState(port_number) => { + self.open_handle_port_state(port_number, flags)? + } ++ SchemeParameters::PortPmState(port_number) => { ++ self.open_handle_port_pm_state(port_number, flags)? ++ } + SchemeParameters::PortReq(port_number) => { + self.open_handle_port_request(port_number, flags)? + } +@@ -2173,6 +2673,12 @@ impl SchemeSync for &Xhci { + SchemeParameters::DetachDevice(port_number) => { + self.open_handle_detach_device(port_number, flags)? 
+ } ++ SchemeParameters::SuspendDevice(port_number) => { ++ self.open_handle_suspend_device(port_number, flags)? ++ } ++ SchemeParameters::ResumeDevice(port_number) => { ++ self.open_handle_resume_device(port_number, flags)? ++ } + }; + + let fd = self.next_handle.fetch_add(1, atomic::Ordering::Relaxed); +@@ -2203,7 +2709,11 @@ impl SchemeSync for &Xhci { + + //If we have a handle to the configure scheme, we need to mark it as write only. + match &*guard { +- Handle::ConfigureEndpoints(_) | Handle::AttachDevice(_) | Handle::DetachDevice(_) => { ++ Handle::ConfigureEndpoints(_) ++ | Handle::AttachDevice(_) ++ | Handle::DetachDevice(_) ++ | Handle::SuspendDevice(_) ++ | Handle::ResumeDevice(_) => { + stat.st_mode = stat.st_mode | 0o200; + } + _ => {} +@@ -2263,6 +2773,8 @@ impl SchemeSync for &Xhci { + Handle::ConfigureEndpoints(_) => Err(Error::new(EBADF)), + Handle::AttachDevice(_) => Err(Error::new(EBADF)), + Handle::DetachDevice(_) => Err(Error::new(EBADF)), ++ Handle::SuspendDevice(_) => Err(Error::new(EBADF)), ++ Handle::ResumeDevice(_) => Err(Error::new(EBADF)), + Handle::SchemeRoot => Err(Error::new(EBADF)), + + &mut Handle::Endpoint(port_num, endp_num, ref mut st) => match st { +@@ -2294,6 +2806,10 @@ impl SchemeSync for &Xhci { + + Ok(Xhci::::write_dyn_string(string, buf, offset)) + } ++ &mut Handle::PortPmState(port_num) => { ++ let ps = self.port_states.get(&port_num).ok_or(Error::new(EBADF))?; ++ Ok(Xhci::::write_dyn_string(ps.pm_state.as_str().as_bytes(), buf, offset)) ++ } + &mut Handle::PortReq(port_num, ref mut st) => { + let state = std::mem::replace(st, PortReqState::Tmp); + drop(guard); // release the lock +@@ -2333,6 +2849,14 @@ impl SchemeSync for &Xhci { + block_on(self.detach_device(port_num))?; + Ok(buf.len()) + } ++ &mut Handle::SuspendDevice(port_num) => { ++ block_on(self.suspend_device(port_num))?; ++ Ok(buf.len()) ++ } ++ &mut Handle::ResumeDevice(port_num) => { ++ block_on(self.resume_device(port_num))?; ++ Ok(buf.len()) ++ } + &mut 
Handle::Endpoint(port_num, endp_num, ref ep_file_ty) => match ep_file_ty { + EndpointHandleTy::Ctl => block_on(self.on_write_endp_ctl(port_num, endp_num, buf)), + EndpointHandleTy::Data => { +@@ -2356,6 +2880,59 @@ impl Xhci { + self.handles.remove(&fd); + } + ++ fn ensure_port_active(&self, port_num: PortId) -> Result<()> { ++ let port_state = self.port_states.get(&port_num).ok_or(Error::new(EBADFD))?; ++ if port_state.lifecycle.state() == super::PortLifecycleState::Detaching { ++ return Err(Error::new(EBUSY)); ++ } ++ ++ let pm_state = port_state.pm_state; ++ match pm_state { ++ super::PortPmState::Active => Ok(()), ++ super::PortPmState::Suspended => Err(Error::new(EBUSY)), ++ } ++ } ++ ++ pub async fn suspend_device(&self, port_num: PortId) -> Result<()> { ++ let _op = self.begin_attached_operation(port_num)?; ++ let mut port_state = self.port_states.get_mut(&port_num).ok_or(Error::new(EBADFD))?; ++ ++ if port_state ++ .quirks ++ .contains(crate::usb_quirks::UsbQuirkFlags::NO_SUSPEND) ++ { ++ return Err(Error::new(EOPNOTSUPP)); ++ } ++ ++ if port_state.pm_state != super::PortPmState::Active { ++ return Err(Error::new(EBUSY)); ++ } ++ ++ port_state.pm_state = super::PortPmState::Suspended; ++ Ok(()) ++ } ++ ++ pub async fn resume_device(&self, port_num: PortId) -> Result<()> { ++ let _op = self.begin_attached_operation(port_num)?; ++ let mut port_state = self.port_states.get_mut(&port_num).ok_or(Error::new(EBADFD))?; ++ ++ if port_state.pm_state == super::PortPmState::Active { ++ return Ok(()); ++ } ++ ++ let slot_state = self.slot_state(port_state.slot as usize); ++ if slot_state != SlotState::Addressed as u8 && slot_state != SlotState::Configured as u8 { ++ warn!( ++ "refusing to resume port {} while slot {} is in controller state {}", ++ port_num, port_state.slot, slot_state ++ ); ++ return Err(Error::new(EIO)); ++ } ++ ++ port_state.pm_state = super::PortPmState::Active; ++ Ok(()) ++ } ++ + pub fn get_endp_status(&self, port_num: PortId, endp_num: u8) -> 
Result { + let port_state = self.port_states.get(&port_num).ok_or(Error::new(EBADFD))?; + +@@ -2406,6 +2983,8 @@ impl Xhci { + endp_num: u8, + clear_feature: bool, + ) -> Result<()> { ++ self.ensure_port_active(port_num)?; ++ + if self.get_endp_status(port_num, endp_num)? != EndpointStatus::Halted { + return Err(Error::new(EPROTO)); + } +@@ -2531,7 +3110,7 @@ impl Xhci { + ) + }) + .await; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + handle_event_trb("SET_TR_DEQUEUE_PTR", &event_trb, &command_trb) + } +@@ -2541,10 +3120,14 @@ impl Xhci { + endp_num: u8, + buf: &[u8], + ) -> Result { ++ let _op = self.begin_routable_operation(port_num)?; + let mut port_state = self + .port_states + .get_mut(&port_num) + .ok_or(Error::new(EBADF))?; ++ if port_state.pm_state != super::PortPmState::Active { ++ return Err(Error::new(EBUSY)); ++ } + + let ep_if_state = &mut port_state + .endpoint_states +@@ -2562,6 +3145,7 @@ impl Xhci { + }, + XhciEndpCtlReq::Reset { no_clear_feature } => match ep_if_state { + EndpIfState::Init => { ++ drop(port_state); + self.on_req_reset_device(port_num, endp_num, !no_clear_feature) + .await? 
+ } +@@ -2631,6 +3215,9 @@ impl Xhci { + endp_num: u8, + buf: &[u8], + ) -> Result { ++ let _op = self.begin_routable_operation(port_num)?; ++ self.ensure_port_active(port_num)?; ++ + let mut port_state = self + .port_states + .get_mut(&port_num) +@@ -2732,6 +3319,9 @@ impl Xhci { + endp_num: u8, + buf: &mut [u8], + ) -> Result { ++ let _op = self.begin_routable_operation(port_num)?; ++ self.ensure_port_active(port_num)?; ++ + let mut port_state = self + .port_states + .get_mut(&port_num) +@@ -2832,6 +3422,64 @@ pub fn handle_transfer_event_trb(name: &str, event_trb: &Trb, transfer_trb: &Trb + Err(Error::new(EIO)) + } + } ++ ++fn apply_hub_tt_info(current_slot_c: u32, req: &ConfigureEndpointsReq) -> u32 { ++ const TT_THINK_TIME_MASK: u32 = 0x0003_0000; ++ const TT_THINK_TIME_SHIFT: u8 = 16; ++ ++ let mut slot_c = current_slot_c & !TT_THINK_TIME_MASK; ++ if req.hub_ports.is_some() { ++ if let Some(hub_think_time) = req.hub_think_time { ++ slot_c |= (u32::from(hub_think_time) << TT_THINK_TIME_SHIFT) & TT_THINK_TIME_MASK; ++ } ++ } ++ slot_c ++} ++ ++#[derive(Clone, Copy)] ++struct ConfigureContextSnapshot { ++ add_context: u32, ++ drop_context: u32, ++ control: u32, ++ slot_a: u32, ++ slot_b: u32, ++ slot_c: u32, ++} ++ ++#[derive(Clone, Copy)] ++struct EndpointContextSnapshot { ++ a: u32, ++ b: u32, ++ trl: u32, ++ trh: u32, ++ c: u32, ++} ++ ++impl EndpointContextSnapshot { ++ fn capture_values(a: u32, b: u32, trl: u32, trh: u32, c: u32) -> Self { ++ Self { a, b, trl, trh, c } ++ } ++ ++ fn restore(&self, ctx: &mut EndpointContext) { ++ ctx.a.write(self.a); ++ ctx.b.write(self.b); ++ ctx.trl.write(self.trl); ++ ctx.trh.write(self.trh); ++ ctx.c.write(self.c); ++ } ++} ++ ++#[derive(Clone, Copy)] ++struct EndpointProgram { ++ endp_num: u8, ++ endp_num_xhc: u8, ++ a: u32, ++ b: u32, ++ trl: u32, ++ trh: u32, ++ c: u32, ++} ++ + use lazy_static::lazy_static; + use std::ops::{Add, Div, Rem}; + +@@ -2845,3 +3493,26 @@ where + a / b + } + } ++ ++#[cfg(test)] ++mod tests 
{ ++ use super::{apply_hub_tt_info, ConfigureEndpointsReq}; ++ ++ #[test] ++ fn apply_hub_tt_info_only_sets_bits_for_hub_requests() { ++ let req = ConfigureEndpointsReq { ++ config_desc: 1, ++ interface_desc: None, ++ alternate_setting: None, ++ hub_ports: Some(4), ++ hub_think_time: Some(3), ++ }; ++ assert_eq!(apply_hub_tt_info(0, &req), 0x0003_0000); ++ ++ let no_hub = ConfigureEndpointsReq { ++ hub_ports: None, ++ ..req.clone() ++ }; ++ assert_eq!(apply_hub_tt_info(0x0003_0000, &no_hub), 0); ++ } ++} diff --git a/local/patches/base/redox.patch b/local/patches/base/redox.patch index 6715bd03..a58a28c8 100644 --- a/local/patches/base/redox.patch +++ b/local/patches/base/redox.patch @@ -1416,6 +1416,1158 @@ index 5a5040c3..5f1232bd 100644 + assert_eq!(dmi_contents(Some(&dmi_info), "unknown"), None); + } +} +diff --git a/drivers/acpid/src/main.rs b/drivers/acpid/src/main.rs +index 916e1864..52d8c8b4 100644 +--- a/drivers/acpid/src/main.rs ++++ b/drivers/acpid/src/main.rs +@@ -16,6 +16,7 @@ mod aml_physmem; + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + mod ec; + ++mod sleep; + mod scheme; + + #[derive(Debug, Error)] +diff --git a/drivers/acpid/src/sleep.rs b/drivers/acpid/src/sleep.rs +new file mode 100644 +index 00000000..f8095663 +--- /dev/null ++++ b/drivers/acpid/src/sleep.rs +@@ -0,0 +1,84 @@ ++use std::convert::TryFrom; ++ ++#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] ++pub enum SleepTarget { ++ S1, ++ S3, ++ S4, ++ S5, ++} ++ ++impl SleepTarget { ++ pub fn aml_method_name(self) -> &'static str { ++ match self { ++ Self::S1 => "_S1", ++ Self::S3 => "_S3", ++ Self::S4 => "_S4", ++ Self::S5 => "_S5", ++ } ++ } ++ ++ pub fn is_soft_off(self) -> bool { ++ matches!(self, Self::S5) ++ } ++} ++ ++impl TryFrom for SleepTarget { ++ type Error = (); ++ ++ fn try_from(value: u8) -> Result { ++ match value { ++ 1 => Ok(Self::S1), ++ 3 => Ok(Self::S3), ++ 4 => Ok(Self::S4), ++ 5 => Ok(Self::S5), ++ _ => Err(()), ++ } ++ } ++} ++ 
++#[derive(Clone, Copy, Debug, Eq, PartialEq)] ++pub enum SleepPhase { ++ Prepare, ++ Enter, ++ Resume, ++} ++ ++#[cfg(test)] ++mod tests { ++ use std::convert::TryFrom; ++ ++ use super::{SleepPhase, SleepTarget}; ++ ++ #[test] ++ fn sleep_target_maps_to_expected_aml_names() { ++ assert_eq!(SleepTarget::S1.aml_method_name(), "_S1"); ++ assert_eq!(SleepTarget::S3.aml_method_name(), "_S3"); ++ assert_eq!(SleepTarget::S4.aml_method_name(), "_S4"); ++ assert_eq!(SleepTarget::S5.aml_method_name(), "_S5"); ++ } ++ ++ #[test] ++ fn sleep_target_parsing_accepts_expected_states() { ++ assert_eq!(SleepTarget::try_from(1), Ok(SleepTarget::S1)); ++ assert_eq!(SleepTarget::try_from(3), Ok(SleepTarget::S3)); ++ assert_eq!(SleepTarget::try_from(4), Ok(SleepTarget::S4)); ++ assert_eq!(SleepTarget::try_from(5), Ok(SleepTarget::S5)); ++ assert_eq!(SleepTarget::try_from(2), Err(())); ++ } ++ ++ #[test] ++ fn only_s5_is_currently_treated_as_soft_off() { ++ assert!(!SleepTarget::S1.is_soft_off()); ++ assert!(!SleepTarget::S3.is_soft_off()); ++ assert!(!SleepTarget::S4.is_soft_off()); ++ assert!(SleepTarget::S5.is_soft_off()); ++ } ++ ++ #[test] ++ fn sleep_phase_debug_surface_is_stable() { ++ assert_eq!(format!("{:?}", SleepPhase::Prepare), "Prepare"); ++ assert_eq!(format!("{:?}", SleepPhase::Enter), "Enter"); ++ assert_eq!(format!("{:?}", SleepPhase::Resume), "Resume"); ++ } ++} +diff --git a/drivers/acpid/src/acpi.rs b/drivers/acpid/src/acpi.rs +index 58bcc22d..4f817811 100644 +--- a/drivers/acpid/src/acpi.rs ++++ b/drivers/acpid/src/acpi.rs +@@ -15,6 +15,7 @@ use common::io::{Io, Pio}; + + use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; + use thiserror::Error; ++use crate::sleep::{SleepPhase, SleepTarget}; + + use acpi::{ + aml::{namespace::AmlName, AmlError, Interpreter}, +@@ -952,7 +953,7 @@ pub struct AcpiContext { + fadt: Option, + pm1a_cnt_blk: u64, + pm1b_cnt_blk: u64, +- s5_values: RwLock>, ++ sleep_values: RwLock>, + reset_reg: Option, + reset_value: u8, + +@@ 
-1240,7 +1241,7 @@ impl AcpiContext { + fadt, + pm1a_cnt_blk, + pm1b_cnt_blk, +- s5_values: RwLock::new(None), ++ sleep_values: RwLock::new(std::collections::BTreeMap::new()), + reset_reg, + reset_value, + pci_fd: RwLock::new(None), +@@ -1385,23 +1386,35 @@ impl AcpiContext { + /// - search for PM1a + /// See https://forum.osdev.org/viewtopic.php?t=16990 for practical details + pub fn set_global_s_state(&self, state: u8) { +- if state != 5 { +- return; +- } +- let (slp_typa, slp_typb) = if let Some(values) = *self.s5_values.read() { +- values +- } else { +- let Ok(values) = self.evaluate_acpi_method("\\", "_S5", &[]) else { +- log::error!("Cannot set S-state, failed to evaluate \\_S5"); +- return; +- }; +- if values.len() < 2 { +- log::error!("Cannot set S-state, \\_S5 package too small"); +- return; +- } +- let values = (values[0] as u8, values[1] as u8); +- *self.s5_values.write() = Some(values); +- values +- }; ++ let target = match SleepTarget::try_from(state) { ++ Ok(target) => target, ++ Err(_) => { ++ log::error!("Cannot set S-state {state}, unsupported target"); ++ return; ++ } ++ }; ++ ++ if !target.is_soft_off() { ++ log::warn!( ++ "ACPI sleep groundwork only: {} is recognized but not implemented yet", ++ target.aml_method_name() ++ ); ++ return; ++ } ++ ++ log::info!("acpid: {:?} {}", SleepPhase::Prepare, target.aml_method_name()); ++ ++ let (slp_typa, slp_typb) = match self.sleep_type_values(target) { ++ Some(values) => values, ++ None => return, ++ }; + + let mut val = 1 << 13; + log::trace!("Shutdown SLP_TYPa {:X}, SLP_TYPb {:X}", slp_typa, slp_typb); +@@ -1412,6 +1425,7 @@ impl AcpiContext { + { + if self.pm1a_cnt_blk != 0 { + let port = self.pm1a_cnt_blk as u16; ++ log::info!("acpid: {:?} {} via PM1a", SleepPhase::Enter, target.aml_method_name()); + log::warn!("Shutdown with ACPI outw(0x{:X}, 0x{:X})", port, val); + Pio::::new(port).write(val); + } +@@ -1419,6 +1433,7 @@ impl AcpiContext { + if self.pm1b_cnt_blk != 0 { + let mut val_b = 1 << 13; + 
val_b |= u16::from(slp_typb); ++ log::info!("acpid: {:?} {} via PM1b", SleepPhase::Enter, target.aml_method_name()); + let port = self.pm1b_cnt_blk as u16; + log::warn!("Shutdown with ACPI outw(0x{:X}, 0x{:X})", port, val_b); + Pio::::new(port).write(val_b); +@@ -1438,6 +1453,23 @@ impl AcpiContext { + } + } + ++ fn sleep_type_values(&self, target: SleepTarget) -> Option<(u8, u8)> { ++ if let Some(values) = self.sleep_values.read().get(&target).copied() { ++ return Some(values); ++ } ++ ++ let method_name = target.aml_method_name(); ++ let Ok(values) = self.evaluate_acpi_method("\\", method_name, &[]) else { ++ log::error!("Cannot set S-state, failed to evaluate \\{method_name}"); ++ return None; ++ }; ++ if values.len() < 2 { ++ log::error!("Cannot set S-state, \\{method_name} package too small"); ++ return None; ++ } ++ let values = (values[0] as u8, values[1] as u8); ++ self.sleep_values.write().insert(target, values); ++ Some(values) ++ } ++ + pub fn acpi_reboot(&self) { + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + if let Some(reset_reg) = &self.reset_reg { +diff --git a/drivers/graphics/ihdgd/config.toml b/drivers/graphics/ihdgd/config.toml +diff --git a/drivers/storage/usbscsid/src/quirks.rs b/drivers/storage/usbscsid/src/quirks.rs +index 5051f1b0..ae4c2e46 100644 +--- a/drivers/storage/usbscsid/src/quirks.rs ++++ b/drivers/storage/usbscsid/src/quirks.rs +@@ -128,7 +128,7 @@ fn quirk_files() -> Option> { + } + + fn parse_runtime_quirks_from_toml(text: &str) -> Vec { +- let Ok(value) = text.parse::() else { ++ let Ok(value) = text.trim().parse::() else { + return Vec::new(); + }; +@@ -201,13 +201,7 @@ mod tests { + #[test] + fn runtime_toml_parser_keeps_supported_flags_and_skips_unknown_ones() { + let entries = parse_runtime_quirks_from_toml( +- r#" +- [[usb_storage_quirk]] +- vendor = 4660 +- product = 22136 +- flags = ["ignore_residue", "unknown_flag", "fix_capacity"] +- "#, ++ "[[usb_storage_quirk]]\nvendor = 4660\nproduct = 22136\nflags = 
[\"ignore_residue\", \"unknown_flag\", \"fix_capacity\"]\n", + ); + + assert_eq!(entries.len(), 1); +diff --git a/drivers/storage/usbscsid/src/scsi/cmds.rs b/drivers/storage/usbscsid/src/scsi/cmds.rs +index ddc12336..df5d0a0c 100644 +--- a/drivers/storage/usbscsid/src/scsi/cmds.rs ++++ b/drivers/storage/usbscsid/src/scsi/cmds.rs +@@ -265,6 +265,57 @@ impl Write10 { + } + } + ++#[repr(C, packed)] ++#[derive(Clone, Copy, Debug)] ++pub struct SynchronizeCache10 { ++ pub opcode: u8, ++ pub a: u8, ++ pub lba: u32, ++ pub group_num: u8, ++ pub blocks: u16, ++ pub control: u8, ++} ++unsafe impl plain::Plain for SynchronizeCache10 {} ++ ++impl SynchronizeCache10 { ++ pub const fn new(lba: u64, blocks: u16, control: u8) -> Self { ++ Self { ++ opcode: Opcode::SyncCache10 as u8, ++ a: 0, ++ lba: u32::to_be(lba as u32), ++ group_num: 0, ++ blocks: u16::to_be(blocks), ++ control, ++ } ++ } ++} ++ ++#[repr(C, packed)] ++#[derive(Clone, Copy, Debug)] ++pub struct SynchronizeCache16 { ++ pub opcode: u8, ++ pub a: u8, ++ pub lba: u64, ++ pub blocks: u32, ++ pub group_num: u8, ++ pub control: u8, ++} ++unsafe impl plain::Plain for SynchronizeCache16 {} ++ ++impl SynchronizeCache16 { ++ pub const fn new(lba: u64, blocks: u32, control: u8) -> Self { ++ Self { ++ opcode: Opcode::SyncCache16 as u8, ++ a: 0, ++ lba: u64::to_be(lba), ++ blocks: u32::to_be(blocks), ++ group_num: 0, ++ control, ++ } ++ } ++} ++ + #[repr(C, packed)] + #[derive(Clone, Copy, Debug)] + pub struct ModeSense6 { +diff --git a/drivers/storage/usbscsid/src/scsi/mod.rs b/drivers/storage/usbscsid/src/scsi/mod.rs +index b6d379d0..cf4a9707 100644 +--- a/drivers/storage/usbscsid/src/scsi/mod.rs ++++ b/drivers/storage/usbscsid/src/scsi/mod.rs +@@ -25,6 +25,8 @@ const REQUEST_SENSE_CMD_LEN: u8 = 6; + const MIN_INQUIRY_ALLOC_LEN: u16 = 5; + const MIN_REPORT_SUPP_OPCODES_ALLOC_LEN: u32 = 4; + const MAX_SECTORS_64_LIMIT: u64 = 64; ++const SYNC_CACHE10_CMD_LEN: usize = 10; ++const SYNC_CACHE16_CMD_LEN: usize = 16; +@@ -286,6 
+288,12 @@ impl Scsi { + pub fn cmd_write10(&mut self) -> Result<&mut cmds::Write10> { + parse_mut_bytes("WRITE(10) command", &mut self.command_buffer) + } ++ pub fn cmd_sync_cache10(&mut self) -> Result<&mut cmds::SynchronizeCache10> { ++ parse_mut_bytes("SYNCHRONIZE CACHE(10) command", &mut self.command_buffer) ++ } ++ pub fn cmd_sync_cache16(&mut self) -> Result<&mut cmds::SynchronizeCache16> { ++ parse_mut_bytes("SYNCHRONIZE CACHE(16) command", &mut self.command_buffer) ++ } + pub fn res_standard_inquiry_data(&self) -> Result<&StandardInquiryData> { + parse_bytes("standard inquiry data", &self.inquiry_buffer) + } +@@ -467,6 +475,10 @@ impl Scsi { + let status = protocol.send_command( + &self.command_buffer[..10], + DeviceReqData::Out(&buffer[..bytes_to_write]), + )?; ++ if self.quirks.contains(UsbStorageQuirkFlags::NEEDS_SYNC_CACHE) ++ && status.kind == SendCommandStatusKind::Success ++ { ++ self.sync_cache(protocol, lba, blocks_to_write)?; ++ } + Ok(status.bytes_transferred(bytes_to_write as u32)) + } else { +@@ -482,8 +494,83 @@ impl Scsi { + let status = protocol.send_command( + &self.command_buffer[..16], + DeviceReqData::Out(&buffer[..bytes_to_write]), + )?; ++ if self.quirks.contains(UsbStorageQuirkFlags::NEEDS_SYNC_CACHE) ++ && status.kind == SendCommandStatusKind::Success ++ { ++ self.sync_cache(protocol, lba, blocks_to_write)?; ++ } + Ok(status.bytes_transferred(bytes_to_write as u32)) + } + } ++ ++ fn sync_cache(&mut self, protocol: &mut dyn Protocol, lba: u64, blocks: u64) -> Result<()> { ++ let use_sync_cache10 = self.quirks.contains(UsbStorageQuirkFlags::INITIAL_READ10) ++ && u32::try_from(lba).is_ok() ++ && u16::try_from(blocks).is_ok(); ++ ++ let status = if use_sync_cache10 { ++ let sync = self.cmd_sync_cache10()?; ++ *sync = cmds::SynchronizeCache10::new( ++ lba, ++ u16::try_from(blocks) ++ .map_err(|_| ScsiError::Overflow("sync cache(10) block count overflow"))?, ++ 0, ++ ); ++ 
protocol.send_command(&self.command_buffer[..SYNC_CACHE10_CMD_LEN], DeviceReqData::NoData)? ++ } else { ++ let sync = self.cmd_sync_cache16()?; ++ *sync = cmds::SynchronizeCache16::new( ++ lba, ++ u32::try_from(blocks) ++ .map_err(|_| ScsiError::Overflow("sync cache(16) block count overflow"))?, ++ 0, ++ ); ++ protocol.send_command(&self.command_buffer[..SYNC_CACHE16_CMD_LEN], DeviceReqData::NoData)? ++ }; ++ ++ if status.kind == SendCommandStatusKind::Success { ++ return Ok(()); ++ } ++ ++ if let Ok(()) = self.get_ff_sense(protocol, cmds::RequestSense::MINIMAL_ALLOC_LEN) { ++ if let Ok(sense) = self.res_ff_sense_data() { ++ if sense.add_sense_code == 0x3A ++ || sense.add_sense_code == 0x20 ++ || (sense.add_sense_code == 0x04 && sense.add_sense_code_qual == 0x04) ++ || sense.sense_key() == cmds::SenseKey::IllegalRequest ++ { ++ return Ok(()); ++ } ++ } ++ } ++ ++ Err(ScsiError::ProtocolError(ProtocolError::ProtocolError( ++ "SYNCHRONIZE CACHE command failed", ++ ))) ++ } + } +@@ -527,3 +614,53 @@ impl<'a> BlkDescSlice<'a> { + } + } + } ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ use crate::protocol::SendCommandStatus; ++ use crate::scsi::opcodes::Opcode; ++ ++ struct MockProtocol { ++ commands: Vec>, ++ } ++ ++ impl MockProtocol { ++ fn new() -> Self { ++ Self { commands: Vec::new() } ++ } ++ } ++ ++ impl Protocol for MockProtocol { ++ fn send_command(&mut self, command: &[u8], _data: DeviceReqData) -> std::result::Result { ++ self.commands.push(command.to_vec()); ++ Ok(SendCommandStatus { residue: None, kind: SendCommandStatusKind::Success }) ++ } ++ } ++ ++ fn scsi_for_tests(quirks: UsbStorageQuirkFlags) -> Scsi { ++ Scsi { ++ command_buffer: [0u8; 16], ++ inquiry_buffer: [0u8; 259], ++ data_buffer: Vec::new(), ++ block_size: 512, ++ block_count: 1024, ++ quirks, ++ } ++ } ++ ++ #[test] ++ fn sync_cache_uses_10_byte_command_for_initial_read10_quirk() { ++ let mut scsi = scsi_for_tests(UsbStorageQuirkFlags::INITIAL_READ10); ++ let mut protocol = 
MockProtocol::new(); ++ scsi.sync_cache(&mut protocol, 7, 4).unwrap(); ++ assert_eq!(protocol.commands.len(), 1); ++ assert_eq!(protocol.commands[0].len(), 10); ++ assert_eq!(protocol.commands[0][0], Opcode::SyncCache10 as u8); ++ } ++ ++ #[test] ++ fn sync_cache_uses_16_byte_command_without_initial_read10_quirk() { ++ let mut scsi = scsi_for_tests(UsbStorageQuirkFlags::empty()); ++ let mut protocol = MockProtocol::new(); ++ scsi.sync_cache(&mut protocol, 7, 4).unwrap(); ++ assert_eq!(protocol.commands.len(), 1); ++ assert_eq!(protocol.commands[0].len(), 16); ++ assert_eq!(protocol.commands[0][0], Opcode::SyncCache16 as u8); ++ } ++} + +diff --git a/drivers/storage/usbscsid/src/quirks.rs b/drivers/storage/usbscsid/src/quirks.rs +index ae4c2e46..30380aea 100644 +--- a/drivers/storage/usbscsid/src/quirks.rs ++++ b/drivers/storage/usbscsid/src/quirks.rs +@@ -128,7 +128,7 @@ fn quirk_files() -> Option> { + } + + fn parse_runtime_quirks_from_toml(text: &str) -> Vec { +- let Ok(value) = text.parse::() else { ++ let Ok(value) = text.trim().parse::
() else { + return Vec::new(); + }; +@@ -201,13 +201,7 @@ mod tests { + #[test] + fn runtime_toml_parser_keeps_supported_flags_and_skips_unknown_ones() { + let entries = parse_runtime_quirks_from_toml( +- r#" +- [[usb_storage_quirk]] +- vendor = 4660 +- product = 22136 +- flags = ["ignore_residue", "unknown_flag", "fix_capacity"] +- "#, ++ "[[usb_storage_quirk]]\nvendor = 4660\nproduct = 22136\nflags = [\"ignore_residue\", \"unknown_flag\", \"fix_capacity\"]\n", + ); + + assert_eq!(entries.len(), 1); +diff --git a/drivers/storage/usbscsid/src/scsi/cmds.rs b/drivers/storage/usbscsid/src/scsi/cmds.rs +index df5d0a0c..0674c6b5 100644 +--- a/drivers/storage/usbscsid/src/scsi/cmds.rs ++++ b/drivers/storage/usbscsid/src/scsi/cmds.rs +@@ -265,6 +265,57 @@ impl Write10 { + } + } + ++#[repr(C, packed)] ++#[derive(Clone, Copy, Debug)] ++pub struct SynchronizeCache10 { ++ pub opcode: u8, ++ pub a: u8, ++ pub lba: u32, ++ pub group_num: u8, ++ pub blocks: u16, ++ pub control: u8, ++} ++unsafe impl plain::Plain for SynchronizeCache10 {} ++ ++impl SynchronizeCache10 { ++ pub const fn new(lba: u64, blocks: u16, control: u8) -> Self { ++ Self { ++ opcode: Opcode::SyncCache10 as u8, ++ a: 0, ++ lba: u32::to_be(lba as u32), ++ group_num: 0, ++ blocks: u16::to_be(blocks), ++ control, ++ } ++ } ++} ++ ++#[repr(C, packed)] ++#[derive(Clone, Copy, Debug)] ++pub struct SynchronizeCache16 { ++ pub opcode: u8, ++ pub a: u8, ++ pub lba: u64, ++ pub blocks: u32, ++ pub group_num: u8, ++ pub control: u8, ++} ++unsafe impl plain::Plain for SynchronizeCache16 {} ++ ++impl SynchronizeCache16 { ++ pub const fn new(lba: u64, blocks: u32, control: u8) -> Self { ++ Self { ++ opcode: Opcode::SyncCache16 as u8, ++ a: 0, ++ lba: u64::to_be(lba), ++ blocks: u32::to_be(blocks), ++ group_num: 0, ++ control, ++ } ++ } ++} ++ + #[repr(C, packed)] + #[derive(Clone, Copy, Debug)] + pub struct ModeSense6 { +diff --git a/drivers/storage/usbscsid/src/scsi/mod.rs b/drivers/storage/usbscsid/src/scsi/mod.rs +index 
cf4a9707..ad0565ca 100644 +--- a/drivers/storage/usbscsid/src/scsi/mod.rs ++++ b/drivers/storage/usbscsid/src/scsi/mod.rs +@@ -25,6 +25,8 @@ const REQUEST_SENSE_CMD_LEN: u8 = 6; + const MIN_INQUIRY_ALLOC_LEN: u16 = 5; + const MIN_REPORT_SUPP_OPCODES_ALLOC_LEN: u32 = 4; + const MAX_SECTORS_64_LIMIT: u64 = 64; ++const SYNC_CACHE10_CMD_LEN: usize = 10; ++const SYNC_CACHE16_CMD_LEN: usize = 16; +@@ -286,6 +288,12 @@ impl Scsi { + pub fn cmd_write10(&mut self) -> Result<&mut cmds::Write10> { + parse_mut_bytes("WRITE(10) command", &mut self.command_buffer) + } ++ pub fn cmd_sync_cache10(&mut self) -> Result<&mut cmds::SynchronizeCache10> { ++ parse_mut_bytes("SYNCHRONIZE CACHE(10) command", &mut self.command_buffer) ++ } ++ pub fn cmd_sync_cache16(&mut self) -> Result<&mut cmds::SynchronizeCache16> { ++ parse_mut_bytes("SYNCHRONIZE CACHE(16) command", &mut self.command_buffer) ++ } + pub fn res_standard_inquiry_data(&self) -> Result<&StandardInquiryData> { + parse_bytes("standard inquiry data", &self.inquiry_buffer) + } +@@ -467,6 +475,10 @@ impl Scsi { + let status = protocol.send_command( + &self.command_buffer[..10], + DeviceReqData::Out(&buffer[..bytes_to_write]), + )?; ++ if self.quirks.contains(UsbStorageQuirkFlags::NEEDS_SYNC_CACHE) ++ && status.kind == SendCommandStatusKind::Success ++ { ++ self.sync_cache(protocol, lba, blocks_to_write)?; ++ } + Ok(status.bytes_transferred(bytes_to_write as u32)) + } else { +@@ -482,8 +494,83 @@ impl Scsi { + let status = protocol.send_command( + &self.command_buffer[..16], + DeviceReqData::Out(&buffer[..bytes_to_write]), + )?; ++ if self.quirks.contains(UsbStorageQuirkFlags::NEEDS_SYNC_CACHE) ++ && status.kind == SendCommandStatusKind::Success ++ { ++ self.sync_cache(protocol, lba, blocks_to_write)?; ++ } + Ok(status.bytes_transferred(bytes_to_write as u32)) + } + } ++ ++ fn sync_cache(&mut self, protocol: &mut dyn Protocol, lba: u64, blocks: u64) -> Result<()> { ++ let use_sync_cache10 = 
self.quirks.contains(UsbStorageQuirkFlags::INITIAL_READ10) ++ && u32::try_from(lba).is_ok() ++ && u16::try_from(blocks).is_ok(); ++ ++ let status = if use_sync_cache10 { ++ let sync = self.cmd_sync_cache10()?; ++ *sync = cmds::SynchronizeCache10::new( ++ lba, ++ u16::try_from(blocks) ++ .map_err(|_| ScsiError::Overflow("sync cache(10) block count overflow"))?, ++ 0, ++ ); ++ protocol.send_command(&self.command_buffer[..SYNC_CACHE10_CMD_LEN], DeviceReqData::NoData)? ++ } else { ++ let sync = self.cmd_sync_cache16()?; ++ *sync = cmds::SynchronizeCache16::new( ++ lba, ++ u32::try_from(blocks) ++ .map_err(|_| ScsiError::Overflow("sync cache(16) block count overflow"))?, ++ 0, ++ ); ++ protocol.send_command(&self.command_buffer[..SYNC_CACHE16_CMD_LEN], DeviceReqData::NoData)? ++ }; ++ ++ if status.kind == SendCommandStatusKind::Success { ++ return Ok(()); ++ } ++ ++ if let Ok(()) = self.get_ff_sense(protocol, cmds::RequestSense::MINIMAL_ALLOC_LEN) { ++ if let Ok(sense) = self.res_ff_sense_data() { ++ if sense.add_sense_code == 0x3A ++ || sense.add_sense_code == 0x20 ++ || (sense.add_sense_code == 0x04 && sense.add_sense_code_qual == 0x04) ++ || sense.sense_key() == cmds::SenseKey::IllegalRequest ++ { ++ return Ok(()); ++ } ++ } ++ } ++ ++ Err(ScsiError::ProtocolError(ProtocolError::ProtocolError( ++ "SYNCHRONIZE CACHE command failed", ++ ))) ++ } + } +@@ -527,3 +614,53 @@ impl<'a> BlkDescSlice<'a> { + } + } + } ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ use crate::protocol::SendCommandStatus; ++ use crate::scsi::opcodes::Opcode; ++ ++ struct MockProtocol { ++ commands: Vec>, ++ } ++ ++ impl MockProtocol { ++ fn new() -> Self { ++ Self { commands: Vec::new() } ++ } ++ } ++ ++ impl Protocol for MockProtocol { ++ fn send_command(&mut self, command: &[u8], _data: DeviceReqData) -> std::result::Result { ++ self.commands.push(command.to_vec()); ++ Ok(SendCommandStatus { residue: None, kind: SendCommandStatusKind::Success }) ++ } ++ } ++ ++ fn scsi_for_tests(quirks: 
UsbStorageQuirkFlags) -> Scsi { ++ Scsi { ++ command_buffer: [0u8; 16], ++ inquiry_buffer: [0u8; 259], ++ data_buffer: Vec::new(), ++ block_size: 512, ++ block_count: 1024, ++ quirks, ++ } ++ } ++ ++ #[test] ++ fn sync_cache_uses_10_byte_command_for_initial_read10_quirk() { ++ let mut scsi = scsi_for_tests(UsbStorageQuirkFlags::INITIAL_READ10); ++ let mut protocol = MockProtocol::new(); ++ scsi.sync_cache(&mut protocol, 7, 4).unwrap(); ++ assert_eq!(protocol.commands.len(), 1); ++ assert_eq!(protocol.commands[0].len(), 10); ++ assert_eq!(protocol.commands[0][0], Opcode::SyncCache10 as u8); ++ } ++ ++ #[test] ++ fn sync_cache_uses_16_byte_command_without_initial_read10_quirk() { ++ let mut scsi = scsi_for_tests(UsbStorageQuirkFlags::empty()); ++ let mut protocol = MockProtocol::new(); ++ scsi.sync_cache(&mut protocol, 7, 4).unwrap(); ++ assert_eq!(protocol.commands.len(), 1); ++ assert_eq!(protocol.commands[0].len(), 16); ++ assert_eq!(protocol.commands[0][0], Opcode::SyncCache16 as u8); ++ } ++} + +diff --git a/drivers/usb/xhcid/src/xhci/device_enumerator.rs b/drivers/usb/xhcid/src/xhci/device_enumerator.rs +index 1f144ac9..00000000 100644 +--- a/drivers/usb/xhcid/src/xhci/device_enumerator.rs ++++ b/drivers/usb/xhcid/src/xhci/device_enumerator.rs +@@ -4,8 +4,12 @@ use crate::xhci::{PortId, Xhci}; + use common::io::Io; + use crossbeam_channel; + use log::{debug, info, warn}; + use std::sync::Arc; +-use std::time::Duration; ++use std::time::{Duration, Instant}; + use syscall::EAGAIN; ++ ++const DEFAULT_PORT_RESET_SETTLE_MS: u64 = 16; ++const RESET_DELAY_PORT_RESET_SETTLE_MS: u64 = 100; ++const HUB_SLOW_RESET_PORT_RESET_SETTLE_MS: u64 = 200; + + pub struct DeviceEnumerationRequest { + pub port_id: PortId, +@@ -25,10 +29,14 @@ impl DeviceEnumerator { + loop { + debug!("Start Device Enumerator Loop"); + let request = match self.request_queue.recv() { + Ok(req) => req, + Err(err) => { +- panic!("Failed to received an enumeration request! 
error: {}", err) ++ warn!( ++ "device enumerator stopping after request queue closed: {}", ++ err ++ ); ++ break; + } + }; + +@@ -64,13 +72,12 @@ impl DeviceEnumerator { + //If the port isn't enabled (i.e. it's a USB2 port), we need to reset it if it isn't resetting already + //A USB3 port won't generate a Connect Status Change until it's already enabled, so this check + //will always be skipped for USB3 ports + if !flags.contains(PortFlags::PED) { +- let disabled_state = flags.contains(PortFlags::PP) +- && flags.contains(PortFlags::CCS) +- && !flags.contains(PortFlags::PED) +- && !flags.contains(PortFlags::PR); ++ let disabled_state = Self::port_is_disabled(&flags); + + if !disabled_state { +- panic!( +- "Port {} isn't in the disabled state! Current flags: {:?}", ++ warn!( ++ "Port {} never reached the disabled state before reset-driven enumeration; current flags: {:?}", + port_id, flags + ); ++ continue; + } else { + debug!("Port {} has entered the disabled state.", port_id); + } +@@ -89,17 +96,15 @@ impl DeviceEnumerator { + + port.clear_prc(); + +- let delay_ms = if early_quirks +- .contains(crate::usb_quirks::UsbQuirkFlags::HUB_SLOW_RESET) +- { +- 200 +- } else if early_quirks.contains(crate::usb_quirks::UsbQuirkFlags::RESET_DELAY) { +- 100 +- } else { +- 16 +- }; +- +- std::thread::sleep(Duration::from_millis(delay_ms)); // Some devices need extra time to settle after reset. ++ } ++ ++ let flags = self.wait_for_port_enabled_state( ++ port_array_index, ++ Duration::from_millis(Self::port_reset_settle_delay_ms(early_quirks)), ++ ); + +- let flags = port.flags(); +- +- let enabled_state = flags.contains(PortFlags::PP) +- && flags.contains(PortFlags::CCS) +- && flags.contains(PortFlags::PED) +- && !flags.contains(PortFlags::PR); ++ let enabled_state = Self::port_is_enabled(&flags); + + if !enabled_state { + warn!( +- "Port {} isn't in the enabled state! 
Current flags: {:?}", ++ "Port {} isn't in the enabled state after bounded reset settle; current flags: {:?}", + port_id, flags + ); ++ continue; + } else { +@@ -140,4 +145,47 @@ impl DeviceEnumerator { + } + } + } ++ ++ fn port_reset_settle_delay_ms(quirks: crate::usb_quirks::UsbQuirkFlags) -> u64 { ++ if quirks.contains(crate::usb_quirks::UsbQuirkFlags::HUB_SLOW_RESET) { ++ HUB_SLOW_RESET_PORT_RESET_SETTLE_MS ++ } else if quirks.contains(crate::usb_quirks::UsbQuirkFlags::RESET_DELAY) { ++ RESET_DELAY_PORT_RESET_SETTLE_MS ++ } else { ++ DEFAULT_PORT_RESET_SETTLE_MS ++ } ++ } ++ ++ fn port_is_disabled(flags: &PortFlags) -> bool { ++ flags.contains(PortFlags::PP) ++ && flags.contains(PortFlags::CCS) ++ && !flags.contains(PortFlags::PED) ++ && !flags.contains(PortFlags::PR) ++ } ++ ++ fn port_is_enabled(flags: &PortFlags) -> bool { ++ flags.contains(PortFlags::PP) ++ && flags.contains(PortFlags::CCS) ++ && flags.contains(PortFlags::PED) ++ && !flags.contains(PortFlags::PR) ++ } ++ ++ fn wait_for_port_enabled_state( ++ &self, ++ port_array_index: usize, ++ settle_timeout: Duration, ++ ) -> PortFlags { ++ let start = Instant::now(); ++ ++ loop { ++ let flags = { ++ let ports = self ++ .hci ++ .ports ++ .lock() ++ .unwrap_or_else(|poisoned| poisoned.into_inner()); ++ ports[port_array_index].flags() ++ }; ++ ++ if Self::port_is_enabled(&flags) ++ || !flags.contains(PortFlags::PR) ++ || start.elapsed() >= settle_timeout ++ { ++ return flags; ++ } ++ ++ std::thread::sleep(Duration::from_millis(1)); ++ } ++ } +diff --git a/drivers/usb/xhcid/src/xhci/device_enumerator.rs b/drivers/usb/xhcid/src/xhci/device_enumerator.rs +index 00000000..00000000 100644 +--- a/drivers/usb/xhcid/src/xhci/device_enumerator.rs ++++ b/drivers/usb/xhcid/src/xhci/device_enumerator.rs +@@ -46,7 +46,11 @@ impl DeviceEnumerator { + debug!("Device Enumerator request for port {}", port_id); + + let (len, flags) = { +- let ports = self.hci.ports.lock().unwrap(); ++ let ports = self ++ .hci ++ .ports ++ 
.lock() ++ .unwrap_or_else(|poisoned| poisoned.into_inner()); + + let len = ports.len(); + +@@ -86,7 +90,11 @@ impl DeviceEnumerator { + debug!("Received a device connect on port {}, but it's not enabled. Resetting the port.", port_id); + if let Err(err) = self.hci.reset_port(port_id) { + warn!( + "failed to reset port {} before enumeration; skipping attach: {}", + port_id, err + ); + continue; + } + +- let mut ports = self.hci.ports.lock().unwrap(); ++ let mut ports = self ++ .hci ++ .ports ++ .lock() ++ .unwrap_or_else(|poisoned| poisoned.into_inner()); + let port = &mut ports[port_array_index]; + + port.clear_prc(); +@@ -144,10 +152,16 @@ impl DeviceEnumerator { + let result = futures::executor::block_on(self.hci.detach_device(port_id)); + match result { + Ok(was_connected) => { + if was_connected { + info!("Device on port {} was detached", port_id); ++ } else { ++ debug!( ++ "Ignoring duplicate or out-of-order detach event for unattached port {}", ++ port_id ++ ); + } + } + Err(err) => { +- warn!("processing of device attach request failed! Error: {}", err); ++ warn!("processing of device detach request failed! Error: {}", err); + } + } +diff --git a/drivers/usb/xhcid/src/xhci/mod.rs b/drivers/usb/xhcid/src/xhci/mod.rs +index c53cb59f..814fdb4f 100644 +--- a/drivers/usb/xhcid/src/xhci/mod.rs ++++ b/drivers/usb/xhcid/src/xhci/mod.rs +@@ -307,6 +307,7 @@ struct PortState { + slot: u8, + protocol_speed: &'static ProtocolSpeed, + cfg_idx: Option, ++ active_alternates: BTreeMap, + input_context: Mutex>>, + dev_desc: Option, + endpoint_states: BTreeMap, +@@ -324,29 +325,37 @@ pub(crate) enum PortPmState { + impl PortState { + //TODO: fetch using endpoint number instead + fn get_endp_desc(&self, endp_idx: u8) -> Option<&EndpDesc> { +- let cfg_idx = self.cfg_idx?; +- let config_desc = self +- .dev_desc +- .as_ref()? 
+- .config_descs +- .iter() +- .find(|desc| desc.configuration_value == cfg_idx)?; +- let mut endp_count = 0; +- for if_desc in config_desc.interface_descs.iter() { +- let active_alternate = self +- .active_alternates +- .get(&if_desc.number) +- .copied() +- .unwrap_or(0); +- if if_desc.alternate_setting != active_alternate { +- continue; +- } +- for endp_desc in if_desc.endpoints.iter() { +- if endp_idx == endp_count { +- return Some(endp_desc); +- } +- endp_count += 1; +- } +- } +- None ++ active_endpoint_desc( ++ self.dev_desc.as_ref()?, ++ self.cfg_idx?, ++ &self.active_alternates, ++ endp_idx, ++ ) + } + } ++ ++fn active_configuration<'a>(dev_desc: &'a DevDesc, cfg_idx: u8) -> Option<&'a ConfDesc> { ++ dev_desc ++ .config_descs ++ .iter() ++ .find(|desc| desc.configuration_value == cfg_idx) ++} ++ ++fn active_endpoint_desc<'a>(dev_desc: &'a DevDesc, cfg_idx: u8, active_alternates: &BTreeMap, endp_idx: u8) -> Option<&'a EndpDesc> { ++ let config_desc = active_configuration(dev_desc, cfg_idx)?; ++ let mut endp_count = 0; ++ for if_desc in config_desc.interface_descs.iter() { ++ let active_alternate = active_alternates.get(&if_desc.number).copied().unwrap_or(0); ++ if if_desc.alternate_setting != active_alternate { ++ continue; ++ } ++ for endp_desc in if_desc.endpoints.iter() { ++ if endp_idx == endp_count { ++ return Some(endp_desc); ++ } ++ endp_count += 1; ++ } ++ } ++ None ++} +@@ -872,6 +881,7 @@ impl Xhci { + protocol_speed, + input_context: Mutex::new(input), + dev_desc: None, ++ active_alternates: BTreeMap::new(), + cfg_idx: None, + endpoint_states: std::iter::once(( + 0, +@@ -1516,6 +1526,67 @@ struct DriversConfig { + drivers: Vec, + } ++ ++#[cfg(test)] ++mod tests { ++ use super::{active_endpoint_desc, BTreeMap, ConfDesc, DevDesc, EndpDesc, IfDesc}; ++ use crate::driver_interface::EndpointTy; ++ use smallvec::smallvec; ++ ++ fn endp(address: u8, attributes: u8) -> EndpDesc { ++ EndpDesc { kind: 5, address, attributes, max_packet_size: 64, interval: 1, 
ssc: None, sspc: None } ++ } ++ ++ fn if_desc(number: u8, alternate_setting: u8, endpoints: Vec) -> IfDesc { ++ IfDesc { ++ kind: 4, ++ number, ++ alternate_setting, ++ class: 3, ++ sub_class: 1, ++ protocol: 1, ++ interface_str: None, ++ endpoints: endpoints.into_iter().collect(), ++ hid_descs: smallvec![], ++ } ++ } ++ ++ fn sample_dev_desc() -> DevDesc { ++ DevDesc { ++ kind: 1, ++ usb: 0x0200, ++ class: 0, ++ sub_class: 0, ++ protocol: 0, ++ packet_size: 64, ++ vendor: 0x1234, ++ product: 0x5678, ++ release: 0x0100, ++ manufacturer_str: None, ++ product_str: None, ++ serial_str: None, ++ config_descs: smallvec![ConfDesc { kind: 2, configuration_value: 1, configuration: None, attributes: 0x80, max_power: 50, interface_descs: smallvec![ if_desc(0, 0, vec![endp(0x81, 0x03)]), if_desc(0, 1, vec![endp(0x82, 0x03), endp(0x02, 0x03)]), if_desc(1, 0, vec![endp(0x83, 0x02)]), ], }], ++ } ++ } ++ ++ #[test] ++ fn active_endpoint_desc_uses_default_alternates_initially() { ++ let dev_desc = sample_dev_desc(); ++ let active = BTreeMap::new(); ++ let first = active_endpoint_desc(&dev_desc, 1, &active, 0).expect("endpoint 0"); ++ let second = active_endpoint_desc(&dev_desc, 1, &active, 1).expect("endpoint 1"); ++ assert_eq!(first.address, 0x81); ++ assert_eq!(first.ty(), EndpointTy::Interrupt); ++ assert_eq!(second.address, 0x83); ++ assert_eq!(second.ty(), EndpointTy::Bulk); ++ assert!(active_endpoint_desc(&dev_desc, 1, &active, 2).is_none()); ++ } ++ ++ #[test] ++ fn active_endpoint_desc_switches_to_selected_alternate() { ++ let dev_desc = sample_dev_desc(); ++ let mut active = BTreeMap::new(); ++ active.insert(0, 1); ++ let first = active_endpoint_desc(&dev_desc, 1, &active, 0).expect("endpoint 0"); ++ let second = active_endpoint_desc(&dev_desc, 1, &active, 1).expect("endpoint 1"); ++ let third = active_endpoint_desc(&dev_desc, 1, &active, 2).expect("endpoint 2"); ++ assert_eq!(first.address, 0x82); ++ assert_eq!(second.address, 0x02); ++ assert_eq!(third.address, 0x83); 
++ } ++} + +diff --git a/drivers/usb/xhcid/src/xhci/scheme.rs b/drivers/usb/xhcid/src/xhci/scheme.rs +--- a/drivers/usb/xhcid/src/xhci/scheme.rs ++++ b/drivers/usb/xhcid/src/xhci/scheme.rs +@@ -3487,6 +3487,14 @@ impl EndpointContextSnapshot { + fn capture_values(a: u32, b: u32, trl: u32, trh: u32, c: u32) -> Self { + Self { a, b, trl, trh, c } + } ++ ++ fn restore(&self, ctx: &mut EndpointContext) { ++ ctx.a.write(self.a); ++ ctx.b.write(self.b); ++ ctx.trl.write(self.trl); ++ ctx.trh.write(self.trh); ++ ctx.c.write(self.c); ++ } + } +@@ -1171,7 +1171,9 @@ impl XhciScheme + input_context.device.slot.c.write(snapshot.slot_c); + + for (endp_i, endp_snapshot) in endpoint_snapshots { +- endp_snapshot.restore(&mut input_context.device.endpoints[*endp_i]); ++ let endpoint_ptr = core::ptr::addr_of_mut!(input_context.device.endpoints[*endp_i]); ++ let mut endpoint = unsafe { core::ptr::read_unaligned(endpoint_ptr) }; ++ endp_snapshot.restore(&mut endpoint); ++ unsafe { core::ptr::write_unaligned(endpoint_ptr, endpoint) }; + } + + Ok(input_context.physical()) +index b0fb9b85..bba6f232 100644 +--- a/drivers/usb/xhcid/src/xhci/scheme.rs ++++ b/drivers/usb/xhcid/src/xhci/scheme.rs +@@ -1105,11 +1105,28 @@ impl Xhci { + .find(|desc| desc.configuration_value == req.config_desc) + .ok_or(Error::new(EBADFD))?; ++ let configuration_value = config_desc.configuration_value; ++ ++ let interface_layout = config_desc ++ .interface_descs ++ .iter() ++ .map(|if_desc| { ++ ( ++ if_desc.number, ++ if_desc.alternate_setting, ++ if_desc.endpoints.iter().map(|endp| *endp).collect::>(), ++ ) ++ }) ++ .collect::>(); + +- //TODO: USE ENDPOINTS FROM ALL INTERFACES ++ port_state.active_alternates.clear(); ++ for (if_num, _, _) in &interface_layout { ++ port_state.active_alternates.entry(*if_num).or_insert(0); ++ } ++ + let mut endp_desc_count = 0; + let mut new_context_entries = 1; +- for if_desc in config_desc.interface_descs.iter() { +- for endpoint in if_desc.endpoints.iter() { ++ for (if_num, 
alternate_setting, endpoints) in &interface_layout { ++ let active_alternate = port_state.active_alternates.get(if_num).copied().unwrap_or(0); ++ if *alternate_setting != active_alternate { ++ continue; ++ } ++ for endpoint in endpoints.iter() { + endp_desc_count += 1; + let entry = Self::endp_num_to_dci(endp_desc_count, endpoint); + if entry > new_context_entries { +@@ -1128,7 +1145,7 @@ impl Xhci { + ( + endp_desc_count, + new_context_entries, +- config_desc.configuration_value, ++ configuration_value, + ) + }; +@@ -1397,6 +1414,10 @@ impl Xhci { + if !skip_set_interface { + self.set_interface(port, interface_num, alternate_setting) + .await?; ++ } ++ ++ if let Some(mut port_state) = self.port_states.get_mut(&port) { ++ port_state.active_alternates.insert(interface_num, alternate_setting); + } + } + } + diff --git a/drivers/graphics/ihdgd/config.toml b/drivers/graphics/ihdgd/config.toml index acbb4e78..210731ae 100644 --- a/drivers/graphics/ihdgd/config.toml @@ -1727,6 +2879,64 @@ index 9018dc6b..2721c4fd 100644 pub fn irq(&mut self) { diff --git a/drivers/input/usbhidd/src/main.rs b/drivers/input/usbhidd/src/main.rs +diff --git a/drivers/input/ps2d/src/controller.rs b/drivers/input/ps2d/src/controller.rs +index 561aa527..0310a367 100644 +--- a/drivers/input/ps2d/src/controller.rs ++++ b/drivers/input/ps2d/src/controller.rs +@@ -283,8 +283,27 @@ impl Ps2 { + status_bits + ); + } + ++ let flushed = self.flush_output(); ++ if flushed != 0 { ++ debug!("ps/2 controller probe drained {} stale byte(s)", flushed); ++ } + + self.config().is_ok() + } ++ ++ pub fn flush_output(&mut self) -> usize { ++ let mut flushed = 0; ++ while let Some((keyboard, data)) = self.next() { ++ flushed += 1; ++ trace!( ++ "ps/2 flush discarded {:02X} from {} channel", ++ data, ++ if keyboard { "keyboard" } else { "mouse" } ++ ); ++ } ++ flushed ++ } + + pub fn init_keyboard(&mut self) -> Result<(), Error> { + let mut b; +@@ -325,6 +344,11 @@ impl Ps2 { + 
self.command(Command::DisableSecond)?; + } + ++ let flushed = self.flush_output(); ++ if flushed != 0 { ++ debug!("ps/2 init discarded {} stale byte(s) before config", flushed); ++ } ++ + // Disable clocks, disable interrupts, and disable translate + { + // Since the default config may have interrupts enabled, and the kernel may eat up +@@ -358,6 +382,11 @@ impl Ps2 { + warn!("self test unexpected value: {:02X}", r); + } + } ++ ++ let flushed = self.flush_output(); ++ if flushed != 0 { ++ debug!("ps/2 init discarded {} byte(s) after controller self-test", flushed); ++ } + + // Initialize keyboard + if let Err(err) = self.init_keyboard() { + +diff --git a/drivers/input/usbhidd/src/main.rs b/drivers/input/usbhidd/src/main.rs index 15c5b778..c67fb8bc 100644 --- a/drivers/input/usbhidd/src/main.rs +++ b/drivers/input/usbhidd/src/main.rs @@ -3507,6 +4717,306 @@ index 790abea6..b6d379d0 100644 } #[derive(Debug)] diff --git a/drivers/usb/usbhubd/src/main.rs b/drivers/usb/usbhubd/src/main.rs +index 0e58542d..b13bb58a 100644 +--- a/drivers/usb/usbhubd/src/main.rs ++++ b/drivers/usb/usbhubd/src/main.rs +@@ -84,7 +84,7 @@ fn main() -> Result<(), Box> { + })?; + + // Read hub descriptor +- let (ports, usb_3) = if desc.major_version() >= 3 { ++ let (ports, usb_3, hub_think_time) = if desc.major_version() >= 3 { + // USB 3.0 hubs + let mut hub_desc = usb::HubDescriptorV3::default(); + handle +@@ -101,7 +101,7 @@ fn main() -> Result<(), Box> { + "Failed to read USB 3 hub descriptor for port {port_id}: {err}" + )) + })?; +- (hub_desc.ports, true) ++ (hub_desc.ports, true, None) + } else { + // USB 2.0 and earlier hubs + let mut hub_desc = usb::HubDescriptorV2::default(); +@@ -119,7 +119,7 @@ fn main() -> Result<(), Box> { + "Failed to read USB 2 hub descriptor for port {port_id}: {err}" + )) + })?; +- (hub_desc.ports, false) ++ (hub_desc.ports, false, hub_desc.tt_think_time(desc.protocol)) + }; +@@ -128,6 +128,7 @@ fn main() -> Result<(), Box> { + interface_desc: None, //TODO: 
stalls on USB 3 hub: Some(interface_num), + alternate_setting: None, //TODO: stalls on USB 3 hub: Some(if_desc.alternate_setting), + hub_ports: Some(ports), ++ hub_think_time, + }) + .map_err(|err| { + other_error(format!( + +diff --git a/drivers/usb/xhcid/src/usb/hub.rs b/drivers/usb/xhcid/src/usb/hub.rs +index 2d278320..fb02b17b 100644 +--- a/drivers/usb/xhcid/src/usb/hub.rs ++++ b/drivers/usb/xhcid/src/usb/hub.rs +@@ -17,6 +17,23 @@ unsafe impl plain::Plain for HubDescriptorV2 {} + + impl HubDescriptorV2 { + pub const DESCRIPTOR_KIND: u8 = 0x29; ++ ++ pub fn tt_think_time(self, device_protocol: u8) -> Option { ++ const HUB_CHAR_TTTT: u16 = 0x0060; ++ const HUB_TTTT_8_BITS: u16 = 0x0000; ++ const HUB_TTTT_16_BITS: u16 = 0x0020; ++ const HUB_TTTT_24_BITS: u16 = 0x0040; ++ const HUB_TTTT_32_BITS: u16 = 0x0060; ++ ++ match self.characteristics & HUB_CHAR_TTTT { ++ HUB_TTTT_8_BITS if device_protocol != 0 => Some(0), ++ HUB_TTTT_16_BITS => Some(1), ++ HUB_TTTT_24_BITS => Some(2), ++ HUB_TTTT_32_BITS => Some(3), ++ _ => None, ++ } ++ } + } +@@ -196,3 +213,23 @@ impl HubPortStatus { + } + } + } ++ ++#[cfg(test)] ++mod tests { ++ use super::HubDescriptorV2; ++ ++ #[test] ++ fn usb2_hub_tt_think_time_decodes_linux_compatible_values() { ++ let mut hub = HubDescriptorV2::default(); ++ ++ hub.characteristics = 0x0000; ++ assert_eq!(hub.tt_think_time(0), None); ++ assert_eq!(hub.tt_think_time(1), Some(0)); ++ ++ hub.characteristics = 0x0020; ++ assert_eq!(hub.tt_think_time(0), Some(1)); ++ ++ hub.characteristics = 0x0040; ++ assert_eq!(hub.tt_think_time(0), Some(2)); ++ ++ hub.characteristics = 0x0060; ++ assert_eq!(hub.tt_think_time(0), Some(3)); ++ } ++} + +diff --git a/drivers/usb/xhcid/src/xhci/scheme.rs b/drivers/usb/xhcid/src/xhci/scheme.rs +index 627d33a7..7eb553ae 100644 +--- a/drivers/usb/xhcid/src/xhci/scheme.rs ++++ b/drivers/usb/xhcid/src/xhci/scheme.rs +@@ -1196,11 +1196,8 @@ impl Xhci { + // Set hub data + current_slot_a &= !(1 << 26); + current_slot_b &= 
!HUB_PORTS_MASK; +- current_slot_c &= !TT_THINK_TIME_MASK; + if let Some(hub_ports) = req.hub_ports { + current_slot_a |= 1 << 26; + current_slot_b |= (u32::from(hub_ports) << HUB_PORTS_SHIFT) & HUB_PORTS_MASK; +- if let Some(hub_think_time) = req.hub_think_time { +- current_slot_c |= (u32::from(hub_think_time) << TT_THINK_TIME_SHIFT) & TT_THINK_TIME_MASK; +- } + } + current_slot_c = apply_hub_tt_info(current_slot_c, req); + +@@ -3250,6 +3247,21 @@ fn resolve_active_alternates( + active + } + ++fn apply_hub_tt_info(current_slot_c: u32, req: &ConfigureEndpointsReq) -> u32 { ++ const TT_THINK_TIME_MASK: u32 = 0x0003_0000; ++ const TT_THINK_TIME_SHIFT: u8 = 16; ++ ++ let mut slot_c = current_slot_c & !TT_THINK_TIME_MASK; ++ if req.hub_ports.is_some() { ++ if let Some(hub_think_time) = req.hub_think_time { ++ slot_c |= (u32::from(hub_think_time) << TT_THINK_TIME_SHIFT) & TT_THINK_TIME_MASK; ++ } ++ } ++ slot_c ++} ++ + use lazy_static::lazy_static; + use std::ops::{Add, Div, Rem}; +@@ -3283,4 +3295,18 @@ mod tests { + assert_eq!(resolved.get(&0), Some(&1)); + assert_eq!(resolved.get(&1), Some(&2)); + } ++ ++ #[test] ++ fn apply_hub_tt_info_only_sets_bits_for_hub_requests() { ++ let req = ConfigureEndpointsReq { ++ config_desc: 1, ++ interface_desc: None, ++ alternate_setting: None, ++ hub_ports: Some(4), ++ hub_think_time: Some(3), ++ }; ++ assert_eq!(apply_hub_tt_info(0, &req), 0x0003_0000); ++ ++ let no_hub = ConfigureEndpointsReq { hub_ports: None, ..req.clone() }; ++ assert_eq!(apply_hub_tt_info(0x0003_0000, &no_hub), 0); ++ } + } + +diff --git a/drivers/usb/usbhubd/src/main.rs b/drivers/usb/usbhubd/src/main.rs +index 0e58542d..b13bb58a 100644 +--- a/drivers/usb/usbhubd/src/main.rs ++++ b/drivers/usb/usbhubd/src/main.rs +@@ -84,7 +84,7 @@ fn main() -> Result<(), Box> { + })?; + + // Read hub descriptor +- let (ports, usb_3) = if desc.major_version() >= 3 { ++ let (ports, usb_3, hub_think_time) = if desc.major_version() >= 3 { + // USB 3.0 hubs + let mut hub_desc = 
usb::HubDescriptorV3::default(); + handle +@@ -101,7 +101,7 @@ fn main() -> Result<(), Box> { + "Failed to read USB 3 hub descriptor for port {port_id}: {err}" + )) + })?; +- (hub_desc.ports, true) ++ (hub_desc.ports, true, None) + } else { + // USB 2.0 and earlier hubs + let mut hub_desc = usb::HubDescriptorV2::default(); +@@ -119,7 +119,7 @@ fn main() -> Result<(), Box> { + "Failed to read USB 2 hub descriptor for port {port_id}: {err}" + )) + })?; +- (hub_desc.ports, false) ++ (hub_desc.ports, false, hub_desc.tt_think_time(desc.protocol)) + }; +@@ -128,6 +128,7 @@ fn main() -> Result<(), Box> { + interface_desc: None, //TODO: stalls on USB 3 hub: Some(interface_num), + alternate_setting: None, //TODO: stalls on USB 3 hub: Some(if_desc.alternate_setting), + hub_ports: Some(ports), ++ hub_think_time, + }) + .map_err(|err| { + other_error(format!( + +diff --git a/drivers/usb/xhcid/src/usb/hub.rs b/drivers/usb/xhcid/src/usb/hub.rs +index b7dc4d54..2d278320 100644 +--- a/drivers/usb/xhcid/src/usb/hub.rs ++++ b/drivers/usb/xhcid/src/usb/hub.rs +@@ -17,6 +17,23 @@ unsafe impl plain::Plain for HubDescriptorV2 {} + + impl HubDescriptorV2 { + pub const DESCRIPTOR_KIND: u8 = 0x29; ++ ++ pub fn tt_think_time(self, device_protocol: u8) -> Option { ++ const HUB_CHAR_TTTT: u16 = 0x0060; ++ const HUB_TTTT_8_BITS: u16 = 0x0000; ++ const HUB_TTTT_16_BITS: u16 = 0x0020; ++ const HUB_TTTT_24_BITS: u16 = 0x0040; ++ const HUB_TTTT_32_BITS: u16 = 0x0060; ++ ++ match self.characteristics & HUB_CHAR_TTTT { ++ HUB_TTTT_8_BITS if device_protocol != 0 => Some(0), ++ HUB_TTTT_16_BITS => Some(1), ++ HUB_TTTT_24_BITS => Some(2), ++ HUB_TTTT_32_BITS => Some(3), ++ _ => None, ++ } ++ } + } +@@ -196,3 +213,23 @@ impl HubPortStatus { + } + } + } ++ ++#[cfg(test)] ++mod tests { ++ use super::HubDescriptorV2; ++ ++ #[test] ++ fn usb2_hub_tt_think_time_decodes_linux_compatible_values() { ++ let mut hub = HubDescriptorV2::default(); ++ ++ hub.characteristics = 0x0000; ++ 
assert_eq!(hub.tt_think_time(0), None); ++ assert_eq!(hub.tt_think_time(1), Some(0)); ++ ++ hub.characteristics = 0x0020; ++ assert_eq!(hub.tt_think_time(0), Some(1)); ++ ++ hub.characteristics = 0x0040; ++ assert_eq!(hub.tt_think_time(0), Some(2)); ++ ++ hub.characteristics = 0x0060; ++ assert_eq!(hub.tt_think_time(0), Some(3)); ++ } ++} + +diff --git a/drivers/usb/xhcid/src/xhci/scheme.rs b/drivers/usb/xhcid/src/xhci/scheme.rs +index d5266ca0..627d33a7 100644 +--- a/drivers/usb/xhcid/src/xhci/scheme.rs ++++ b/drivers/usb/xhcid/src/xhci/scheme.rs +@@ -1196,11 +1196,8 @@ impl Xhci { + // Set hub data + current_slot_a &= !(1 << 26); + current_slot_b &= !HUB_PORTS_MASK; +- current_slot_c &= !TT_THINK_TIME_MASK; + if let Some(hub_ports) = req.hub_ports { + current_slot_a |= 1 << 26; + current_slot_b |= (u32::from(hub_ports) << HUB_PORTS_SHIFT) & HUB_PORTS_MASK; +- if let Some(hub_think_time) = req.hub_think_time { +- current_slot_c |= (u32::from(hub_think_time) << TT_THINK_TIME_SHIFT) & TT_THINK_TIME_MASK; +- } + } ++ current_slot_c = apply_hub_tt_info(current_slot_c, req); + + input_context.device.slot.a.write(current_slot_a); + input_context.device.slot.b.write(current_slot_b); +@@ -3250,6 +3247,21 @@ fn resolve_active_alternates( + active + } + ++fn apply_hub_tt_info(current_slot_c: u32, req: &ConfigureEndpointsReq) -> u32 { ++ const TT_THINK_TIME_MASK: u32 = 0x0003_0000; ++ const TT_THINK_TIME_SHIFT: u8 = 16; ++ ++ let mut slot_c = current_slot_c & !TT_THINK_TIME_MASK; ++ if req.hub_ports.is_some() { ++ if let Some(hub_think_time) = req.hub_think_time { ++ slot_c |= (u32::from(hub_think_time) << TT_THINK_TIME_SHIFT) & TT_THINK_TIME_MASK; ++ } ++ } ++ slot_c ++} ++ + use lazy_static::lazy_static; + use std::ops::{Add, Div, Rem}; +@@ -3283,4 +3295,18 @@ mod tests { + assert_eq!(resolved.get(&0), Some(&1)); + assert_eq!(resolved.get(&1), Some(&2)); + } ++ ++ #[test] ++ fn apply_hub_tt_info_only_sets_bits_for_hub_requests() { ++ let req = ConfigureEndpointsReq { ++ 
config_desc: 1, ++ interface_desc: None, ++ alternate_setting: None, ++ hub_ports: Some(4), ++ hub_think_time: Some(3), ++ }; ++ assert_eq!(apply_hub_tt_info(0, &req), 0x0003_0000); ++ ++ let no_hub = ConfigureEndpointsReq { hub_ports: None, ..req.clone() }; ++ assert_eq!(apply_hub_tt_info(0x0003_0000, &no_hub), 0); ++ } + } + +diff --git a/drivers/usb/usbhubd/src/main.rs b/drivers/usb/usbhubd/src/main.rs index 2c8b9876..68538b77 100644 --- a/drivers/usb/usbhubd/src/main.rs +++ b/drivers/usb/usbhubd/src/main.rs @@ -4221,75 +5731,720 @@ index 74b9f732..1f144ac9 100644 let flags = port.flags(); diff --git a/drivers/usb/xhcid/src/xhci/mod.rs b/drivers/usb/xhcid/src/xhci/mod.rs -index f2143676..c53cb59f 100644 +index f2143676..d81648bf 100644 --- a/drivers/usb/xhcid/src/xhci/mod.rs +++ b/drivers/usb/xhcid/src/xhci/mod.rs -@@ -311,6 +311,14 @@ struct PortState { +@@ -11,12 +11,13 @@ + //! documents are specified in the crate-level documentation. + use std::collections::BTreeMap; + use std::convert::TryFrom; +-use std::fs::File; ++use std::fs::{self, File}; ++use std::time::Duration; + use std::sync::atomic::AtomicUsize; +-use std::sync::{Arc, Mutex}; ++use std::sync::{Arc, Condvar, Mutex}; + + use std::{mem, process, slice, thread}; +-use syscall::error::{Error, Result, EBADF, EBADMSG, EIO, ENOENT}; ++use syscall::error::{Error, Result, EBADF, EBADMSG, EBUSY, EIO, ENOENT}; + use syscall::{EAGAIN, PAGE_SIZE}; + + use chashmap::CHashMap; +@@ -77,7 +78,55 @@ pub enum InterruptMethod { + Msi, + } + ++const XHCID_TEST_HOOK_PATH: &str = "/tmp/xhcid-test-hook"; ++const XHCID_TEST_HOOK_MAX_DELAY_MS: u64 = 5_000; ++ + impl Xhci { ++ fn read_test_hook_command_from_path(path: &str) -> Option { ++ let contents = fs::read_to_string(path).ok()?; ++ contents ++ .lines() ++ .map(str::trim) ++ .find(|line| !line.is_empty() && !line.starts_with('#')) ++ .map(ToOwned::to_owned) ++ } ++ ++ fn clear_test_hook_command_path(path: &str) { ++ if let Err(err) = fs::remove_file(path) { ++ if 
err.kind() != std::io::ErrorKind::NotFound { ++ warn!( ++ "failed to remove xhcid test hook file {}: {}", ++ path, err ++ ); ++ } ++ } ++ } ++ ++ fn consume_test_hook_from_path(path: &str, expected: &str) -> bool { ++ match Self::read_test_hook_command_from_path(path) { ++ Some(command) if command == expected => { ++ Self::clear_test_hook_command_path(path); ++ true ++ } ++ _ => false, ++ } ++ } ++ ++ fn consume_test_hook_delay_ms_from_path(path: &str, prefix: &str) -> Option { ++ let command = Self::read_test_hook_command_from_path(path)?; ++ let delay_ms = command.strip_prefix(prefix)?.parse::().ok()?; ++ Self::clear_test_hook_command_path(path); ++ Some(delay_ms.min(XHCID_TEST_HOOK_MAX_DELAY_MS)) ++ } ++ ++ pub(crate) fn consume_test_hook(&self, expected: &str) -> bool { ++ Self::consume_test_hook_from_path(XHCID_TEST_HOOK_PATH, expected) ++ } ++ ++ pub(crate) fn consume_test_hook_delay_ms(&self, prefix: &str) -> Option { ++ Self::consume_test_hook_delay_ms_from_path(XHCID_TEST_HOOK_PATH, prefix) ++ } ++ + /// Gets descriptors, before the port state is initiated. 
+ async fn get_desc_raw( + &self, +@@ -104,7 +153,18 @@ impl Xhci { + ); + + let future = { +- let mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(ENOENT))?; ++ let mut published_port_state = self.port_states.get_mut(&port); ++ let mut staged_port_state = if published_port_state.is_none() { ++ self.staged_port_states.get_mut(&port) ++ } else { ++ None ++ }; ++ ++ let port_state = published_port_state ++ .as_deref_mut() ++ .or_else(|| staged_port_state.as_deref_mut()) ++ .ok_or(Error::new(ENOENT))?; ++ + let ring = port_state + .endpoint_states + .get_mut(&0) +@@ -150,7 +210,7 @@ impl Xhci { + trace!("Handling the transfer event TRB!"); + self::scheme::handle_transfer_event_trb("GET_DESC", &event_trb, &status_trb)?; + +- //self.event_handler_finished(); ++ self.event_handler_finished(); + Ok(()) + } + +@@ -283,6 +343,7 @@ pub struct Xhci { + handles: CHashMap, + next_handle: AtomicUsize, + port_states: CHashMap>, ++ staged_port_states: CHashMap>, + drivers: CHashMap>, + scheme_name: String, + +@@ -311,6 +372,144 @@ struct PortState { input_context: Mutex>>, dev_desc: Option, endpoint_states: BTreeMap, + quirks: crate::usb_quirks::UsbQuirkFlags, + pm_state: PortPmState, ++ lifecycle: Arc, ++} ++ ++#[derive(Clone, Copy, Debug, Eq, PartialEq)] ++pub(crate) enum PortLifecycleState { ++ Attaching, ++ Attached, ++ Detaching, ++} ++ ++struct PortLifecycleInner { ++ state: PortLifecycleState, ++ active_operations: usize, ++} ++ ++pub(crate) struct PortLifecycle { ++ inner: Mutex, ++ idle: Condvar, ++} ++ ++impl PortLifecycle { ++ pub(crate) fn new_attaching() -> Self { ++ Self { ++ inner: Mutex::new(PortLifecycleInner { ++ state: PortLifecycleState::Attaching, ++ active_operations: 1, ++ }), ++ idle: Condvar::new(), ++ } ++ } ++ ++ fn lock_inner(&self) -> std::sync::MutexGuard<'_, PortLifecycleInner> { ++ self.inner.lock().unwrap_or_else(|err| err.into_inner()) ++ } ++ ++ pub(crate) fn state(&self) -> PortLifecycleState { ++ self.lock_inner().state ++ } ++ 
++ pub(crate) fn begin_operation(&self, allow_attaching: bool) -> Result<()> { ++ let mut inner = self.lock_inner(); ++ ++ let allowed = match inner.state { ++ PortLifecycleState::Attached => true, ++ PortLifecycleState::Attaching => allow_attaching, ++ PortLifecycleState::Detaching => false, ++ }; ++ ++ if !allowed { ++ return Err(Error::new(EBUSY)); ++ } ++ ++ inner.active_operations += 1; ++ Ok(()) ++ } ++ ++ pub(crate) fn finish_operation(&self) { ++ let mut inner = self.lock_inner(); ++ ++ if inner.active_operations == 0 { ++ return; ++ } ++ ++ inner.active_operations -= 1; ++ if inner.active_operations == 0 { ++ self.idle.notify_all(); ++ } ++ } ++ ++ pub(crate) fn finish_attach_success(&self) -> PortLifecycleState { ++ let mut inner = self.lock_inner(); ++ ++ if inner.state == PortLifecycleState::Attaching { ++ inner.state = PortLifecycleState::Attached; ++ } ++ ++ if inner.active_operations != 0 { ++ inner.active_operations -= 1; ++ } ++ if inner.active_operations == 0 { ++ self.idle.notify_all(); ++ } ++ ++ inner.state ++ } ++ ++ pub(crate) fn finish_attach_failure(&self) { ++ let mut inner = self.lock_inner(); ++ inner.state = PortLifecycleState::Detaching; ++ ++ if inner.active_operations != 0 { ++ inner.active_operations -= 1; ++ } ++ if inner.active_operations == 0 { ++ self.idle.notify_all(); ++ } ++ } ++ ++ pub(crate) fn begin_detaching(&self) { ++ let mut inner = self.lock_inner(); ++ inner.state = PortLifecycleState::Detaching; ++ ++ while inner.active_operations != 0 { ++ inner = self.idle.wait(inner).unwrap_or_else(|err| err.into_inner()); ++ } ++ } ++} ++ ++pub(crate) struct PortOperationGuard { ++ lifecycle: Arc, ++} ++ ++impl PortOperationGuard { ++ pub(crate) fn new(lifecycle: Arc) -> Self { ++ Self { lifecycle } ++ } ++} ++ ++impl Drop for PortOperationGuard { ++ fn drop(&mut self) { ++ self.lifecycle.finish_operation(); ++ } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub(crate) enum PortPmState { + Active, + Suspended, ++} ++impl 
PortPmState { ++ pub fn as_str(&self) -> &'static str { ++ match self { ++ Self::Active => "active", ++ Self::Suspended => "suspended", ++ } ++ } } impl PortState { -@@ -809,6 +817,7 @@ impl Xhci { +@@ -463,6 +662,7 @@ impl Xhci { + handles: CHashMap::new(), + next_handle: AtomicUsize::new(0), + port_states: CHashMap::new(), ++ staged_port_states: CHashMap::new(), + drivers: CHashMap::new(), + scheme_name, + +@@ -615,29 +815,24 @@ impl Xhci { + route_string: 0, + }; + +- //Get the CCS and CSC flags +- let (ccs, csc, flags) = { ++ // Only queue ports that are actually connected at startup. A stale CSC bit on an ++ // otherwise disconnected port should not trigger a full attach attempt. ++ let (ccs, flags) = { + let mut ports = self.ports.lock().unwrap(); + let port = &mut ports[port_id.root_hub_port_index()]; + let flags = port.flags(); + let ccs = flags.contains(PortFlags::CCS); +- let csc = flags.contains(PortFlags::CSC); + +- (ccs, csc, flags) ++ (ccs, flags) + }; + + debug!("Port {} has flags {:?}", port_id, flags); + +- match (ccs, csc) { +- (false, false) => { // Nothing is connected, and there was no port status change +- //Do nothing +- } +- _ => { +- //Either something is connected, or nothing is connected and a port status change was asserted. 
+- self.device_enumerator_sender +- .send(DeviceEnumerationRequest { port_id }) +- .expect("Failed to generate the port enumeration request!"); +- } ++ if ccs { ++ info!("xhcid: queueing initial enumeration for port {} with flags {:?}", port_id, flags); ++ self.device_enumerator_sender ++ .send(DeviceEnumerationRequest { port_id }) ++ .expect("Failed to generate the port enumeration request!"); + } + } + } +@@ -757,7 +952,7 @@ impl Xhci { + + trace!("Slot is enabled!"); + self::scheme::handle_event_trb("ENABLE_SLOT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(event_trb.event_slot()) + } +@@ -768,7 +963,7 @@ impl Xhci { + .await; + + self::scheme::handle_event_trb("DISABLE_SLOT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(()) + } +@@ -793,11 +988,13 @@ impl Xhci { + } + + pub async fn attach_device(&self, port_id: PortId) -> syscall::Result<()> { +- if self.port_states.contains_key(&port_id) { ++ if self.port_states.contains_key(&port_id) || self.staged_port_states.contains_key(&port_id) { + debug!("Already contains port {}", port_id); + return Err(syscall::Error::new(EAGAIN)); + } + ++ info!("xhcid: begin attach for port {}", port_id); ++ + let (data, state, speed, flags) = { + let port = &self.ports.lock().unwrap()[port_id.root_hub_port_index()]; + (port.read(), port.state(), port.speed(), port.flags()) +@@ -808,74 +1005,114 @@ impl Xhci { + port_id, data, state, speed, flags ); - if flags.contains(port::PortFlags::CCS) { -+ let early_quirks = crate::usb_quirks::lookup_usb_quirks_early(port_id); - let slot_ty = match self.supported_protocol(port_id) { - Some(protocol) => protocol.proto_slot_ty(), - None => { -@@ -838,7 +847,15 @@ impl Xhci { +- if flags.contains(port::PortFlags::CCS) { +- let slot_ty = match self.supported_protocol(port_id) { +- Some(protocol) => protocol.proto_slot_ty(), +- None => { +- warn!("Failed to find supported 
protocol information for port"); +- 0 +- } +- }; +- +- debug!("Slot type: {}", slot_ty); +- debug!("Enabling slot."); +- let slot = match self.enable_port_slot(slot_ty).await { +- Ok(ok) => ok, +- Err(err) => { +- error!("Failed to enable slot for port {}: {}", port_id, err); +- return Err(err); +- } +- }; ++ if !flags.contains(port::PortFlags::CCS) { ++ warn!("Attempted to attach a device that didnt have CCS=1"); ++ return Ok(()); ++ } - debug!("Attempting to address the device"); - let mut ring = match self +- debug!("Enabled port {}, which the xHC mapped to {}", port_id, slot); ++ let early_quirks = crate::usb_quirks::lookup_usb_quirks_early(port_id); ++ let slot_ty = match self.supported_protocol(port_id) { ++ Some(protocol) => protocol.proto_slot_ty(), ++ None => { ++ warn!("Failed to find supported protocol information for port {}", port_id); ++ 0 ++ } ++ }; + +- //TODO: get correct speed for child devices +- let protocol_speed = self +- .lookup_psiv(port_id, speed) +- .expect("Failed to retrieve speed ID"); ++ debug!("Slot type: {}", slot_ty); ++ debug!("Enabling slot."); ++ let slot = match self.enable_port_slot(slot_ty).await { ++ Ok(ok) => ok, ++ Err(err) => { ++ error!("Failed to enable slot for port {}: {}", port_id, err); ++ return Err(err); ++ } ++ }; + +- let mut input = unsafe { self.alloc_dma_zeroed::>()? 
}; ++ debug!("Enabled port {}, which the xHC mapped to {}", port_id, slot); ++ info!("xhcid: enabled slot {} for port {}", slot, port_id); + +- debug!("Attempting to address the device"); +- let mut ring = match self - .address_device(&mut input, port_id, slot_ty, slot, protocol_speed, speed) -+ .address_device( -+ &mut input, -+ port_id, -+ slot_ty, -+ slot, -+ protocol_speed, -+ speed, -+ early_quirks, -+ ) - .await +- .await +- { +- Ok(device_ring) => device_ring, +- Err(err) => { +- error!("Failed to address device for port {}: `{}`", port_id, err); +- return Err(err); ++ let protocol_speed = match self.lookup_psiv(port_id, speed) { ++ Some(protocol_speed) => protocol_speed, ++ None => { ++ let err = Error::new(EIO); ++ error!("Failed to retrieve speed ID for port {}", port_id); ++ if let Err(disable_err) = self.disable_port_slot(slot).await { ++ warn!( ++ "Failed to disable slot {} after speed lookup failure on port {}: {}", ++ slot, port_id, disable_err ++ ); + } +- }; +- +- debug!("Addressed device"); ++ return Err(err); ++ } ++ }; + +- // TODO: Should the descriptors be cached in PortState, or refetched? ++ let mut input = unsafe { self.alloc_dma_zeroed::>()? 
}; + +- let mut port_state = PortState { ++ debug!("Attempting to address the device"); ++ let ring = match self ++ .address_device( ++ &mut input, ++ port_id, ++ slot_ty, + slot, + protocol_speed, +- input_context: Mutex::new(input), +- dev_desc: None, +- cfg_idx: None, +- endpoint_states: std::iter::once(( +- 0, +- EndpointState { +- transfer: RingOrStreams::Ring(ring), +- driver_if_state: EndpIfState::Init, +- }, +- )) +- .collect::>(), +- }; +- self.port_states.insert(port_id, port_state); +- debug!("Got port states!"); ++ speed, ++ early_quirks, ++ ) ++ .await ++ { ++ Ok(device_ring) => device_ring, ++ Err(err) => { ++ error!("Failed to address device for port {}: `{}`", port_id, err); ++ if let Err(disable_err) = self.disable_port_slot(slot).await { ++ warn!( ++ "Failed to disable slot {} after address failure on port {}: {}", ++ slot, port_id, disable_err ++ ); ++ } ++ return Err(err); ++ } ++ }; ++ ++ debug!("Addressed device"); ++ info!("xhcid: addressed device on port {} slot {}", port_id, slot); ++ ++ let lifecycle = Arc::new(PortLifecycle::new_attaching()); ++ let port_state = PortState { ++ slot, ++ protocol_speed, ++ input_context: Mutex::new(input), ++ dev_desc: None, ++ cfg_idx: None, ++ endpoint_states: std::iter::once(( ++ 0, ++ EndpointState { ++ transfer: RingOrStreams::Ring(ring), ++ driver_if_state: EndpIfState::Init, ++ }, ++ )) ++ .collect::>(), ++ quirks: early_quirks, ++ pm_state: PortPmState::Active, ++ lifecycle: Arc::clone(&lifecycle), ++ }; ++ self.staged_port_states.insert(port_id, port_state); ++ debug!("Got staged port state!"); + +- // Ensure correct packet size is used ++ let attach_result = async { + let dev_desc_8_byte = self.fetch_dev_desc_8_byte(port_id, slot).await?; ++ info!("xhcid: fetched 8-byte device descriptor for port {}", port_id); { - Ok(device_ring) => device_ring, -@@ -866,6 +883,8 @@ impl Xhci { - }, - )) - .collect::>(), -+ quirks: early_quirks, -+ pm_state: PortPmState::Active, - }; - 
self.port_states.insert(port_id, port_state); - debug!("Got port states!"); -@@ -884,8 +903,14 @@ impl Xhci { +- let mut port_state = self.port_states.get_mut(&port_id).unwrap(); ++ let mut port_state = self ++ .staged_port_states ++ .get_mut(&port_id) ++ .ok_or(Error::new(ENOENT))?; + +- let mut input = port_state.input_context.lock().unwrap(); ++ let mut input = port_state ++ .input_context ++ .lock() ++ .unwrap_or_else(|err| err.into_inner()); + + self.update_max_packet_size(&mut *input, slot, dev_desc_8_byte) + .await?; +@@ -884,38 +1121,131 @@ impl Xhci { debug!("Got the 8 byte dev descriptor: {:X?}", dev_desc_8_byte); let dev_desc = self.get_desc(port_id, slot).await?; ++ info!( ++ "xhcid: got descriptors for port {} vendor {:04x} product {:04x}", ++ port_id, ++ dev_desc.vendor, ++ dev_desc.product ++ ); + let quirks = early_quirks + | crate::usb_quirks::lookup_usb_quirks(dev_desc.vendor, dev_desc.product); debug!("Got the full device descriptor!"); - self.port_states.get_mut(&port_id).unwrap().dev_desc = Some(dev_desc); + { -+ let mut port_state = self.port_states.get_mut(&port_id).unwrap(); ++ let mut port_state = self ++ .staged_port_states ++ .get_mut(&port_id) ++ .ok_or(Error::new(ENOENT))?; + port_state.quirks = quirks; + port_state.dev_desc = Some(dev_desc); + } debug!("Got the port states again!"); { -@@ -1052,6 +1077,7 @@ impl Xhci { +- let mut port_state = self.port_states.get_mut(&port_id).unwrap(); +- +- let mut input = port_state.input_context.lock().unwrap(); ++ let mut port_state = self ++ .staged_port_states ++ .get_mut(&port_id) ++ .ok_or(Error::new(ENOENT))?; ++ ++ let mut input = port_state ++ .input_context ++ .lock() ++ .unwrap_or_else(|err| err.into_inner()); + debug!("Got the input context!"); +- let dev_desc = port_state.dev_desc.as_ref().unwrap(); ++ let dev_desc = port_state.dev_desc.as_ref().ok_or(Error::new(EIO))?; + + self.update_default_control_pipe(&mut *input, slot, dev_desc) + .await?; + } + + debug!("Updated the default 
control pipe"); ++ Ok(()) ++ } ++ .await; ++ ++ match attach_result { ++ Ok(()) => { ++ if let Some(delay_ms) = ++ self.consume_test_hook_delay_ms("delay_before_attach_commit_ms=") ++ { ++ info!( ++ "xhcid: test hook delaying attach commit for port {} by {} ms", ++ port_id, delay_ms ++ ); ++ thread::sleep(Duration::from_millis(delay_ms)); ++ } + +- match self.spawn_drivers(port_id) { +- Ok(()) => (), +- Err(err) => { +- error!("Failed to spawn driver for port {}: `{}`", port_id, err) ++ if lifecycle.finish_attach_success() != PortLifecycleState::Attached { ++ warn!( ++ "attach for port {} completed after detach already started; skipping publication", ++ port_id ++ ); ++ return Err(Error::new(EBUSY)); + } ++ ++ let staged_port_state = self ++ .staged_port_states ++ .remove(&port_id) ++ .ok_or(Error::new(ENOENT))?; ++ self.port_states.insert(port_id, staged_port_state); ++ ++ match self.spawn_drivers(port_id) { ++ Ok(()) => (), ++ Err(err) => { ++ error!("Failed to spawn driver for port {}: `{}`", port_id, err) ++ } ++ } ++ info!("xhcid: finished attach for port {}", port_id); ++ Ok(()) ++ } ++ Err(err) => { ++ lifecycle.finish_attach_failure(); ++ if let Err(detach_err) = self.detach_device(port_id).await { ++ warn!( ++ "failed to clean up attach failure on port {}: {}", ++ port_id, detach_err ++ ); ++ } ++ Err(err) + } +- } else { +- warn!("Attempted to attach a device that didnt have CCS=1"); + } +- +- Ok(()) + } + + pub async fn detach_device(&self, port_id: PortId) -> Result { +- if let Some(children) = self.drivers.remove(&port_id) { ++ let published_state = self.port_states.get(&port_id); ++ let staged_state = if published_state.is_none() { ++ self.staged_port_states.get(&port_id) ++ } else { ++ None ++ }; ++ ++ let (slot, lifecycle, was_published) = match published_state ++ .as_deref() ++ .or_else(|| staged_state.as_deref()) ++ { ++ Some(state) => (state.slot, Arc::clone(&state.lifecycle), published_state.is_some()), ++ None => { ++ debug!( ++ "Attempted to 
detach from port {}, which wasn't previously attached.", ++ port_id ++ ); ++ return Ok(false); ++ } ++ }; ++ ++ info!("xhcid: begin detach quiesce for port {}", port_id); ++ lifecycle.begin_detaching(); ++ info!("xhcid: detach quiesce complete for port {}", port_id); ++ ++ if let Some(delay_ms) = self.consume_test_hook_delay_ms("delay_before_detach_disable_ms=") { ++ info!( ++ "xhcid: test hook delaying detach disable for port {} by {} ms", ++ port_id, delay_ms ++ ); ++ thread::sleep(Duration::from_millis(delay_ms)); ++ } ++ ++ if was_published { ++ if let Some(children) = self.drivers.remove(&port_id) { + for mut child in children { + info!("killing driver process {} for port {}", child.id(), port_id); + match child.kill() { +@@ -961,21 +1291,26 @@ impl Xhci { + } + } + } ++ } + +- if let Some(state) = self.port_states.remove(&port_id) { +- debug!("disabling port slot {} for port {}", state.slot, port_id); +- let result = self.disable_port_slot(state.slot).await.and(Ok(true)); +- debug!( +- "disabled port slot {} for port {} with result: {:?}", +- state.slot, port_id, result +- ); +- result +- } else { +- debug!( +- "Attempted to detach from port {}, which wasn't previously attached.", +- port_id +- ); +- Ok(false) ++ debug!("disabling port slot {} for port {}", slot, port_id); ++ match self.disable_port_slot(slot).await { ++ Ok(()) => { ++ if was_published { ++ let _ = self.port_states.remove(&port_id); ++ } else { ++ let _ = self.staged_port_states.remove(&port_id); ++ } ++ debug!("disabled port slot {} for port {}", slot, port_id); ++ Ok(true) ++ } ++ Err(err) => { ++ warn!( ++ "failed to disable port slot {} for port {}: {}", ++ slot, port_id, err ++ ); ++ Err(err) ++ } + } + } + +@@ -1004,7 +1339,7 @@ impl Xhci { + .await; + + self::scheme::handle_event_trb("EVALUATE_CONTEXT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(()) + } +@@ -1039,7 +1374,7 @@ impl Xhci { + debug!("Completed the command to 
update the default control pipe"); + + self::scheme::handle_event_trb("EVALUATE_CONTEXT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(()) + } +@@ -1052,6 +1387,7 @@ impl Xhci { slot: u8, protocol_speed: &ProtocolSpeed, speed: u8, @@ -4297,7 +6452,7 @@ index f2143676..c53cb59f 100644 ) -> Result { // Collect MTT, parent port number, parent slot ID let mut mtt = false; -@@ -1162,11 +1188,16 @@ impl Xhci { +@@ -1162,11 +1498,16 @@ impl Xhci { let input_context_physical = input_context.physical(); @@ -4319,11 +6474,98 @@ index f2143676..c53cb59f 100644 if event_trb.completion_code() != TrbCompletionCode::Success as u8 { error!( +@@ -1175,10 +1516,10 @@ impl Xhci { + port, + event_trb.completion_code() + ); +- //self.event_handler_finished(); ++ self.event_handler_finished(); + return Err(Error::new(EIO)); + } +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + Ok(ring) + } +@@ -1281,6 +1622,12 @@ impl Xhci { + ifdesc.sub_class, + ifdesc.protocol, + ); ++ match driver.name.as_str() { ++ "USB HID" => info!("USB HID driver spawned"), ++ "SCSI over USB" => info!("USB SCSI driver spawned"), ++ "USB HUB" => info!("USB HUB driver spawned"), ++ _ => {} ++ } + let (command, args) = driver.command.split_first().ok_or(Error::new(EBADMSG))?; + + let command = if command.starts_with('/') { +@@ -1487,3 +1834,52 @@ lazy_static! 
{ + toml::from_slice::(TOML).expect("Failed to parse internally embedded config file") + }; + } ++ ++#[cfg(test)] ++mod tests { ++ use super::{Xhci, XHCID_TEST_HOOK_MAX_DELAY_MS}; ++ use std::fs; ++ use std::path::Path; ++ use std::time::{SystemTime, UNIX_EPOCH}; ++ ++ fn unique_test_hook_path() -> String { ++ let unique = SystemTime::now() ++ .duration_since(UNIX_EPOCH) ++ .unwrap() ++ .as_nanos(); ++ format!("/tmp/xhcid-test-hook-{}", unique) ++ } ++ ++ #[test] ++ fn consume_test_hook_only_clears_matching_command() { ++ let path = unique_test_hook_path(); ++ fs::write(&path, "fail_after_set_configuration\n").unwrap(); ++ ++ assert!(!Xhci::<16>::consume_test_hook_from_path( ++ &path, ++ "fail_after_configure_endpoint" ++ )); ++ assert!(Path::new(&path).exists()); ++ ++ assert!(Xhci::<16>::consume_test_hook_from_path( ++ &path, ++ "fail_after_set_configuration" ++ )); ++ assert!(!Path::new(&path).exists()); ++ } ++ ++ #[test] ++ fn consume_test_hook_delay_clamps_and_clears() { ++ let path = unique_test_hook_path(); ++ fs::write(&path, "delay_before_attach_commit_ms=999999\n").unwrap(); ++ ++ assert_eq!( ++ Xhci::<16>::consume_test_hook_delay_ms_from_path( ++ &path, ++ "delay_before_attach_commit_ms=" ++ ), ++ Some(XHCID_TEST_HOOK_MAX_DELAY_MS) ++ ); ++ assert!(!Path::new(&path).exists()); ++ } ++} diff --git a/drivers/usb/xhcid/src/xhci/scheme.rs b/drivers/usb/xhcid/src/xhci/scheme.rs -index f2d439a4..b0fb9b85 100644 +index f2d439a4..bc6d7fca 100644 --- a/drivers/usb/xhcid/src/xhci/scheme.rs +++ b/drivers/usb/xhcid/src/xhci/scheme.rs -@@ -24,6 +24,7 @@ use std::{cmp, fmt, io, mem, str}; +@@ -18,12 +18,15 @@ + //! 
port/endpoints//data + use std::convert::TryFrom; + use std::io::prelude::*; ++use std::io::Write; + use std::ops::Deref; ++use std::sync::Arc; + use std::sync::atomic; + use std::{cmp, fmt, io, mem, str}; use common::dma::Dma; use futures::executor::block_on; @@ -4331,7 +6573,7 @@ index f2d439a4..b0fb9b85 100644 use log::{debug, error, info, trace, warn}; use redox_scheme::scheme::SchemeSync; use smallvec::SmallVec; -@@ -32,9 +33,9 @@ use common::io::Io; +@@ -32,16 +35,16 @@ use common::io::Io; use redox_scheme::{CallerCtx, OpenResult}; use syscall::schemev2::NewFdFlags; use syscall::{ @@ -4344,7 +6586,15 @@ index f2d439a4..b0fb9b85 100644 }; use super::{port, usb}; -@@ -60,6 +61,10 @@ lazy_static! { + use super::{EndpointState, PortId, Xhci}; + + use super::context::{ +- SlotState, StreamContextArray, StreamContextType, CONTEXT_32, CONTEXT_64, ++ EndpointContext, SlotState, StreamContextArray, StreamContextType, CONTEXT_32, CONTEXT_64, + SLOT_CONTEXT_STATE_MASK, SLOT_CONTEXT_STATE_SHIFT, + }; + use super::extended::ProtocolSpeed; +@@ -60,10 +63,16 @@ lazy_static! 
{ .expect("Failed to create the regex for the port/attach scheme."); static ref REGEX_PORT_DETACH: Regex = Regex::new(r"^port([\d\.]+)/detach$") .expect("Failed to create the regex for the port/detach scheme."); @@ -4355,7 +6605,20 @@ index f2d439a4..b0fb9b85 100644 static ref REGEX_PORT_DESCRIPTORS: Regex = Regex::new(r"^port([\d\.]+)/descriptors$") .expect("Failed to create the regex for the port/descriptors"); static ref REGEX_PORT_STATE: Regex = Regex::new(r"^port([\d\.]+)/state$") -@@ -143,6 +148,8 @@ pub enum Handle { + .expect("Failed to create the regex for the port/state scheme"); ++ static ref REGEX_PORT_PM_STATE: Regex = Regex::new(r"^port([\d\.]+)/pm_state$") ++ .expect("Failed to create the regex for the port/pm_state scheme"); + static ref REGEX_PORT_REQUEST: Regex = Regex::new(r"^port([\d\.]+)/request$") + .expect("Failed to create the regex for the port/request scheme"); + static ref REGEX_PORT_ENDPOINTS: Regex = Regex::new(r"^port([\d\.]+)/endpoints$") +@@ -137,12 +146,15 @@ pub enum Handle { + Port(PortId, Vec), // port, contents + PortDesc(PortId, Vec), // port, contents + PortState(PortId), // port ++ PortPmState(PortId), // port + PortReq(PortId, PortReqState), // port, state + Endpoints(PortId, Vec), // port, contents + Endpoint(PortId, u8, EndpointHandleTy), // port, endpoint, state ConfigureEndpoints(PortId), // port AttachDevice(PortId), // port DetachDevice(PortId), // port @@ -4364,7 +6627,16 @@ index f2d439a4..b0fb9b85 100644 SchemeRoot, } -@@ -187,6 +194,10 @@ enum SchemeParameters { +@@ -172,6 +184,8 @@ enum SchemeParameters { + PortDesc(PortId), // port number + /// /port/state + PortState(PortId), // port number ++ /// /port/pm_state ++ PortPmState(PortId), // port number + /// /port/request + PortReq(PortId), // port number + /// /port/endpoints +@@ -187,6 +201,10 @@ enum SchemeParameters { AttachDevice(PortId), // port number /// /port/detach DetachDevice(PortId), // port number @@ -4375,7 +6647,17 @@ index f2d439a4..b0fb9b85 
100644 } impl Handle { -@@ -235,6 +246,12 @@ impl Handle { +@@ -209,6 +227,9 @@ impl Handle { + Handle::PortState(port_num) => { + format!("port{}/state", port_num) + } ++ Handle::PortPmState(port_num) => { ++ format!("port{}/pm_state", port_num) ++ } + Handle::PortReq(port_num, _) => { + format!("port{}/request", port_num) + } +@@ -235,6 +256,12 @@ impl Handle { Handle::DetachDevice(port_num) => { format!("port{}/detach", port_num) } @@ -4388,7 +6670,12 @@ index f2d439a4..b0fb9b85 100644 Handle::SchemeRoot => String::from(""), } } -@@ -262,6 +279,8 @@ impl Handle { +@@ -258,10 +285,13 @@ impl Handle { + &Handle::PortReq(_, PortReqState::Tmp) => unreachable!(), + &Handle::PortReq(_, PortReqState::TmpSetup(_)) => unreachable!(), + &Handle::PortState(_) => HandleType::Character, ++ &Handle::PortPmState(_) => HandleType::Character, + &Handle::PortReq(_, _) => HandleType::Character, &Handle::ConfigureEndpoints(_) => HandleType::Character, &Handle::AttachDevice(_) => HandleType::Character, &Handle::DetachDevice(_) => HandleType::Character, @@ -4397,7 +6684,12 @@ index f2d439a4..b0fb9b85 100644 &Handle::Endpoint(_, _, ref st) => match st { EndpointHandleTy::Data => HandleType::Character, EndpointHandleTy::Ctl => HandleType::Character, -@@ -293,6 +312,8 @@ impl Handle { +@@ -289,10 +319,13 @@ impl Handle { + &Handle::PortReq(_, PortReqState::Tmp) => None, + &Handle::PortReq(_, PortReqState::TmpSetup(_)) => None, + &Handle::PortState(_) => None, ++ &Handle::PortPmState(_) => None, + &Handle::PortReq(_, _) => None, &Handle::ConfigureEndpoints(_) => None, &Handle::AttachDevice(_) => None, &Handle::DetachDevice(_) => None, @@ -4406,7 +6698,7 @@ index f2d439a4..b0fb9b85 100644 &Handle::Endpoint(_, _, ref st) => match st { EndpointHandleTy::Data => None, EndpointHandleTy::Ctl => None, -@@ -383,6 +404,14 @@ impl SchemeParameters { +@@ -383,6 +416,14 @@ impl SchemeParameters { let port_num = get_port_id_from_regex(®EX_PORT_DETACH, scheme, 0)?; Ok(Self::DetachDevice(port_num)) @@ 
-4421,7 +6713,66 @@ index f2d439a4..b0fb9b85 100644 } else if REGEX_PORT_DESCRIPTORS.is_match(scheme) { let port_num = get_port_id_from_regex(®EX_PORT_DESCRIPTORS, scheme, 0)?; -@@ -564,15 +593,22 @@ impl Xhci { +@@ -391,6 +432,10 @@ impl SchemeParameters { + let port_num = get_port_id_from_regex(®EX_PORT_STATE, scheme, 0)?; + + Ok(Self::PortState(port_num)) ++ } else if REGEX_PORT_PM_STATE.is_match(scheme) { ++ let port_num = get_port_id_from_regex(®EX_PORT_PM_STATE, scheme, 0)?; ++ ++ Ok(Self::PortPmState(port_num)) + } else if REGEX_PORT_REQUEST.is_match(scheme) { + let port_num = get_port_id_from_regex(®EX_PORT_REQUEST, scheme, 0)?; + +@@ -556,6 +601,47 @@ impl AnyDescriptor { + } + + impl Xhci { ++ fn begin_port_operation( ++ &self, ++ port: PortId, ++ allow_attaching: bool, ++ require_active_pm: bool, ++ ) -> Result { ++ let lifecycle = { ++ let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; ++ Arc::clone(&port_state.lifecycle) ++ }; ++ ++ lifecycle.begin_operation(allow_attaching)?; ++ let guard = super::PortOperationGuard::new(lifecycle); ++ ++ if require_active_pm { ++ let pm_state = self ++ .port_states ++ .get(&port) ++ .ok_or(Error::new(EBADFD))? 
++ .pm_state; ++ if pm_state != super::PortPmState::Active { ++ drop(guard); ++ return Err(Error::new(EBUSY)); ++ } ++ } ++ ++ Ok(guard) ++ } ++ ++ fn begin_transfer_operation(&self, port: PortId) -> Result { ++ self.begin_port_operation(port, true, true) ++ } ++ ++ fn begin_routable_operation(&self, port: PortId) -> Result { ++ self.begin_port_operation(port, false, true) ++ } ++ ++ fn begin_attached_operation(&self, port: PortId) -> Result { ++ self.begin_port_operation(port, false, false) ++ } ++ + async fn new_if_desc( + &self, + port_id: PortId, +@@ -564,15 +650,22 @@ impl Xhci { endps: impl IntoIterator, hid_descs: impl IntoIterator, lang_id: u16, @@ -4447,7 +6798,19 @@ index f2d439a4..b0fb9b85 100644 } else { None }, -@@ -628,6 +664,53 @@ impl Xhci { +@@ -590,10 +683,9 @@ impl Xhci { + /// # Locking + /// This function will lock `Xhci::cmd` and `Xhci::dbs`. + pub async fn execute_command(&self, f: F) -> (Trb, Trb) { +- //TODO: find out why this bit is set earlier! + if self.interrupt_is_pending(0) { + debug!("The EHB bit is already set!"); +- //self.force_clear_interrupt(0); ++ self.force_clear_interrupt(0); + } + + let next_event = { +@@ -628,6 +720,54 @@ impl Xhci { (event_trb, command_trb) } @@ -4458,6 +6821,7 @@ index f2d439a4..b0fb9b85 100644 + ) -> Result<(Trb, Trb)> { + if self.interrupt_is_pending(0) { + debug!("The EHB bit is already set!"); ++ self.force_clear_interrupt(0); + } + + let next_event = { @@ -4501,19 +6865,21 @@ index f2d439a4..b0fb9b85 100644 pub async fn execute_control_transfer( &self, port_num: PortId, -@@ -639,6 +722,8 @@ impl Xhci { +@@ -639,6 +779,9 @@ impl Xhci { where D: FnMut(&mut Trb, bool) -> ControlFlow, { ++ let _op = self.begin_transfer_operation(port_num)?; + self.ensure_port_active(port_num)?; + let future = { let mut port_state = self.port_state_mut(port_num)?; let slot = port_state.slot; -@@ -690,6 +775,20 @@ impl Xhci { +@@ -690,7 +833,21 @@ impl Xhci { handle_transfer_event_trb("CONTROL_TRANSFER", &event_trb, 
&status_trb)?; +- //self.event_handler_finished(); + let delay_ctrl_msg = self + .port_states + .get(&port_num) @@ -4528,19 +6894,21 @@ index f2d439a4..b0fb9b85 100644 + std::thread::sleep(std::time::Duration::from_millis(20)); + } + - //self.event_handler_finished(); ++ self.event_handler_finished(); Ok(event_trb) -@@ -709,6 +808,8 @@ impl Xhci { + } +@@ -709,6 +866,9 @@ impl Xhci { where D: FnMut(&mut Trb, bool) -> ControlFlow, { ++ let _op = self.begin_transfer_operation(port_num)?; + self.ensure_port_active(port_num)?; + let endp_idx = endp_num.checked_sub(1).ok_or(Error::new(EIO))?; let mut port_state = self.port_state_mut(port_num)?; -@@ -785,7 +886,29 @@ impl Xhci { +@@ -785,7 +945,31 @@ impl Xhci { let event_trb = trbs.event_trb; let transfer_trb = trbs.src_trb.ok_or(Error::new(EIO))?; @@ -4566,12 +6934,28 @@ index f2d439a4..b0fb9b85 100644 + } + } + ++ self.event_handler_finished(); ++ + return Err(err); + } // FIXME: EDTLA if event data was set if event_trb.completion_code() != TrbCompletionCode::ShortPacket as u8 -@@ -861,6 +984,21 @@ impl Xhci { +@@ -798,6 +982,8 @@ impl Xhci { + // TODO: Handle event data + trace!("EVENT DATA: {:?}", event_trb.event_data()); + ++ self.event_handler_finished(); ++ + Ok(event_trb) + } + async fn device_req_no_data(&self, port: PortId, req: usb::Setup) -> Result<()> { +@@ -857,10 +1043,27 @@ impl Xhci { + trb.reset_endpoint(slot, endp_num_xhc, tsp, cycle); + }) + .await; +- //self.event_handler_finished(); ++ self.event_handler_finished(); handle_event_trb("RESET_ENDPOINT", &event_trb, &command_trb) } @@ -4588,16 +6972,559 @@ index f2d439a4..b0fb9b85 100644 + }) + .await; + ++ self.event_handler_finished(); ++ + handle_event_trb("RESET_DEVICE", &event_trb, &command_trb) + } fn endp_ctx_interval(speed_id: &ProtocolSpeed, endp_desc: &EndpDesc) -> u8 { /// Logarithmic (base 2) 125 µs periods per millisecond. 
-@@ -1205,7 +1343,19 @@ impl Xhci { +@@ -949,35 +1152,106 @@ impl Xhci { + self.port_states.get_mut(&port).ok_or(Error::new(EBADF)) + } + ++ fn restore_configure_input_context( ++ &self, ++ port: PortId, ++ snapshot: ConfigureContextSnapshot, ++ endpoint_snapshots: &[(usize, EndpointContextSnapshot)], ++ ) -> Result { ++ let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; ++ let mut input_context = port_state ++ .input_context ++ .lock() ++ .unwrap_or_else(|err| err.into_inner()); ++ ++ input_context.add_context.write(snapshot.add_context); ++ input_context.drop_context.write(snapshot.drop_context); ++ input_context.control.write(snapshot.control); ++ input_context.device.slot.a.write(snapshot.slot_a); ++ input_context.device.slot.b.write(snapshot.slot_b); ++ input_context.device.slot.c.write(snapshot.slot_c); ++ ++ for (endp_i, endp_snapshot) in endpoint_snapshots { ++ input_context.device.endpoints[*endp_i].a.write(endp_snapshot.a); ++ input_context.device.endpoints[*endp_i].b.write(endp_snapshot.b); ++ input_context.device.endpoints[*endp_i].trl.write(endp_snapshot.trl); ++ input_context.device.endpoints[*endp_i].trh.write(endp_snapshot.trh); ++ input_context.device.endpoints[*endp_i].c.write(endp_snapshot.c); ++ } ++ ++ Ok(input_context.physical()) ++ } ++ ++ async fn rollback_configure_attempt( ++ &self, ++ port: PortId, ++ slot: u8, ++ configure_snapshot: ConfigureContextSnapshot, ++ endpoint_snapshots: &[(usize, EndpointContextSnapshot)], ++ stage: &str, ++ ) { ++ let rollback_input_context_physical = match self.restore_configure_input_context( ++ port, ++ configure_snapshot, ++ endpoint_snapshots, ++ ) { ++ Ok(physical) => physical, ++ Err(restore_err) => { ++ warn!( ++ "failed to restore configure input context after {}: {:?}", ++ stage, restore_err ++ ); ++ return; ++ } ++ }; ++ ++ let (rollback_event_trb, rollback_command_trb) = self ++ .execute_command(|trb, cycle| { ++ trb.configure_endpoint(slot, rollback_input_context_physical, 
cycle) ++ }) ++ .await; ++ ++ self.event_handler_finished(); ++ ++ if let Err(rollback_err) = ++ handle_event_trb("CONFIGURE_ENDPOINT_ROLLBACK", &rollback_event_trb, &rollback_command_trb) ++ { ++ warn!( ++ "failed to roll back CONFIGURE_ENDPOINT after {}: {:?}", ++ stage, rollback_err ++ ); ++ } ++ } ++ + async fn configure_endpoints_once( + &self, + port: PortId, + req: &ConfigureEndpointsReq, + ) -> Result<()> { +- let (endp_desc_count, new_context_entries, configuration_value) = { +- let mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(EBADFD))?; +- +- port_state.cfg_idx = Some(req.config_desc); ++ let (dev_desc, endpoint_descs, new_context_entries, configuration_value) = { ++ let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; ++ let dev_desc = port_state.dev_desc.as_ref().ok_or(Error::new(EBADFD))?.clone(); + +- let config_desc = port_state +- .dev_desc +- .as_ref() +- .unwrap() ++ let config_desc = dev_desc + .config_descs + .iter() + .find(|desc| desc.configuration_value == req.config_desc) + .ok_or(Error::new(EBADFD))?; ++ let configuration_value = config_desc.configuration_value; + +- //TODO: USE ENDPOINTS FROM ALL INTERFACES +- let mut endp_desc_count = 0; +- let mut new_context_entries = 1; +- for if_desc in config_desc.interface_descs.iter() { +- for endpoint in if_desc.endpoints.iter() { +- endp_desc_count += 1; +- let entry = Self::endp_num_to_dci(endp_desc_count, endpoint); +- if entry > new_context_entries { +- new_context_entries = entry; +- } ++ let endpoint_descs = config_desc ++ .interface_descs ++ .iter() ++ .flat_map(|if_desc| if_desc.endpoints.iter().copied()) ++ .collect::>(); ++ ++ let endp_desc_count = endpoint_descs.len(); ++ let mut new_context_entries = 1u8; ++ for (endp_idx, endpoint) in endpoint_descs.iter().enumerate() { ++ let endp_num = endp_idx as u8 + 1; ++ let entry = Self::endp_num_to_dci(endp_num, endpoint); ++ if entry > new_context_entries { ++ new_context_entries = entry; + } + } + 
new_context_entries += 1; +@@ -988,11 +1262,13 @@ impl Xhci { + } + + ( +- endp_desc_count, ++ dev_desc, ++ endpoint_descs, + new_context_entries, +- config_desc.configuration_value, ++ configuration_value, + ) + }; ++ let endp_desc_count = endpoint_descs.len(); + let lec = self.cap.lec(); + let log_max_psa_size = self.cap.max_psa_size(); + +@@ -1002,9 +1278,160 @@ impl Xhci { + Error::new(EIO) + })?; + ++ let mut endpoint_programs = Vec::with_capacity(endp_desc_count as usize); ++ let mut staged_endpoint_states = Vec::with_capacity(endp_desc_count as usize); ++ + { ++ for (endp_idx, endp_desc) in endpoint_descs.iter().enumerate() { ++ let endp_num = endp_idx as u8 + 1; ++ ++ let endp_num_xhc = Self::endp_num_to_dci(endp_num, endp_desc); ++ let usb_log_max_streams = endp_desc.log_max_streams(); ++ ++ let primary_streams = if let Some(log_max_streams) = usb_log_max_streams { ++ if log_max_psa_size != 0 { ++ cmp::min(u8::from(log_max_streams), log_max_psa_size + 1) - 1 ++ } else { ++ 0 ++ } ++ } else { ++ 0 ++ }; ++ let linear_stream_array = primary_streams != 0; ++ ++ let mult = endp_desc.isoch_mult(lec); ++ ++ let max_packet_size = Self::endp_ctx_max_packet_size(endp_desc); ++ let max_burst_size = Self::endp_ctx_max_burst(speed_id, &dev_desc, endp_desc); ++ ++ let max_esit_payload = Self::endp_ctx_max_esit_payload( ++ speed_id, ++ &dev_desc, ++ endp_desc, ++ max_packet_size, ++ max_burst_size, ++ ); ++ let max_esit_payload_lo = max_esit_payload as u16; ++ let max_esit_payload_hi = ((max_esit_payload & 0x00FF_0000) >> 16) as u8; ++ ++ let interval = Self::endp_ctx_interval(speed_id, endp_desc); ++ ++ let max_error_count = 3; ++ let ep_ty = endp_desc.xhci_ep_type()?; ++ let host_initiate_disable = false; ++ ++ let avg_trb_len: u16 = match endp_desc.ty() { ++ EndpointTy::Ctrl => { ++ warn!("trying to use control endpoint"); ++ return Err(Error::new(EIO)); ++ } ++ EndpointTy::Bulk | EndpointTy::Isoch => 3072, ++ EndpointTy::Interrupt => 1024, ++ }; ++ ++ 
assert_eq!(ep_ty & 0x7, ep_ty); ++ assert_eq!(mult & 0x3, mult); ++ assert_eq!(max_error_count & 0x3, max_error_count); ++ assert_ne!(ep_ty, 0); ++ ++ let ring_ptr = if usb_log_max_streams.is_some() { ++ let mut array = ++ StreamContextArray::new::(self.cap.ac64(), 1 << (primary_streams + 1))?; ++ ++ array.add_ring::(self.cap.ac64(), 1, true)?; ++ let array_ptr = array.register(); ++ ++ assert_eq!( ++ array_ptr & 0xFFFF_FFFF_FFFF_FF81, ++ array_ptr, ++ "stream ctx ptr not aligned to 16 bytes" ++ ); ++ ++ staged_endpoint_states.push(( ++ endp_num, ++ EndpointState { ++ transfer: super::RingOrStreams::Streams(array), ++ driver_if_state: EndpIfState::Init, ++ }, ++ )); ++ ++ array_ptr ++ } else { ++ let ring = Ring::new::(self.cap.ac64(), 16, true)?; ++ let ring_ptr = ring.register(); ++ ++ assert_eq!( ++ ring_ptr & 0xFFFF_FFFF_FFFF_FF81, ++ ring_ptr, ++ "ring pointer not aligned to 16 bytes" ++ ); ++ ++ staged_endpoint_states.push(( ++ endp_num, ++ EndpointState { ++ transfer: super::RingOrStreams::Ring(ring), ++ driver_if_state: EndpIfState::Init, ++ }, ++ )); ++ ++ ring_ptr ++ }; ++ assert_eq!(primary_streams & 0x1F, primary_streams); ++ ++ endpoint_programs.push(EndpointProgram { ++ endp_num, ++ endp_num_xhc, ++ a: u32::from(mult) << 8 ++ | u32::from(primary_streams) << 10 ++ | u32::from(linear_stream_array) << 15 ++ | u32::from(interval) << 16 ++ | u32::from(max_esit_payload_hi) << 24, ++ b: max_error_count << 1 ++ | u32::from(ep_ty) << 3 ++ | u32::from(host_initiate_disable) << 7 ++ | u32::from(max_burst_size) << 8 ++ | u32::from(max_packet_size) << 16, ++ trl: ring_ptr as u32, ++ trh: (ring_ptr >> 32) as u32, ++ c: u32::from(avg_trb_len) | (u32::from(max_esit_payload_lo) << 16), ++ }); ++ ++ log::debug!("staged endpoint {}", endp_num); ++ } ++ } ++ ++ let (configure_snapshot, endpoint_snapshots, input_context_physical) = { + let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; +- let mut input_context = 
port_state.input_context.lock().unwrap(); ++ let mut input_context = port_state ++ .input_context ++ .lock() ++ .unwrap_or_else(|err| err.into_inner()); ++ ++ let configure_snapshot = ConfigureContextSnapshot { ++ add_context: input_context.add_context.read(), ++ drop_context: input_context.drop_context.read(), ++ control: input_context.control.read(), ++ slot_a: input_context.device.slot.a.read(), ++ slot_b: input_context.device.slot.b.read(), ++ slot_c: input_context.device.slot.c.read(), ++ }; ++ ++ let endpoint_snapshots = endpoint_programs ++ .iter() ++ .map(|program| { ++ let endp_i = program.endp_num_xhc as usize - 1; ++ ( ++ endp_i, ++ EndpointContextSnapshot::capture_values( ++ input_context.device.endpoints[endp_i].a.read(), ++ input_context.device.endpoints[endp_i].b.read(), ++ input_context.device.endpoints[endp_i].trl.read(), ++ input_context.device.endpoints[endp_i].trh.read(), ++ input_context.device.endpoints[endp_i].c.read(), ++ ), ++ ) ++ }) ++ .collect::>(); + + // Configure the slot context as well, which holds the last index of the endp descs. 
+ input_context.add_context.write(1); +@@ -1015,25 +1442,26 @@ impl Xhci { + + const HUB_PORTS_MASK: u32 = 0xFF00_0000; + const HUB_PORTS_SHIFT: u8 = 24; ++ let mut current_slot_c = input_context.device.slot.c.read(); + + let mut current_slot_a = input_context.device.slot.a.read(); + let mut current_slot_b = input_context.device.slot.b.read(); + +- // Set context entries + current_slot_a &= !CONTEXT_ENTRIES_MASK; + current_slot_a |= + (u32::from(new_context_entries) << CONTEXT_ENTRIES_SHIFT) & CONTEXT_ENTRIES_MASK; + +- // Set hub data + current_slot_a &= !(1 << 26); + current_slot_b &= !HUB_PORTS_MASK; + if let Some(hub_ports) = req.hub_ports { + current_slot_a |= 1 << 26; + current_slot_b |= (u32::from(hub_ports) << HUB_PORTS_SHIFT) & HUB_PORTS_MASK; + } ++ current_slot_c = apply_hub_tt_info(current_slot_c, req); + + input_context.device.slot.a.write(current_slot_a); + input_context.device.slot.b.write(current_slot_b); ++ input_context.device.slot.c.write(current_slot_c); + + let control = if self.op.lock().unwrap().cie() { + (u32::from(req.alternate_setting.unwrap_or(0)) << 16) +@@ -1043,174 +1471,138 @@ impl Xhci { + 0 + }; + input_context.control.write(control); +- } + +- for endp_idx in 0..endp_desc_count as u8 { +- let endp_num = endp_idx + 1; +- +- let mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(EBADFD))?; +- let dev_desc = port_state.dev_desc.as_ref().unwrap(); +- let endp_desc = port_state.get_endp_desc(endp_idx).ok_or_else(|| { +- warn!("failed to find endpoint {}", endp_idx); +- Error::new(EIO) +- })?; +- +- let endp_num_xhc = Self::endp_num_to_dci(endp_num, endp_desc); +- +- let usb_log_max_streams = endp_desc.log_max_streams(); +- +- // TODO: Secondary streams. +- let primary_streams = if let Some(log_max_streams) = usb_log_max_streams { +- // TODO: Can streams-capable be configured to not use streams? 
+- if log_max_psa_size != 0 { +- cmp::min(u8::from(log_max_streams), log_max_psa_size + 1) - 1 +- } else { +- 0 +- } +- } else { +- 0 +- }; +- let linear_stream_array = if primary_streams != 0 { true } else { false }; ++ for program in &endpoint_programs { ++ let endp_i = program.endp_num_xhc as usize - 1; ++ input_context.add_context.writef(1 << program.endp_num_xhc, true); ++ input_context.device.endpoints[endp_i].a.write(program.a); ++ input_context.device.endpoints[endp_i].b.write(program.b); ++ input_context.device.endpoints[endp_i].trl.write(program.trl); ++ input_context.device.endpoints[endp_i].trh.write(program.trh); ++ input_context.device.endpoints[endp_i].c.write(program.c); ++ } + +- // TODO: Interval related fields +- // TODO: Max ESIT payload size. ++ (configure_snapshot, endpoint_snapshots, input_context.physical()) ++ }; + +- let mult = endp_desc.isoch_mult(lec); ++ let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; ++ let slot = port_state.slot; + +- let max_packet_size = Self::endp_ctx_max_packet_size(endp_desc); +- let max_burst_size = Self::endp_ctx_max_burst(speed_id, dev_desc, endp_desc); ++ let (event_trb, command_trb) = self ++ .execute_command(|trb, cycle| trb.configure_endpoint(slot, input_context_physical, cycle)) ++ .await; + +- let max_esit_payload = Self::endp_ctx_max_esit_payload( +- speed_id, +- dev_desc, +- endp_desc, +- max_packet_size, +- max_burst_size, +- ); +- let max_esit_payload_lo = max_esit_payload as u16; +- let max_esit_payload_hi = ((max_esit_payload & 0x00FF_0000) >> 16) as u8; +- +- let interval = Self::endp_ctx_interval(speed_id, endp_desc); +- +- let max_error_count = 3; +- let ep_ty = endp_desc.xhci_ep_type()?; +- let host_initiate_disable = false; +- +- // TODO: Maybe this value is out of scope for xhcid, because the actual usb device +- // driver probably knows better. The spec says that the initial value should be 8 bytes +- // for control, 1KiB for interrupt and 3KiB for bulk and isoch. 
+- let avg_trb_len: u16 = match endp_desc.ty() { +- EndpointTy::Ctrl => { +- warn!("trying to use control endpoint"); +- return Err(Error::new(EIO)); // only endpoint zero is of type control, and is configured separately with the address device command. ++ self.event_handler_finished(); ++ ++ if let Err(err) = handle_event_trb("CONFIGURE_ENDPOINT", &event_trb, &command_trb) { ++ let rollback_input_context_physical = match self.restore_configure_input_context( ++ port, ++ configure_snapshot, ++ &endpoint_snapshots, ++ ) { ++ Ok(physical) => physical, ++ Err(restore_err) => { ++ warn!( ++ "failed to restore configure input context after CONFIGURE_ENDPOINT failure: {:?}", ++ restore_err ++ ); ++ return Err(err); + } +- EndpointTy::Bulk | EndpointTy::Isoch => 3072, // 3 KiB +- EndpointTy::Interrupt => 1024, // 1 KiB + }; + +- assert_eq!(ep_ty & 0x7, ep_ty); +- assert_eq!(mult & 0x3, mult); +- assert_eq!(max_error_count & 0x3, max_error_count); +- assert_ne!(ep_ty, 0); // 0 means invalid. +- +- let ring_ptr = if usb_log_max_streams.is_some() { +- let mut array = +- StreamContextArray::new::(self.cap.ac64(), 1 << (primary_streams + 1))?; ++ let (rollback_event_trb, rollback_command_trb) = self ++ .execute_command(|trb, cycle| { ++ trb.configure_endpoint(slot, rollback_input_context_physical, cycle) ++ }) ++ .await; + +- // TODO: Use as many stream rings as needed. 
+- array.add_ring::(self.cap.ac64(), 1, true)?; +- let array_ptr = array.register(); ++ self.event_handler_finished(); + +- assert_eq!( +- array_ptr & 0xFFFF_FFFF_FFFF_FF81, +- array_ptr, +- "stream ctx ptr not aligned to 16 bytes" +- ); +- port_state.endpoint_states.insert( +- endp_num, +- EndpointState { +- transfer: super::RingOrStreams::Streams(array), +- driver_if_state: EndpIfState::Init, +- }, ++ if let Err(rollback_err) = ++ handle_event_trb("CONFIGURE_ENDPOINT_ROLLBACK", &rollback_event_trb, &rollback_command_trb) ++ { ++ warn!( ++ "failed to roll back CONFIGURE_ENDPOINT after failure {:?}: {:?}", ++ err, ++ rollback_err + ); ++ } + +- array_ptr +- } else { +- let ring = Ring::new::(self.cap.ac64(), 16, true)?; +- let ring_ptr = ring.register(); ++ return Err(err); ++ } + +- assert_eq!( +- ring_ptr & 0xFFFF_FFFF_FFFF_FF81, +- ring_ptr, +- "ring pointer not aligned to 16 bytes" +- ); +- port_state.endpoint_states.insert( +- endp_num, +- EndpointState { +- transfer: super::RingOrStreams::Ring(ring), +- driver_if_state: EndpIfState::Init, +- }, +- ); +- ring_ptr +- }; +- assert_eq!(primary_streams & 0x1F, primary_streams); +- +- let mut input_context = port_state.input_context.lock().unwrap(); +- input_context.add_context.writef(1 << endp_num_xhc, true); +- +- let endp_i = endp_num_xhc as usize - 1; +- input_context.device.endpoints[endp_i].a.write( +- u32::from(mult) << 8 +- | u32::from(primary_streams) << 10 +- | u32::from(linear_stream_array) << 15 +- | u32::from(interval) << 16 +- | u32::from(max_esit_payload_hi) << 24, ++ if self.consume_test_hook("fail_after_configure_endpoint") { ++ info!( ++ "xhcid: test hook injecting failure after CONFIGURE_ENDPOINT for port {}", ++ port + ); +- input_context.device.endpoints[endp_i].b.write( +- max_error_count << 1 +- | u32::from(ep_ty) << 3 +- | u32::from(host_initiate_disable) << 7 +- | u32::from(max_burst_size) << 8 +- | u32::from(max_packet_size) << 16, +- ); +- +- input_context.device.endpoints[endp_i] +- .trl 
+- .write(ring_ptr as u32); +- input_context.device.endpoints[endp_i] +- .trh +- .write((ring_ptr >> 32) as u32); +- +- input_context.device.endpoints[endp_i] +- .c +- .write(u32::from(avg_trb_len) | (u32::from(max_esit_payload_lo) << 16)); +- +- log::debug!("initialized endpoint {}", endp_num); ++ self.rollback_configure_attempt( ++ port, ++ slot, ++ configure_snapshot, ++ &endpoint_snapshots, ++ "test hook fail_after_configure_endpoint", ++ ) ++ .await; ++ return Err(Error::new(EIO)); } - // Tell the device about this configuration. -- self.set_configuration(port, configuration_value).await?; +- { +- let port_state = self.port_states.get(&port).ok_or(Error::new(EBADFD))?; +- let slot = port_state.slot; +- let input_context_physical = port_state.input_context.lock().unwrap().physical(); ++ // Tell the device about this configuration. + let skip_set_configuration = self + .port_states + .get(&port) @@ -4607,14 +7534,64 @@ index f2d439a4..b0fb9b85 100644 + .contains(crate::usb_quirks::UsbQuirkFlags::NO_SET_CONFIG) + }) + .unwrap_or(false); -+ + +- let (event_trb, command_trb) = self +- .execute_command(|trb, cycle| { +- trb.configure_endpoint(slot, input_context_physical, cycle) +- }) + if !skip_set_configuration { -+ self.set_configuration(port, configuration_value).await?; ++ if let Err(err) = self.set_configuration(port, configuration_value).await { ++ self.rollback_configure_attempt( ++ port, ++ slot, ++ configure_snapshot, ++ &endpoint_snapshots, ++ "set_configuration failure", ++ ) + .await; + +- //self.event_handler_finished(); ++ return Err(err); ++ } + +- handle_event_trb("CONFIGURE_ENDPOINT", &event_trb, &command_trb)?; ++ if self.consume_test_hook("fail_after_set_configuration") { ++ info!( ++ "xhcid: test hook injecting failure after SET_CONFIGURATION for port {}", ++ port ++ ); ++ self.rollback_configure_attempt( ++ port, ++ slot, ++ configure_snapshot, ++ &endpoint_snapshots, ++ "test hook fail_after_set_configuration", ++ ) ++ .await; ++ return 
Err(Error::new(EIO)); ++ } + } + +- // Tell the device about this configuration. +- self.set_configuration(port, configuration_value).await?; ++ { ++ let mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(EBADFD))?; ++ port_state.cfg_idx = Some(configuration_value); ++ port_state.endpoint_states.retain(|endp_num, _| *endp_num == 0); ++ for (endp_num, endpoint_state) in staged_endpoint_states { ++ port_state.endpoint_states.insert(endp_num, endpoint_state); ++ } + } Ok(()) } -@@ -1234,8 +1384,20 @@ impl Xhci { + + async fn configure_endpoints(&self, port: PortId, json_buf: &[u8]) -> Result<()> { ++ let _op = self.begin_routable_operation(port)?; + let mut req: ConfigureEndpointsReq = + serde_json::from_slice(json_buf).or(Err(Error::new(EBADMSG)))?; + +@@ -1234,8 +1626,20 @@ impl Xhci { if let Some(interface_num) = req.interface_desc { if let Some(alternate_setting) = req.alternate_setting { @@ -4637,7 +7614,16 @@ index f2d439a4..b0fb9b85 100644 } } -@@ -1453,52 +1615,109 @@ impl Xhci { +@@ -1432,7 +1836,7 @@ impl Xhci { + }, + ) + .await?; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + let bytes_transferred = dma_buf + .as_ref() +@@ -1453,52 +1857,109 @@ impl Xhci { let raw_dd = self.fetch_dev_desc(port_id, slot).await?; log::debug!("port {} slot {} desc {:X?}", port_id, slot, raw_dd); @@ -4781,7 +7767,7 @@ index f2d439a4..b0fb9b85 100644 log::debug!( "manufacturer {:?} product {:?} serial {:?}", manufacturer_str, -@@ -1508,14 +1727,39 @@ impl Xhci { +@@ -1508,14 +1969,39 @@ impl Xhci { //TODO let (bos_desc, bos_data) = self.fetch_bos_desc(port_id, slot).await?; @@ -4826,7 +7812,7 @@ index f2d439a4..b0fb9b85 100644 debug!("Fetching the config descriptor at index {}", index); let (desc, data) = self.fetch_config_desc(port_id, slot, index).await?; log::debug!( -@@ -1541,6 +1785,12 @@ impl Xhci { +@@ -1541,6 +2027,12 @@ impl Xhci { let mut iter = descriptors.into_iter().peekable(); while let Some(item) = iter.next() { @@ 
-4839,7 +7825,7 @@ index f2d439a4..b0fb9b85 100644 if let AnyDescriptor::Interface(idesc) = item { let mut endpoints = SmallVec::<[EndpDesc; 4]>::new(); let mut hid_descs = SmallVec::<[HidDesc; 1]>::new(); -@@ -1554,6 +1804,9 @@ impl Xhci { +@@ -1554,6 +2046,9 @@ impl Xhci { } Some(unexpected) => { log::warn!("expected endpoint, got {:X?}", unexpected); @@ -4849,7 +7835,7 @@ index f2d439a4..b0fb9b85 100644 break; } None => break, -@@ -1578,8 +1831,16 @@ impl Xhci { +@@ -1578,8 +2073,16 @@ impl Xhci { } interface_descs.push( @@ -4868,7 +7854,7 @@ index f2d439a4..b0fb9b85 100644 ); } else { log::warn!("expected interface, got {:?}", item); -@@ -1590,11 +1851,20 @@ impl Xhci { +@@ -1590,11 +2093,20 @@ impl Xhci { config_descs.push(ConfDesc { kind: desc.kind, @@ -4893,16 +7879,31 @@ index f2d439a4..b0fb9b85 100644 } else { None }, -@@ -1856,7 +2126,7 @@ impl Xhci { +@@ -1856,7 +2368,7 @@ impl Xhci { if (flags & O_DIRECTORY != 0) || (flags & O_STAT != 0) { let mut contents = Vec::new(); - write!(contents, "descriptors\nendpoints\n").unwrap(); -+ write!(contents, "descriptors\nendpoints\nsuspend\nresume\n").unwrap(); ++ write!(contents, "descriptors\nendpoints\npm_state\nsuspend\nresume\n").unwrap(); if self.slot_state( self.port_states -@@ -2087,6 +2357,30 @@ impl Xhci { +@@ -1893,6 +2405,14 @@ impl Xhci { + Ok(Handle::PortState(port_num)) + } + ++ fn open_handle_port_pm_state(&self, port_num: PortId, flags: usize) -> Result { ++ if flags & O_DIRECTORY != 0 && flags & O_STAT == 0 { ++ return Err(Error::new(ENOTDIR)); ++ } ++ ++ Ok(Handle::PortPmState(port_num)) ++ } ++ + /// implements open() for /port/endpoints + /// + /// # Arguments +@@ -2087,6 +2607,30 @@ impl Xhci { Ok(Handle::DetachDevice(port_num)) } @@ -4933,7 +7934,17 @@ index f2d439a4..b0fb9b85 100644 /// implements open() for /port/request /// /// # Arguments -@@ -2173,6 +2467,12 @@ impl SchemeSync for &Xhci { +@@ -2155,6 +2699,9 @@ impl SchemeSync for &Xhci { + SchemeParameters::PortState(port_number) => { + 
self.open_handle_port_state(port_number, flags)? + } ++ SchemeParameters::PortPmState(port_number) => { ++ self.open_handle_port_pm_state(port_number, flags)? ++ } + SchemeParameters::PortReq(port_number) => { + self.open_handle_port_request(port_number, flags)? + } +@@ -2173,6 +2720,12 @@ impl SchemeSync for &Xhci { SchemeParameters::DetachDevice(port_number) => { self.open_handle_detach_device(port_number, flags)? } @@ -4946,7 +7957,7 @@ index f2d439a4..b0fb9b85 100644 }; let fd = self.next_handle.fetch_add(1, atomic::Ordering::Relaxed); -@@ -2203,7 +2503,11 @@ impl SchemeSync for &Xhci { +@@ -2203,7 +2756,11 @@ impl SchemeSync for &Xhci { //If we have a handle to the configure scheme, we need to mark it as write only. match &*guard { @@ -4959,7 +7970,7 @@ index f2d439a4..b0fb9b85 100644 stat.st_mode = stat.st_mode | 0o200; } _ => {} -@@ -2263,6 +2567,8 @@ impl SchemeSync for &Xhci { +@@ -2263,6 +2820,8 @@ impl SchemeSync for &Xhci { Handle::ConfigureEndpoints(_) => Err(Error::new(EBADF)), Handle::AttachDevice(_) => Err(Error::new(EBADF)), Handle::DetachDevice(_) => Err(Error::new(EBADF)), @@ -4968,7 +7979,18 @@ index f2d439a4..b0fb9b85 100644 Handle::SchemeRoot => Err(Error::new(EBADF)), &mut Handle::Endpoint(port_num, endp_num, ref mut st) => match st { -@@ -2333,6 +2639,14 @@ impl SchemeSync for &Xhci { +@@ -2294,6 +2853,10 @@ impl SchemeSync for &Xhci { + + Ok(Xhci::::write_dyn_string(string, buf, offset)) + } ++ &mut Handle::PortPmState(port_num) => { ++ let ps = self.port_states.get(&port_num).ok_or(Error::new(EBADF))?; ++ Ok(Xhci::::write_dyn_string(ps.pm_state.as_str().as_bytes(), buf, offset)) ++ } + &mut Handle::PortReq(port_num, ref mut st) => { + let state = std::mem::replace(st, PortReqState::Tmp); + drop(guard); // release the lock +@@ -2333,6 +2896,14 @@ impl SchemeSync for &Xhci { block_on(self.detach_device(port_num))?; Ok(buf.len()) } @@ -4983,16 +8005,17 @@ index f2d439a4..b0fb9b85 100644 &mut Handle::Endpoint(port_num, endp_num, ref 
ep_file_ty) => match ep_file_ty { EndpointHandleTy::Ctl => block_on(self.on_write_endp_ctl(port_num, endp_num, buf)), EndpointHandleTy::Data => { -@@ -2356,6 +2670,38 @@ impl Xhci { +@@ -2356,6 +2927,59 @@ impl Xhci { self.handles.remove(&fd); } + fn ensure_port_active(&self, port_num: PortId) -> Result<()> { -+ let pm_state = self -+ .port_states -+ .get(&port_num) -+ .ok_or(Error::new(EBADFD))? -+ .pm_state; ++ let port_state = self.port_states.get(&port_num).ok_or(Error::new(EBADFD))?; ++ if port_state.lifecycle.state() == super::PortLifecycleState::Detaching { ++ return Err(Error::new(EBUSY)); ++ } ++ ++ let pm_state = port_state.pm_state; + match pm_state { + super::PortPmState::Active => Ok(()), + super::PortPmState::Suspended => Err(Error::new(EBUSY)), @@ -5000,6 +8023,7 @@ index f2d439a4..b0fb9b85 100644 + } + + pub async fn suspend_device(&self, port_num: PortId) -> Result<()> { ++ let _op = self.begin_attached_operation(port_num)?; + let mut port_state = self.port_states.get_mut(&port_num).ok_or(Error::new(EBADFD))?; + + if port_state @@ -5009,12 +8033,31 @@ index f2d439a4..b0fb9b85 100644 + return Err(Error::new(EOPNOTSUPP)); + } + ++ if port_state.pm_state != super::PortPmState::Active { ++ return Err(Error::new(EBUSY)); ++ } ++ + port_state.pm_state = super::PortPmState::Suspended; + Ok(()) + } + + pub async fn resume_device(&self, port_num: PortId) -> Result<()> { ++ let _op = self.begin_attached_operation(port_num)?; + let mut port_state = self.port_states.get_mut(&port_num).ok_or(Error::new(EBADFD))?; ++ ++ if port_state.pm_state == super::PortPmState::Active { ++ return Ok(()); ++ } ++ ++ let slot_state = self.slot_state(port_state.slot as usize); ++ if slot_state != SlotState::Addressed as u8 && slot_state != SlotState::Configured as u8 { ++ warn!( ++ "refusing to resume port {} while slot {} is in controller state {}", ++ port_num, port_state.slot, slot_state ++ ); ++ return Err(Error::new(EIO)); ++ } ++ + port_state.pm_state = 
super::PortPmState::Active; + Ok(()) + } @@ -5022,7 +8065,7 @@ index f2d439a4..b0fb9b85 100644 pub fn get_endp_status(&self, port_num: PortId, endp_num: u8) -> Result { let port_state = self.port_states.get(&port_num).ok_or(Error::new(EBADFD))?; -@@ -2406,6 +2752,8 @@ impl Xhci { +@@ -2406,6 +3030,8 @@ impl Xhci { endp_num: u8, clear_feature: bool, ) -> Result<()> { @@ -5031,40 +8074,150 @@ index f2d439a4..b0fb9b85 100644 if self.get_endp_status(port_num, endp_num)? != EndpointStatus::Halted { return Err(Error::new(EPROTO)); } -@@ -2562,6 +2910,7 @@ impl Xhci { - }, - XhciEndpCtlReq::Reset { no_clear_feature } => match ep_if_state { - EndpIfState::Init => { -+ self.ensure_port_active(port_num)?; - self.on_req_reset_device(port_num, endp_num, !no_clear_feature) - .await? - } -@@ -2571,6 +2920,7 @@ impl Xhci { - }, - XhciEndpCtlReq::Transfer { direction, count } => match ep_if_state { - state @ EndpIfState::Init => { -+ self.ensure_port_active(port_num)?; - if direction == XhciEndpCtlDirection::NoData { - // Yield the result directly because no bytes have to be sent or received - // beforehand. -@@ -2631,6 +2981,8 @@ impl Xhci { +@@ -2531,7 +3157,7 @@ impl Xhci { + ) + }) + .await; +- //self.event_handler_finished(); ++ self.event_handler_finished(); + + handle_event_trb("SET_TR_DEQUEUE_PTR", &event_trb, &command_trb) + } +@@ -2541,10 +3167,14 @@ impl Xhci { endp_num: u8, buf: &[u8], ) -> Result { ++ let _op = self.begin_routable_operation(port_num)?; + let mut port_state = self + .port_states + .get_mut(&port_num) + .ok_or(Error::new(EBADF))?; ++ if port_state.pm_state != super::PortPmState::Active { ++ return Err(Error::new(EBUSY)); ++ } + + let ep_if_state = &mut port_state + .endpoint_states +@@ -2562,6 +3192,7 @@ impl Xhci { + }, + XhciEndpCtlReq::Reset { no_clear_feature } => match ep_if_state { + EndpIfState::Init => { ++ drop(port_state); + self.on_req_reset_device(port_num, endp_num, !no_clear_feature) + .await? 
+ } +@@ -2631,6 +3262,9 @@ impl Xhci { + endp_num: u8, + buf: &[u8], + ) -> Result { ++ let _op = self.begin_routable_operation(port_num)?; + self.ensure_port_active(port_num)?; + let mut port_state = self .port_states .get_mut(&port_num) -@@ -2732,6 +3084,8 @@ impl Xhci { +@@ -2732,6 +3366,9 @@ impl Xhci { endp_num: u8, buf: &mut [u8], ) -> Result { ++ let _op = self.begin_routable_operation(port_num)?; + self.ensure_port_active(port_num)?; + let mut port_state = self .port_states .get_mut(&port_num) +@@ -2832,6 +3469,64 @@ pub fn handle_transfer_event_trb(name: &str, event_trb: &Trb, transfer_trb: &Trb + Err(Error::new(EIO)) + } + } ++ ++fn apply_hub_tt_info(current_slot_c: u32, req: &ConfigureEndpointsReq) -> u32 { ++ const TT_THINK_TIME_MASK: u32 = 0x0003_0000; ++ const TT_THINK_TIME_SHIFT: u8 = 16; ++ ++ let mut slot_c = current_slot_c & !TT_THINK_TIME_MASK; ++ if req.hub_ports.is_some() { ++ if let Some(hub_think_time) = req.hub_think_time { ++ slot_c |= (u32::from(hub_think_time) << TT_THINK_TIME_SHIFT) & TT_THINK_TIME_MASK; ++ } ++ } ++ slot_c ++} ++ ++#[derive(Clone, Copy)] ++struct ConfigureContextSnapshot { ++ add_context: u32, ++ drop_context: u32, ++ control: u32, ++ slot_a: u32, ++ slot_b: u32, ++ slot_c: u32, ++} ++ ++#[derive(Clone, Copy)] ++struct EndpointContextSnapshot { ++ a: u32, ++ b: u32, ++ trl: u32, ++ trh: u32, ++ c: u32, ++} ++ ++impl EndpointContextSnapshot { ++ fn capture_values(a: u32, b: u32, trl: u32, trh: u32, c: u32) -> Self { ++ Self { a, b, trl, trh, c } ++ } ++ ++ fn restore(&self, ctx: &mut EndpointContext) { ++ ctx.a.write(self.a); ++ ctx.b.write(self.b); ++ ctx.trl.write(self.trl); ++ ctx.trh.write(self.trh); ++ ctx.c.write(self.c); ++ } ++} ++ ++#[derive(Clone, Copy)] ++struct EndpointProgram { ++ endp_num: u8, ++ endp_num_xhc: u8, ++ a: u32, ++ b: u32, ++ trl: u32, ++ trh: u32, ++ c: u32, ++} ++ + use lazy_static::lazy_static; + use std::ops::{Add, Div, Rem}; + +@@ -2845,3 +3540,26 @@ where + a / b + } + } ++ ++#[cfg(test)] 
++mod tests { ++ use super::{apply_hub_tt_info, ConfigureEndpointsReq}; ++ ++ #[test] ++ fn apply_hub_tt_info_only_sets_bits_for_hub_requests() { ++ let req = ConfigureEndpointsReq { ++ config_desc: 1, ++ interface_desc: None, ++ alternate_setting: None, ++ hub_ports: Some(4), ++ hub_think_time: Some(3), ++ }; ++ assert_eq!(apply_hub_tt_info(0, &req), 0x0003_0000); ++ ++ let no_hub = ConfigureEndpointsReq { ++ hub_ports: None, ++ ..req.clone() ++ }; ++ assert_eq!(apply_hub_tt_info(0x0003_0000, &no_hub), 0); ++ } ++} diff --git a/init.initfs.d/40_ps2d.service b/init.initfs.d/40_ps2d.service index 881e75ea..bbee2699 100644 --- a/init.initfs.d/40_ps2d.service diff --git a/local/patches/kernel/redox.patch b/local/patches/kernel/redox.patch index eddc3de6..a92b883f 100644 --- a/local/patches/kernel/redox.patch +++ b/local/patches/kernel/redox.patch @@ -493,3 +493,367 @@ index 94519448..0db1de53 100644 page_count, )) } +diff --git a/src/event.rs b/src/event.rs +index 7398145a..92e5793c 100644 +--- a/src/event.rs ++++ b/src/event.rs +@@ -8,13 +8,14 @@ use crate::{ + context, + scheme::{self, SchemeExt, SchemeId}, + sync::{ +- CleanLockToken, LockToken, RwLock, RwLockReadGuard, RwLockWriteGuard, WaitQueue, L0, L1, L2, ++ CleanLockToken, LockToken, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, ++ WaitCondition, WaitQueue, L0, L1, L2, + }, + syscall::{ + data::Event, +- error::{Error, Result, EBADF}, +- flag::EventFlags, +- usercopy::UserSliceWo, ++ error::{Error, Result, EAGAIN, EBADF, EINVAL, EINTR}, ++ flag::{EVENT_READ, EVENT_WRITE, EventFlags}, ++ usercopy::{UserSliceRo, UserSliceWo}, + }, + }; + +@@ -25,6 +26,17 @@ pub struct EventQueue { + queue: WaitQueue, + } + ++const EVENTFD_COUNTER_MAX: u64 = u64::MAX - 1; ++const EVENTFD_TAG_BIT: usize = 1usize << (usize::BITS - 1); ++ ++pub struct EventCounter { ++ id: usize, ++ counter: Mutex, ++ read_condition: WaitCondition, ++ write_condition: WaitCondition, ++ semaphore: bool, ++} ++ + impl EventQueue { + pub fn 
new(id: EventQueueId) -> EventQueue { + EventQueue { +@@ -91,19 +103,146 @@ impl EventQueue { + } + } + ++impl EventCounter { ++ pub fn new(id: usize, init: u64, semaphore: bool) -> EventCounter { ++ EventCounter { ++ id, ++ counter: Mutex::new(init), ++ read_condition: WaitCondition::new(), ++ write_condition: WaitCondition::new(), ++ semaphore, ++ } ++ } ++ ++ pub fn is_readable(&self, token: &mut CleanLockToken) -> bool { ++ *self.counter.lock(token.token()) > 0 ++ } ++ ++ pub fn is_writable(&self, token: &mut CleanLockToken) -> bool { ++ *self.counter.lock(token.token()) < EVENTFD_COUNTER_MAX ++ } ++ ++ pub fn read(&self, buf: UserSliceWo, block: bool, token: &mut CleanLockToken) -> Result { ++ if buf.len() < core::mem::size_of::() { ++ return Err(Error::new(EINVAL)); ++ } ++ ++ loop { ++ let counter = self.counter.lock(token.token()); ++ let (mut counter, mut token) = counter.into_split(); ++ ++ if *counter > 0 { ++ let value = if self.semaphore { ++ *counter -= 1; ++ 1 ++ } else { ++ let value = *counter; ++ *counter = 0; ++ value ++ }; ++ ++ buf.limit(core::mem::size_of::()) ++ .ok_or(Error::new(EINVAL))? ++ .copy_from_slice(&value.to_ne_bytes())?; ++ ++ trigger_locked( ++ GlobalSchemes::Event.scheme_id(), ++ self.id, ++ EVENT_WRITE, ++ token.token(), ++ ); ++ self.write_condition.notify_locked(token.token()); ++ ++ return Ok(core::mem::size_of::()); ++ } ++ ++ if !block { ++ return Err(Error::new(EAGAIN)); ++ } ++ ++ if !self ++ .read_condition ++ .wait(counter, "EventCounter::read", &mut token) ++ { ++ return Err(Error::new(EINTR)); ++ } ++ } ++ } ++ ++ pub fn write(&self, buf: UserSliceRo, block: bool, token: &mut CleanLockToken) -> Result { ++ if buf.len() != core::mem::size_of::() { ++ return Err(Error::new(EINVAL)); ++ } ++ ++ let value = unsafe { buf.read_exact::()? 
}; ++ if value == u64::MAX { ++ return Err(Error::new(EINVAL)); ++ } ++ ++ loop { ++ let counter = self.counter.lock(token.token()); ++ let (mut counter, mut token) = counter.into_split(); ++ ++ if EVENTFD_COUNTER_MAX - *counter >= value { ++ let was_zero = *counter == 0; ++ *counter += value; ++ ++ if was_zero && value != 0 { ++ trigger_locked( ++ GlobalSchemes::Event.scheme_id(), ++ self.id, ++ EVENT_READ, ++ token.token(), ++ ); ++ self.read_condition.notify_locked(token.token()); ++ } ++ ++ return Ok(core::mem::size_of::()); ++ } ++ ++ if !block { ++ return Err(Error::new(EAGAIN)); ++ } ++ ++ if !self ++ .write_condition ++ .wait(counter, "EventCounter::write", &mut token) ++ { ++ return Err(Error::new(EINTR)); ++ } ++ } ++ } ++ ++ pub fn into_drop(self, _token: LockToken<'_, L1>) { ++ drop(self); ++ } ++} ++ + pub type EventQueueList = HashMap>; ++pub type EventCounterList = HashMap>; + + // Next queue id + static NEXT_QUEUE_ID: AtomicUsize = AtomicUsize::new(0); ++static NEXT_COUNTER_ID: AtomicUsize = AtomicUsize::new(0); + + /// Get next queue id + pub fn next_queue_id() -> EventQueueId { + EventQueueId::from(NEXT_QUEUE_ID.fetch_add(1, Ordering::SeqCst)) + } + ++pub fn next_counter_id() -> usize { ++ EVENTFD_TAG_BIT | NEXT_COUNTER_ID.fetch_add(1, Ordering::SeqCst) ++} ++ ++pub fn is_counter_id(id: usize) -> bool { ++ id & EVENTFD_TAG_BIT != 0 ++} ++ + // Current event queues + static QUEUES: RwLock = + RwLock::new(EventQueueList::with_hasher(DefaultHashBuilder::new())); ++static COUNTERS: RwLock = ++ RwLock::new(EventCounterList::with_hasher(DefaultHashBuilder::new())); + + /// Get the event queues list, const + pub fn queues(token: LockToken<'_, L0>) -> RwLockReadGuard<'_, L2, EventQueueList> { +@@ -115,6 +254,14 @@ pub fn queues_mut(token: LockToken<'_, L0>) -> RwLockWriteGuard<'_, L2, EventQue + QUEUES.write(token) + } + ++pub fn counters(token: LockToken<'_, L0>) -> RwLockReadGuard<'_, L2, EventCounterList> { ++ COUNTERS.read(token) ++} ++ ++pub fn 
counters_mut(token: LockToken<'_, L0>) -> RwLockWriteGuard<'_, L2, EventCounterList> { ++ COUNTERS.write(token) ++} ++ + #[derive(Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] + pub struct RegKey { + pub scheme: SchemeId, +diff --git a/src/scheme/event.rs b/src/scheme/event.rs +index 36efe5b2..c64b6bd0 100644 +--- a/src/scheme/event.rs ++++ b/src/scheme/event.rs +@@ -1,9 +1,12 @@ +-use alloc::sync::Arc; ++use alloc::{sync::Arc, vec::Vec}; + use syscall::{EventFlags, O_NONBLOCK}; + + use crate::{ + context::file::InternalFlags, +- event::{next_queue_id, queues, queues_mut, EventQueue, EventQueueId}, ++ event::{ ++ EventCounter, EventQueue, EventQueueId, counters, counters_mut, is_counter_id, ++ next_counter_id, next_queue_id, queues, queues_mut, ++ }, + sync::CleanLockToken, + syscall::{ + data::Event, +@@ -25,7 +28,7 @@ impl KernelScheme for EventScheme { + fn kopenat( + &self, + id: usize, +- _user_buf: StrOrBytes, ++ user_buf: StrOrBytes, + _flags: usize, + _fcntl_flags: u32, + _ctx: CallerCtx, +@@ -34,13 +37,53 @@ impl KernelScheme for EventScheme { + if id != SCHEME_ROOT_ID { + return Err(Error::new(EACCES)); + } +- let id = next_queue_id(); +- queues_mut(token.token()).insert(id, Arc::new(EventQueue::new(id))); + +- Ok(OpenResult::SchemeLocal(id.get(), InternalFlags::empty())) ++ let path = user_buf.as_str().or(Err(Error::new(EINVAL)))?; ++ let path = path.trim_matches('/'); ++ ++ if path.is_empty() { ++ let id = next_queue_id(); ++ queues_mut(token.token()).insert(id, Arc::new(EventQueue::new(id))); ++ return Ok(OpenResult::SchemeLocal(id.get(), InternalFlags::empty())); ++ } ++ ++ let parts: Vec<&str> = path.split('/').collect(); ++ if matches!(parts.first(), Some(&"eventfd")) { ++ let init = match parts.get(1) { ++ Some(value) => value.parse::().map_err(|_| Error::new(EINVAL))?, ++ None => 0_u64, ++ }; ++ if init > u32::MAX as u64 { ++ return Err(Error::new(EINVAL)); ++ } ++ let semaphore = match parts.get(2) { ++ Some(value) => match *value { ++ "0" => 
Ok(false), ++ "1" => Ok(true), ++ _ => Err(Error::new(EINVAL)), ++ }?, ++ None => false, ++ }; ++ ++ let id = next_counter_id(); ++ counters_mut(token.token()).insert(id, Arc::new(EventCounter::new(id, init, semaphore))); ++ return Ok(OpenResult::SchemeLocal(id, InternalFlags::empty())); ++ } ++ ++ Err(Error::new(ENOENT)) + } + + fn close(&self, id: usize, token: &mut CleanLockToken) -> Result<()> { ++ if is_counter_id(id) { ++ let counter = counters_mut(token.token()) ++ .remove(&id) ++ .ok_or(Error::new(EBADF))?; ++ if let Some(counter) = Arc::into_inner(counter) { ++ counter.into_drop(token.downgrade()); ++ } ++ return Ok(()); ++ } ++ + let id = EventQueueId::from(id); + let queue = queues_mut(token.token()) + .remove(&id) +@@ -59,6 +102,15 @@ impl KernelScheme for EventScheme { + _stored_flags: u32, + token: &mut CleanLockToken, + ) -> Result { ++ if is_counter_id(id) { ++ let counter = { ++ let handles = counters(token.token()); ++ let handle = handles.get(&id).ok_or(Error::new(EBADF))?; ++ handle.clone() ++ }; ++ return counter.read(buf, flags & O_NONBLOCK as u32 == 0, token); ++ } ++ + let id = EventQueueId::from(id); + + let queue = { +@@ -74,10 +126,19 @@ impl KernelScheme for EventScheme { + &self, + id: usize, + buf: UserSliceRo, +- _flags: u32, ++ flags: u32, + _stored_flags: u32, + token: &mut CleanLockToken, + ) -> Result { ++ if is_counter_id(id) { ++ let counter = { ++ let handles = counters(token.token()); ++ let handle = handles.get(&id).ok_or(Error::new(EBADF))?; ++ handle.clone() ++ }; ++ return counter.write(buf, flags & O_NONBLOCK as u32 == 0, token); ++ } ++ + let id = EventQueueId::from(id); + + let queue = { +@@ -98,8 +159,12 @@ impl KernelScheme for EventScheme { + Ok(events_written * size_of::()) + } + +- fn kfpath(&self, _id: usize, buf: UserSliceWo, _token: &mut CleanLockToken) -> Result { +- buf.copy_common_bytes_from_slice(b"/scheme/event/") ++ fn kfpath(&self, id: usize, buf: UserSliceWo, _token: &mut CleanLockToken) -> Result { ++ 
if is_counter_id(id) { ++ buf.copy_common_bytes_from_slice(b"/scheme/event/eventfd") ++ } else { ++ buf.copy_common_bytes_from_slice(b"/scheme/event/") ++ } + } + + fn fevent( +@@ -108,6 +173,23 @@ impl KernelScheme for EventScheme { + flags: EventFlags, + token: &mut CleanLockToken, + ) -> Result { ++ if is_counter_id(id) { ++ let counter = { ++ let handles = counters(token.token()); ++ let handle = handles.get(&id).ok_or(Error::new(EBADF))?; ++ handle.clone() ++ }; ++ ++ let mut ready = EventFlags::empty(); ++ if flags.contains(EventFlags::EVENT_READ) && counter.is_readable(token) { ++ ready |= EventFlags::EVENT_READ; ++ } ++ if flags.contains(EventFlags::EVENT_WRITE) && counter.is_writable(token) { ++ ready |= EventFlags::EVENT_WRITE; ++ } ++ return Ok(ready); ++ } ++ + let id = EventQueueId::from(id); + + let queue = { diff --git a/local/patches/relibc/P3-elf64-types.patch b/local/patches/relibc/P3-elf64-types.patch new file mode 100644 index 00000000..85d6287b --- /dev/null +++ b/local/patches/relibc/P3-elf64-types.patch @@ -0,0 +1,13 @@ +diff -ruN a/src/header/elf/mod.rs b/src/header/elf/mod.rs +--- a/src/header/elf/mod.rs ++++ b/src/header/elf/mod.rs +@@ -9,8 +9,8 @@ + pub type Elf32_Word = uint32_t; + pub type Elf32_Sword = int32_t; +-pub type Elf64_Word = uint64_t; +-pub type Elf64_Sword = int64_t; ++pub type Elf64_Word = uint32_t; ++pub type Elf64_Sword = int32_t; + + pub type Elf32_Xword = uint64_t; + pub type Elf32_Sxword = int64_t; diff --git a/local/patches/relibc/P3-eventfd.patch b/local/patches/relibc/P3-eventfd.patch index e68ea35e..67bc2e0c 100644 --- a/local/patches/relibc/P3-eventfd.patch +++ b/local/patches/relibc/P3-eventfd.patch @@ -12,12 +12,9 @@ diff -ruN a/src/header/mod.rs b/src/header/mod.rs diff -ruN a/src/header/sys_eventfd/cbindgen.toml b/src/header/sys_eventfd/cbindgen.toml --- a/src/header/sys_eventfd/cbindgen.toml 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_eventfd/cbindgen.toml 2026-04-15 09:46:42.009280833 +0100 -@@ 
-0,0 +1,12 @@ +@@ -0,0 +1,9 @@ +sys_includes = ["stdint.h"] +include_guard = "_SYS_EVENTFD_H" -+trailer = """ -+typedef uint64_t eventfd_t; -+""" +language = "C" +style = "Tag" +no_includes = true @@ -28,7 +25,7 @@ diff -ruN a/src/header/sys_eventfd/cbindgen.toml b/src/header/sys_eventfd/cbindg diff -ruN a/src/header/sys_eventfd/mod.rs b/src/header/sys_eventfd/mod.rs --- a/src/header/sys_eventfd/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_eventfd/mod.rs 2026-04-15 09:46:42.009305629 +0100 -@@ -0,0 +1,90 @@ +@@ -0,0 +1,87 @@ +//! `sys/eventfd.h` implementation. +//! +//! Non-POSIX, see . @@ -36,6 +33,7 @@ diff -ruN a/src/header/sys_eventfd/mod.rs b/src/header/sys_eventfd/mod.rs +use core::{mem, slice}; + +use crate::{ ++ c_str::{CStr, CString}, + error::{Errno, ResultExt}, + header::{ + errno::{EFAULT, EINVAL, EIO}, @@ -80,18 +78,14 @@ diff -ruN a/src/header/sys_eventfd/mod.rs b/src/header/sys_eventfd/mod.rs + oflag |= O_NONBLOCK; + } + -+ let fd = Sys::open(c"/scheme/event".into(), oflag, 0)?; -+ if initval != 0 { -+ let value = u64::from(initval); -+ let buf = unsafe { -+ slice::from_raw_parts((&raw const value).cast::(), mem::size_of::()) -+ }; -+ if let Err(err) = write_exact(fd, buf) { -+ let _ = Sys::close(fd); -+ return Err(err); -+ } -+ } -+ Ok(fd) ++ let path = CString::new(format!( ++ "/scheme/event/eventfd/{}/{}", ++ initval, ++ if flags & EFD_SEMAPHORE == EFD_SEMAPHORE { 1 } else { 0 } ++ )) ++ .map_err(|_| Errno(EINVAL))?; ++ ++ Sys::open(CStr::borrow(&path), oflag, 0) +} + +#[unsafe(no_mangle)] diff --git a/local/patches/relibc/P3-fd-event-tests.patch b/local/patches/relibc/P3-fd-event-tests.patch index 0361631a..e4e4ad61 100644 --- a/local/patches/relibc/P3-fd-event-tests.patch +++ b/local/patches/relibc/P3-fd-event-tests.patch @@ -34,7 +34,7 @@ diff --git a/tests/sys_signalfd/signalfd.c b/tests/sys_signalfd/signalfd.c new file mode 100644 --- /dev/null +++ b/tests/sys_signalfd/signalfd.c -@@ -0,0 +1,22 @@ +@@ -0,0 +1,23 @@ 
+#include +#include +#include @@ -62,7 +62,7 @@ diff --git a/tests/sys_timerfd/timerfd.c b/tests/sys_timerfd/timerfd.c new file mode 100644 --- /dev/null +++ b/tests/sys_timerfd/timerfd.c -@@ -0,0 +1,27 @@ +@@ -0,0 +1,29 @@ +#include +#include +#include @@ -87,8 +87,44 @@ new file mode 100644 + + memset(&spec, 0, sizeof(spec)); + spec.it_value.tv_sec = 1; -+ assert(timerfd_settime(fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &spec, NULL) == 0); ++ assert(timerfd_settime(fd, TFD_TIMER_ABSTIME, &spec, NULL) == 0); + assert(close(fd) == 0); + puts("timerfd ok"); + return 0; +} +diff --git a/tests/Makefile.tests.mk b/tests/Makefile.tests.mk +--- a/tests/Makefile.tests.mk ++++ b/tests/Makefile.tests.mk +@@ -312,6 +312,9 @@ VARIED_NAMES=\ + grp/getgrgid_r \ + grp/getgrnam_r \ + grp/gr_iter \ ++ sys_eventfd/eventfd \ ++ sys_signalfd/signalfd \ ++ sys_timerfd/timerfd \ + waitpid \ + waitpid_multiple \ + $(FAILING_TESTS) +diff --git a/tests/Makefile b/tests/Makefile +--- a/tests/Makefile ++++ b/tests/Makefile +@@ -78,14 +78,16 @@ FLAGS=\ + -Wno-deprecated-declarations \ + -pedantic \ + -g \ +- -I . ++ -I . 
\ ++ $(CPPFLAGS) $(CFLAGS) + + STATIC_FLAGS=\ +- -static ++ -static $(LDFLAGS) + + DYNAMIC_FLAGS=\ + -Wl,--enable-new-dtags \ +- -Wl,-export-dynamic ++ -Wl,-export-dynamic \ ++ $(LDFLAGS) + + SYSROOT?=$(abspath ../sysroot/$(TARGET)/) + SYSROOT_TARGET?=$(SYSROOT) diff --git a/local/patches/relibc/P3-ipc-tests.patch b/local/patches/relibc/P3-ipc-tests.patch index a0c30cfb..ca5a2c84 100644 --- a/local/patches/relibc/P3-ipc-tests.patch +++ b/local/patches/relibc/P3-ipc-tests.patch @@ -38,55 +38,3 @@ new file mode 100644 + puts("shmget ok"); + return 0; +} -diff --git a/tests/sys_timerfd/timerfd.c b/tests/sys_timerfd/timerfd.c -new file mode 100644 ---- /dev/null -+++ b/tests/sys_timerfd/timerfd.c -@@ -0,0 +1,18 @@ -+#include -+#include -+#include -+#include -+#include -+ -+int main(void) { -+ int fd = timerfd_create(CLOCK_REALTIME, 0); -+ if (fd < 0) { -+ puts("timerfd unavailable"); -+ return 0; -+ } -+ struct itimerspec spec; -+ memset(&spec, 0, sizeof(spec)); -+ spec.it_value.tv_nsec = 1; -+ assert(timerfd_settime(fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, &spec, NULL) == 0); -+ assert(close(fd) == 0); -+ puts("timerfd ok"); -+ return 0; -+} -diff --git a/tests/sys_signalfd/signalfd.c b/tests/sys_signalfd/signalfd.c -new file mode 100644 ---- /dev/null -+++ b/tests/sys_signalfd/signalfd.c -@@ -0,0 +1,20 @@ -+#include -+#include -+#include -+#include -+#include -+ -+int main(void) { -+ sigset_t mask; -+ assert(sigemptyset(&mask) == 0); -+ assert(sigaddset(&mask, SIGUSR1) == 0); -+ int fd = signalfd(-1, &mask, sizeof(mask)); -+ if (fd < 0) { -+ puts("signalfd unavailable"); -+ return 0; -+ } -+ assert(kill(getpid(), SIGUSR1) == 0); -+ struct signalfd_siginfo info; -+ assert(read(fd, &info, sizeof(info)) == (ssize_t)sizeof(info)); -+ assert(info.ssi_signo == SIGUSR1); -+ puts("signalfd ok"); -+ return 0; -+} diff --git a/local/patches/relibc/P3-signalfd-header.patch b/local/patches/relibc/P3-signalfd-header.patch index a97a84b1..77a79bb7 100644 --- 
a/local/patches/relibc/P3-signalfd-header.patch +++ b/local/patches/relibc/P3-signalfd-header.patch @@ -1,26 +1,93 @@ +diff -ruN a/src/header/mod.rs b/src/header/mod.rs +--- a/src/header/mod.rs 2026-04-15 09:48:02.257700000 +0100 ++++ b/src/header/mod.rs 2026-04-19 13:30:00.000000000 +0100 +@@ -98,6 +98,7 @@ + pub mod sys_resource; + pub mod sys_select; ++pub mod sys_signalfd; + // TODO: sys/sem.h + // TODO: sys/shm.h + pub mod sys_socket; diff -ruN a/src/header/sys_signalfd/cbindgen.toml b/src/header/sys_signalfd/cbindgen.toml --- a/src/header/sys_signalfd/cbindgen.toml 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_signalfd/cbindgen.toml 2026-04-15 09:48:02.257754724 +0100 -@@ -0,0 +1,12 @@ +@@ -0,0 +1,41 @@ +sys_includes = ["signal.h", "stdint.h"] +include_guard = "_SYS_SIGNALFD_H" ++trailer = """ ++#ifndef SFD_CLOEXEC ++#define SFD_CLOEXEC 0x80000 ++#endif ++ ++#ifndef SFD_NONBLOCK ++#define SFD_NONBLOCK 0x800 ++#endif ++ ++struct signalfd_siginfo { ++ uint32_t ssi_signo; ++ int32_t ssi_errno; ++ int32_t ssi_code; ++ uint32_t ssi_pid; ++ uint32_t ssi_uid; ++ int32_t ssi_fd; ++ uint32_t ssi_tid; ++ uint32_t ssi_band; ++ uint32_t ssi_overrun; ++ uint32_t ssi_trapno; ++ int32_t ssi_status; ++ int32_t ssi_int; ++ uint64_t ssi_ptr; ++ uint64_t ssi_utime; ++ uint64_t ssi_stime; ++ uint64_t ssi_addr; ++ uint16_t ssi_addr_lsb; ++ uint16_t __pad2; ++ int32_t ssi_syscall; ++ uint64_t ssi_call_addr; ++ uint32_t ssi_arch; ++ uint8_t __pad[28]; ++}; ++""" +language = "C" +style = "Tag" +no_includes = true +cpp_compat = true -+ + +[enum] +prefix_with_name = true -+ + +[export.rename] +"signalfd_siginfo" = "struct signalfd_siginfo" diff -ruN a/src/header/sys_signalfd/mod.rs b/src/header/sys_signalfd/mod.rs --- a/src/header/sys_signalfd/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_signalfd/mod.rs 2026-04-15 09:48:02.257778048 +0100 -@@ -0,0 +1,6 @@ +@@ -0,0 +1,29 @@ +//! `sys/signalfd.h` implementation. 
+ -+pub use crate::header::signal::{SFD_CLOEXEC, SFD_NONBLOCK, signalfd, signalfd4, signalfd_siginfo}; ++use crate::{ ++ header::signal::{self, sigset_t, signalfd_siginfo}, ++ platform::types::c_int, ++}; ++ ++pub const SFD_CLOEXEC: c_int = signal::SFD_CLOEXEC; ++pub const SFD_NONBLOCK: c_int = signal::SFD_NONBLOCK; + +#[unsafe(no_mangle)] -+pub extern "C" fn _cbindgen_export_sys_signalfd_siginfo(siginfo: signalfd_siginfo) {} ++pub unsafe extern "C" fn signalfd(fd: c_int, mask: *const sigset_t, masksize: usize) -> c_int { ++ unsafe { signal::signalfd(fd, mask, masksize) } ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn signalfd4( ++ fd: c_int, ++ mask: *const sigset_t, ++ masksize: usize, ++ flags: c_int, ++) -> c_int { ++ unsafe { signal::signalfd4(fd, mask, masksize, flags) } ++} ++ ++#[unsafe(no_mangle)] ++pub extern "C" fn _cbindgen_export_sys_signalfd_siginfo(siginfo: signalfd_siginfo) { ++ let _ = siginfo; ++} diff --git a/local/patches/relibc/P3-sysv-ipc.patch b/local/patches/relibc/P3-sysv-ipc.patch index 7eff024f..cc47f279 100644 --- a/local/patches/relibc/P3-sysv-ipc.patch +++ b/local/patches/relibc/P3-sysv-ipc.patch @@ -22,9 +22,12 @@ diff -ruN a/src/header/mod.rs b/src/header/mod.rs diff -ruN a/src/header/sys_ipc/cbindgen.toml b/src/header/sys_ipc/cbindgen.toml --- a/src/header/sys_ipc/cbindgen.toml 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_ipc/cbindgen.toml 2026-04-15 09:57:28.904120977 +0100 -@@ -0,0 +1,9 @@ +@@ -0,0 +1,12 @@ +sys_includes = ["sys/types.h"] +include_guard = "_SYS_IPC_H" ++trailer = """ ++typedef struct ipc_perm ipc_perm; ++""" +language = "C" +style = "Tag" +no_includes = true diff --git a/local/patches/relibc/P3-timerfd.patch b/local/patches/relibc/P3-timerfd.patch index bd5061de..8e947996 100644 --- a/local/patches/relibc/P3-timerfd.patch +++ b/local/patches/relibc/P3-timerfd.patch @@ -1,3 +1,14 @@ +diff -ruN a/src/header/mod.rs b/src/header/mod.rs +--- a/src/header/mod.rs 2026-04-15 09:58:03.811510680 +0100 
++++ b/src/header/mod.rs 2026-04-15 09:59:40.902089070 +0100 +@@ -103,6 +103,7 @@ + pub mod sys_stat; + pub mod sys_statvfs; + pub mod sys_time; ++pub mod sys_timerfd; + #[deprecated] + pub mod sys_timeb; + //pub mod sys_times; diff -ruN a/src/header/sys_timerfd/cbindgen.toml b/src/header/sys_timerfd/cbindgen.toml --- a/src/header/sys_timerfd/cbindgen.toml 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_timerfd/cbindgen.toml 2026-04-15 09:59:40.902120449 +0100 @@ -17,7 +28,7 @@ diff -ruN a/src/header/sys_timerfd/cbindgen.toml b/src/header/sys_timerfd/cbindg diff -ruN a/src/header/sys_timerfd/mod.rs b/src/header/sys_timerfd/mod.rs --- a/src/header/sys_timerfd/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_timerfd/mod.rs 2026-04-15 09:59:40.902160103 +0100 -@@ -0,0 +1,94 @@ +@@ -0,0 +1,91 @@ +//! `sys/timerfd.h` implementation. +//! +//! Non-POSIX, see . @@ -84,13 +95,10 @@ diff -ruN a/src/header/sys_timerfd/mod.rs b/src/header/sys_timerfd/mod.rs + +#[unsafe(no_mangle)] +pub unsafe extern "C" fn timerfd_settime(fd: c_int, flags: c_int, new: *const itimerspec, old: *mut itimerspec) -> c_int { -+ let supported = TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET; ++ let supported = TFD_TIMER_ABSTIME; + if flags & !supported != 0 { + return Err::(Errno(EINVAL)).or_minus_one_errno(); + } -+ if flags & TFD_TIMER_CANCEL_ON_SET != 0 && flags & TFD_TIMER_ABSTIME == 0 { -+ return Err::(Errno(EINVAL)).or_minus_one_errno(); -+ } + if new.is_null() { + return Err::(Errno(EFAULT)).or_minus_one_errno(); + } diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/export.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/export.h new file mode 100644 index 00000000..5c1a4de5 --- /dev/null +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/export.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_EXPORT_H +#define _LINUX_EXPORT_H + +#include + +#endif /* _LINUX_EXPORT_H */ diff --git 
a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/refcount.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/refcount.h new file mode 100644 index 00000000..6968c7e6 --- /dev/null +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/refcount.h @@ -0,0 +1,73 @@ +#ifndef _LINUX_REFCOUNT_H +#define _LINUX_REFCOUNT_H + +#include +#include +#include + +typedef struct { + atomic_t refs; +} refcount_t; + +#define REFCOUNT_INIT(value) { .refs = { .counter = (value) } } + +static inline unsigned int refcount_read(const refcount_t *r) +{ + return (unsigned int)atomic_read(&r->refs); +} + +static inline void refcount_set(refcount_t *r, int n) +{ + atomic_set(&r->refs, n); +} + +static inline void refcount_inc(refcount_t *r) +{ + atomic_inc(&r->refs); +} + +static inline int refcount_inc_not_zero(refcount_t *r) +{ + return atomic_inc_not_zero(&r->refs); +} + +static inline int refcount_dec_and_test(refcount_t *r) +{ + return atomic_dec_and_test(&r->refs); +} + +static inline int refcount_dec_not_one(refcount_t *r) +{ + int current; + + do { + current = atomic_read(&r->refs); + if (current == 1) { + return 0; + } + } while (atomic_cmpxchg(&r->refs, current, current - 1) != current); + + return 1; +} + +static inline int refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) +{ + if (!refcount_dec_and_test(r)) { + return 0; + } + + mutex_lock(lock); + return 1; +} + +static inline int refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) +{ + if (!refcount_dec_and_test(r)) { + return 0; + } + + spin_lock(lock); + return 1; +} + +#endif /* _LINUX_REFCOUNT_H */ diff --git a/local/recipes/drivers/redox-driver-sys/source/src/pci.rs b/local/recipes/drivers/redox-driver-sys/source/src/pci.rs index 0cae9cef..d43aa32a 100644 --- a/local/recipes/drivers/redox-driver-sys/source/src/pci.rs +++ b/local/recipes/drivers/redox-driver-sys/source/src/pci.rs @@ -119,6 +119,25 @@ pub const PCI_CAP_ID_PCIE: u8 = 0x10; pub const PCI_CAP_ID_POWER: u8 = 
0x01; pub const PCI_CAP_ID_VNDR: u8 = 0x09; +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum InterruptSupport { + None, + LegacyOnly, + Msi, + MsiX, +} + +impl InterruptSupport { + pub fn as_str(self) -> &'static str { + match self { + Self::None => "none", + Self::LegacyOnly => "legacy", + Self::Msi => "msi", + Self::MsiX => "msix", + } + } +} + #[derive(Clone, Debug)] pub struct MsixCapability { pub table_bar: u8, @@ -174,6 +193,41 @@ impl PciDeviceInfo { self.bars.iter().find(|b| b.index == index && b.is_memory()) } + pub fn supports_msi(&self) -> bool { + self.find_capability(PCI_CAP_ID_MSI).is_some() + } + + pub fn supports_msix(&self) -> bool { + self.find_capability(PCI_CAP_ID_MSIX).is_some() + } + + pub fn interrupt_support(&self) -> InterruptSupport { + let quirks = self.quirks(); + + let has_legacy = self.irq.is_some(); + let has_msi = self.supports_msi() && !quirks.contains(crate::quirks::PciQuirkFlags::NO_MSI); + let has_msix = + self.supports_msix() && !quirks.contains(crate::quirks::PciQuirkFlags::NO_MSIX); + + if quirks.contains(crate::quirks::PciQuirkFlags::FORCE_LEGACY_IRQ) { + return if has_legacy { + InterruptSupport::LegacyOnly + } else { + InterruptSupport::None + }; + } + + if has_msix { + InterruptSupport::MsiX + } else if has_msi { + InterruptSupport::Msi + } else if has_legacy { + InterruptSupport::LegacyOnly + } else { + InterruptSupport::None + } + } + pub fn quirks(&self) -> crate::quirks::PciQuirkFlags { crate::quirks::lookup_pci_quirks(self) } @@ -629,52 +683,11 @@ fn enumerate_pci_filtered(class: Option) -> Result> { let config_path = format!("{}/config", location.scheme_path()); if let Ok(data) = std::fs::read(&config_path) { - if data.len() < 64 { - continue; + if let Some(info) = parse_device_info_from_config_space(location, &data) + .filter(|info| class.is_none_or(|class| info.class_code == class)) + { + devices.push(info); } - let class_code = data[0x0b]; - if let Some(class) = class { - if class_code != class { - 
continue; - } - } - let vendor_id = u16::from_le_bytes([data[0x00], data[0x01]]); - let device_id = u16::from_le_bytes([data[0x02], data[0x03]]); - let subclass = data[0x0a]; - let prog_if = data[0x09]; - let revision = data[0x08]; - let header_type = data[0x0e] & 0x7F; - let irq_line = data[0x3c]; - - let (subsystem_vendor_id, subsystem_device_id) = - if header_type == PCI_HEADER_TYPE_NORMAL && data.len() > 0x2F { - ( - u16::from_le_bytes([data[0x2c], data[0x2d]]), - u16::from_le_bytes([data[0x2e], data[0x2f]]), - ) - } else { - (0xFFFF, 0xFFFF) - }; - - devices.push(PciDeviceInfo { - location, - vendor_id, - device_id, - subsystem_vendor_id, - subsystem_device_id, - revision, - class_code, - subclass, - prog_if, - header_type, - irq: if irq_line != 0 && irq_line != 0xff { - Some(irq_line as u32) - } else { - None - }, - bars: Vec::new(), - capabilities: Vec::new(), - }); } } @@ -688,6 +701,97 @@ fn enumerate_pci_filtered(class: Option) -> Result> { Ok(devices) } +pub fn parse_device_info_from_config_space(location: PciLocation, data: &[u8]) -> Option { + if data.len() < 64 { + return None; + } + + let class_code = data[0x0b]; + + let header_type = data[0x0e] & 0x7F; + let capabilities = if header_type == PCI_HEADER_TYPE_NORMAL { + parse_capabilities_from_config_bytes(data) + } else { + Vec::new() + }; + + let (subsystem_vendor_id, subsystem_device_id) = + if header_type == PCI_HEADER_TYPE_NORMAL && data.len() > 0x2F { + ( + u16::from_le_bytes([data[0x2c], data[0x2d]]), + u16::from_le_bytes([data[0x2e], data[0x2f]]), + ) + } else { + (0xFFFF, 0xFFFF) + }; + + let irq_line = data[0x3c]; + Some(PciDeviceInfo { + location, + vendor_id: u16::from_le_bytes([data[0x00], data[0x01]]), + device_id: u16::from_le_bytes([data[0x02], data[0x03]]), + subsystem_vendor_id, + subsystem_device_id, + revision: data[0x08], + class_code, + subclass: data[0x0a], + prog_if: data[0x09], + header_type, + irq: if irq_line != 0 && irq_line != 0xff { + Some(irq_line as u32) + } else { + 
None + }, + bars: Vec::new(), + capabilities, + }) +} + +fn parse_capabilities_from_config_bytes(data: &[u8]) -> Vec { + if data.len() < 64 { + return Vec::new(); + } + + let status = u16::from_le_bytes([data[0x06], data[0x07]]); + if status & 0x0010 == 0 { + return Vec::new(); + } + + let mut caps = Vec::new(); + let mut cap_ptr = usize::from(data[0x34]); + let mut visited = 0u8; + + while cap_ptr >= 0x40 && cap_ptr + 1 < data.len() && visited < 48 { + let cap_id = data[cap_ptr]; + let next_ptr = usize::from(data[cap_ptr + 1]); + + if cap_id == 0 { + break; + } + + let vendor_cap_id = if cap_id == PCI_CAP_ID_VNDR && cap_ptr + 2 < data.len() { + Some(data[cap_ptr + 2]) + } else { + None + }; + + caps.push(PciCapability { + id: cap_id, + offset: cap_ptr as u8, + vendor_cap_id, + }); + + if next_ptr == 0 || next_ptr <= cap_ptr { + break; + } + + cap_ptr = next_ptr; + visited += 1; + } + + caps +} + pub fn enumerate_pci_class(class: u8) -> Result> { enumerate_pci_filtered(Some(class)) } @@ -792,4 +896,161 @@ mod tests { assert_eq!(parsed.device, 0x1f); assert_eq!(parsed.function, 0x00); } + + #[test] + fn parse_capabilities_from_config_bytes_reads_standard_and_vendor_caps() { + let mut data = vec![0u8; 256]; + data[0x06] = 0x10; + data[0x34] = 0x50; + + data[0x50] = PCI_CAP_ID_MSI; + data[0x51] = 0x60; + + data[0x60] = PCI_CAP_ID_VNDR; + data[0x61] = 0x00; + data[0x62] = 0xAB; + + let caps = parse_capabilities_from_config_bytes(&data); + assert_eq!(caps.len(), 2); + assert_eq!(caps[0].id, PCI_CAP_ID_MSI); + assert_eq!(caps[0].offset, 0x50); + assert_eq!(caps[0].vendor_cap_id, None); + assert_eq!(caps[1].id, PCI_CAP_ID_VNDR); + assert_eq!(caps[1].offset, 0x60); + assert_eq!(caps[1].vendor_cap_id, Some(0xAB)); + } + + #[test] + fn parse_capabilities_from_config_bytes_stops_on_backwards_pointer() { + let mut data = vec![0u8; 256]; + data[0x06] = 0x10; + data[0x34] = 0x50; + data[0x50] = PCI_CAP_ID_MSI; + data[0x51] = 0x48; + + let caps = 
parse_capabilities_from_config_bytes(&data); + assert_eq!(caps.len(), 1); + assert_eq!(caps[0].id, PCI_CAP_ID_MSI); + } + + #[test] + fn parse_device_info_from_config_bytes_includes_capabilities() { + let mut data = vec![0u8; 256]; + data[0x00] = 0x86; + data[0x01] = 0x80; + data[0x02] = 0x34; + data[0x03] = 0x12; + data[0x06] = 0x10; + data[0x08] = 0x02; + data[0x09] = 0x01; + data[0x0a] = PCI_CLASS_DISPLAY_VGA; + data[0x0b] = PCI_CLASS_DISPLAY; + data[0x0e] = PCI_HEADER_TYPE_NORMAL; + data[0x2c] = 0x86; + data[0x2d] = 0x80; + data[0x2e] = 0x78; + data[0x2f] = 0x56; + data[0x34] = 0x50; + data[0x3c] = 11; + data[0x50] = PCI_CAP_ID_MSIX; + data[0x51] = 0x00; + + let info = parse_device_info_from_config_space( + PciLocation { + segment: 0, + bus: 0, + device: 2, + function: 0, + }, + &data, + ) + .expect("display device should be parsed"); + + assert_eq!(info.vendor_id, PCI_VENDOR_ID_INTEL); + assert_eq!(info.device_id, 0x1234); + assert_eq!(info.subsystem_device_id, 0x5678); + assert_eq!(info.irq, Some(11)); + assert_eq!(info.capabilities.len(), 1); + assert_eq!(info.capabilities[0].id, PCI_CAP_ID_MSIX); + } + + #[test] + fn parse_device_info_from_config_space_rejects_short_config() { + let location = PciLocation { + segment: 0, + bus: 0, + device: 0, + function: 0, + }; + assert!(parse_device_info_from_config_space(location, &[0u8; 32]).is_none()); + } + + #[test] + fn interrupt_support_prefers_msix_over_msi_and_legacy() { + let info = PciDeviceInfo { + location: PciLocation { + segment: 0, + bus: 0, + device: 0, + function: 0, + }, + vendor_id: 0x1234, + device_id: 0x5678, + subsystem_vendor_id: 0xffff, + subsystem_device_id: 0xffff, + revision: 0, + class_code: 0, + subclass: 0, + prog_if: 0, + header_type: PCI_HEADER_TYPE_NORMAL, + irq: Some(11), + bars: Vec::new(), + capabilities: vec![ + PciCapability { + id: PCI_CAP_ID_MSI, + offset: 0x50, + vendor_cap_id: None, + }, + PciCapability { + id: PCI_CAP_ID_MSIX, + offset: 0x60, + vendor_cap_id: None, + }, + ], + 
}; + + assert_eq!(info.interrupt_support(), InterruptSupport::MsiX); + assert_eq!(info.interrupt_support().as_str(), "msix"); + } + + #[test] + fn interrupt_support_honors_no_msix_quirk() { + let info = PciDeviceInfo { + location: PciLocation { + segment: 0, + bus: 0, + device: 0, + function: 0, + }, + vendor_id: 0x1022, + device_id: 0x145C, + subsystem_vendor_id: 0xffff, + subsystem_device_id: 0xffff, + revision: 0, + class_code: 0, + subclass: 0, + prog_if: 0, + header_type: PCI_HEADER_TYPE_NORMAL, + irq: Some(9), + bars: Vec::new(), + capabilities: vec![PciCapability { + id: PCI_CAP_ID_MSIX, + offset: 0x60, + vendor_cap_id: None, + }], + }; + + assert_eq!(info.interrupt_support(), InterruptSupport::LegacyOnly); + assert_eq!(info.interrupt_support().as_str(), "legacy"); + } } diff --git a/local/recipes/drivers/redox-driver-sys/source/src/quirks/usb_table.rs b/local/recipes/drivers/redox-driver-sys/source/src/quirks/usb_table.rs index b0ece246..165ce3f4 100644 --- a/local/recipes/drivers/redox-driver-sys/source/src/quirks/usb_table.rs +++ b/local/recipes/drivers/redox-driver-sys/source/src/quirks/usb_table.rs @@ -1,4 +1,4 @@ -use super::{UsbQuirkEntry, UsbQuirkFlags, PCI_QUIRK_ANY_ID}; +use super::{UsbQuirkEntry, UsbQuirkFlags}; const F_00: UsbQuirkFlags = UsbQuirkFlags::from_bits_truncate( UsbQuirkFlags::NEED_RESET.bits() | UsbQuirkFlags::NO_LPM.bits(), diff --git a/local/recipes/gpu/amdgpu/recipe.toml b/local/recipes/gpu/amdgpu/recipe.toml index 633c40cd..9824c53f 100644 --- a/local/recipes/gpu/amdgpu/recipe.toml +++ b/local/recipes/gpu/amdgpu/recipe.toml @@ -1,6 +1,8 @@ -# AMD GPU driver port for Redox OS — Phase P2+P5: Display Core + DML2 + TTM -# Scope: AMD DC modesetting, DML2 display modeling, TTM memory manager, connector detection. -# Full acceleration (compute, video decode) requires Mesa radeonsi backend. +# AMD GPU retained display glue path for Redox OS +# Scope: bounded Red Bear display glue path for init, connector detection, and modeset. 
+# Imported Linux AMD DC / TTM / amdgpu core trees remain adjacent source under compile triage and +# are not part of the default retained build path. Full acceleration still requires broader GPU work +# plus Mesa radeonsi backend enablement. [source] # Local overlay recipe. The extracted Linux 7.0-rc7 AMDGPU tree lives next to this @@ -20,13 +22,13 @@ DYNAMIC_INIT # Paths AMD_ROOT="${COOKBOOK_SOURCE}/../../amdgpu-source/gpu/drm/amd" AMD_SRC="${AMD_ROOT}" -TTM_SRC="${COOKBOOK_SOURCE}/../../amdgpu-source/gpu/drm/ttm" INCLUDES="${COOKBOOK_SOURCE}/../../amdgpu-source/include" LINUX_KPI="${COOKBOOK_ROOT}/local/recipes/drivers/linux-kpi/source/src/c_headers" REDOX_GLUE="${COOKBOOK_SOURCE}" TARGET_CC="${TARGET}-gcc" -# Compiler flags for AMD driver — DML2 enabled +# Compiler flags for the bounded retained AMD path. Legacy AMD DC config defines remain here only +# for header compatibility with the adjacent imported Linux source trees. export CFLAGS="-D__redox__ -D__KERNEL__ -DCONFIG_DRM_AMDGPU -DCONFIG_DRM_AMD_DC \ -DCONFIG_DRM_AMD_DC_DML2=1 \ -DCONFIG_DRM_AMD_DC_FP -DCONFIG_DRM_AMD_ACP \ @@ -66,11 +68,20 @@ export CFLAGS="-D__redox__ -D__KERNEL__ -DCONFIG_DRM_AMDGPU -DCONFIG_DRM_AMD_DC "${TARGET_CC}" -c ${CFLAGS} "${REDOX_GLUE}/amdgpu_redox_main.c" -o amdgpu_redox_main.o "${TARGET_CC}" -c ${CFLAGS} "${REDOX_GLUE}/redox_stubs.c" -o redox_stubs.o -# Stage 2: Compile AMD Display Core (DC) — all display sources including DML/DML2 -# Each file MUST compile. Any failure is a hard error. +# Stage 2: Bounded first-display path +# +# The current Red Bear AMD display bring-up path does not call into the imported +# Linux AMD Display Core tree directly. The live FFI surface comes from the +# Red Bear glue layer (`amdgpu_redox_main.c` / `redox_stubs.c`), while the +# broad `display/*.c` compile currently drags in optional and unsupported +# subtrees such as freesync before the retained path is even proven. 
+# +# Keep Stage 2 explicit and intentionally empty until a retained imported +# display-source subset is proven necessary by bounded compile triage. +DISPLAY_SRCS="" success=0 failed=0 -find "${AMD_SRC}/display/" -name '*.c' | while read -r src; do +for src in $DISPLAY_SRCS; do obj=$(basename "${src%.c}.o") if "${TARGET_CC}" -c ${CFLAGS} "$src" -o "$obj" 2>"${obj}.log"; then success=$((success + 1)) @@ -81,12 +92,17 @@ find "${AMD_SRC}/display/" -name '*.c' | while read -r src; do exit 1 fi done -echo "Stage 2: AMD DC compiled ${success} files, ${failed} failed" +echo "Stage 2: bounded AMD display path compiled ${success} imported display files, ${failed} failed" -# Stage 3: Compile TTM memory manager +# Stage 3: Imported TTM path +# +# The current bounded Red Bear display path uses Rust-side GEM/GTT/ring handling in +# `redox-drm`, not the imported Linux TTM stack. Keep this explicit and empty until +# the bounded path proves a concrete need for imported TTM code. +TTM_SRCS="" success=0 failed=0 -find "${TTM_SRC}/" -name '*.c' | while read -r src; do +for src in $TTM_SRCS; do obj=$(basename "${src%.c}.o") if "${TARGET_CC}" -c ${CFLAGS} "$src" -o "$obj" 2>"${obj}.log"; then success=$((success + 1)) @@ -97,13 +113,15 @@ find "${TTM_SRC}/" -name '*.c' | while read -r src; do exit 1 fi done -echo "Stage 3: TTM compiled ${success} files, ${failed} failed" +echo "Stage 3: bounded imported TTM path compiled ${success} files, ${failed} failed" -# Stage 4: Compile minimal amdgpu core (enough for display init) -CORE_SRCS="amdgpu_device.c amdgpu_drv.c amdgpu_i2c.c amdgpu_atombios.c \ - amdgpu_atombios_crtc.c amdgpu_bios.c amdgpu_mode.c amdgpu_display.c \ - amdgpu_fb.c amdgpu_gem.c amdgpu_object.c amdgpu_gmc.c amdgpu_mmhub.c \ - amdgpu_irq.c amdgpu_ring.c amdgpu_fence.c amdgpu_ttm.c amdgpu_bo_list.c" +# Stage 4: Imported amdgpu core path +# +# The current bounded Red Bear display path uses the custom glue layer for init, +# connector enumeration, and modeset, while Rust-side 
code owns GEM/GTT/ring state. +# Keep imported amdgpu core sources out of the retained compile surface until the +# bounded path proves a specific dependency on them. +CORE_SRCS="" success=0 failed=0 diff --git a/local/recipes/gpu/amdgpu/source/Makefile.redox b/local/recipes/gpu/amdgpu/source/Makefile.redox deleted file mode 100644 index 45e27b4d..00000000 --- a/local/recipes/gpu/amdgpu/source/Makefile.redox +++ /dev/null @@ -1,128 +0,0 @@ -CC = x86_64-unknown-redox-gcc -AR = x86_64-unknown-redox-ar - -AMDGPU_SRC ?= ../amdgpu-source/gpu/drm/amd -TTM_SRC ?= ../amdgpu-source/gpu/drm/ttm -AMDGPU_INCLUDES ?= ../amdgpu-source/include -LINUX_KPI ?= ../../drivers/linux-kpi/src/c_headers - -CFLAGS ?= -D__redox__ -D__KERNEL__ -DCONFIG_DRM_AMDGPU -DCONFIG_DRM_AMD_DC \ - -DCONFIG_DRM_AMD_DC_DML2=1 \ - -DCONFIG_DRM_AMD_DC_FP -DCONFIG_DRM_AMD_ACP \ - -I$(LINUX_KPI) \ - -I. \ - -I$(AMDGPU_INCLUDES) \ - -I$(AMDGPU_INCLUDES)/drm \ - -I$(AMDGPU_SRC)/include \ - -I$(AMDGPU_SRC)/include/asic_reg \ - -I$(AMDGPU_SRC)/display \ - -I$(AMDGPU_SRC)/display/dc \ - -I$(AMDGPU_SRC)/display/dc/dml \ - -I$(AMDGPU_SRC)/display/dc/dcn20 \ - -I$(AMDGPU_SRC)/display/dc/dcn21 \ - -I$(AMDGPU_SRC)/display/dc/dcn30 \ - -I$(AMDGPU_SRC)/display/dc/dcn301 \ - -I$(AMDGPU_SRC)/display/dc/dcn31 \ - -I$(AMDGPU_SRC)/display/dc/dcn32 \ - -I$(AMDGPU_SRC)/display/dc/dcn35 \ - -I$(AMDGPU_SRC)/display/dc/dml2_0 \ - -I$(AMDGPU_SRC)/display/dc/dml2_0/dml21 \ - -I$(AMDGPU_SRC)/display/dmub \ - -I$(AMDGPU_SRC)/display/modules \ - -I$(AMDGPU_SRC)/display/modules/freesync \ - -I$(AMDGPU_SRC)/display/modules/color \ - -I$(AMDGPU_SRC)/display/modules/info_packet \ - -I$(AMDGPU_SRC)/display/modules/power \ - -I$(AMDGPU_SRC)/pm/swsmu \ - -I$(AMDGPU_SRC)/pm/swsmu/inc \ - -I$(AMDGPU_SRC)/pm/powerplay \ - -I$(AMDGPU_SRC)/pm/powerplay/inc \ - -I$(AMDGPU_SRC)/pm/powerplay/hwmgr \ - -fPIC -O2 -Wall -Wno-unused-function -Wno-unused-variable \ - -Wno-address-of-packed-member -Wno-initializer-overrides - -LDFLAGS ?= -shared -LDLIBS 
?= -lredox_driver_sys -llinux_kpi -lm -lpthread - -GLUE_OBJS := redox_stubs.o amdgpu_redox_main.o -CORE_SRCS := \ - $(AMDGPU_SRC)/amdgpu/amdgpu_device.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_drv.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_i2c.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_atombios.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_atombios_crtc.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_bios.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_mode.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_display.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_fb.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_gem.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_object.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_gmc.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_mmhub.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_irq.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_ring.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_fence.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_ttm.c \ - $(AMDGPU_SRC)/amdgpu/amdgpu_bo_list.c -CORE_OBJS := $(patsubst %.c,%.o,$(notdir $(CORE_SRCS))) -DISPLAY_SRCS := $(shell find $(AMDGPU_SRC)/display -name '*.c') -DISPLAY_OBJS := $(patsubst %.c,%.o,$(notdir $(DISPLAY_SRCS))) -TTM_SRCS := $(shell find $(TTM_SRC) -name '*.c') -TTM_OBJS := $(patsubst %.c,%.o,$(notdir $(TTM_SRCS))) - -ALL_OBJS := $(GLUE_OBJS) $(DISPLAY_OBJS) $(TTM_OBJS) $(CORE_OBJS) - -.PHONY: all clean check display core ttm - -all: libamdgpu_dc_redox.so - - libamdgpu_dc_redox.so: $(GLUE_OBJS) - @set -e; \ - success=0; failed=0; \ - for src in $(DISPLAY_SRCS); do \ - obj=$$(basename "$${src%.c}.o"); \ - if $(CC) -c $(CFLAGS) "$$src" -o "$$obj"; then \ - success=$$((success + 1)); \ - else \ - failed=$$((failed + 1)); \ - echo "ERROR: failed to compile $$src"; \ - exit 1; \ - fi; \ - done; \ - for src in $(TTM_SRCS); do \ - obj=$$(basename "$${src%.c}.o"); \ - if $(CC) -c $(CFLAGS) "$$src" -o "$$obj"; then \ - success=$$((success + 1)); \ - else \ - failed=$$((failed + 1)); \ - echo "ERROR: failed to compile $$src"; \ - exit 1; \ - fi; \ - done; \ - for src in $(CORE_SRCS); do \ - if [ -f "$$src" ]; then \ - obj=$$(basename "$${src%.c}.o"); \ - if $(CC) -c $(CFLAGS) "$$src" -o 
"$$obj"; then \ - success=$$((success + 1)); \ - else \ - failed=$$((failed + 1)); \ - echo "ERROR: failed to compile $$src"; \ - exit 1; \ - fi; \ - fi; \ - done; \ - echo "AMD DC: compiled $$success files successfully"; \ - $(CC) $(LDFLAGS) -o $@ $$(find . -maxdepth 1 -name '*.o' -size +0c) $(LDLIBS) - -redox_stubs.o: redox_stubs.c redox_glue.h - $(CC) -c $(CFLAGS) $< -o $@ - -amdgpu_redox_main.o: amdgpu_redox_main.c redox_glue.h - $(CC) -c $(CFLAGS) $< -o $@ - -check: $(GLUE_OBJS) - $(CC) -fsyntax-only $(CFLAGS) amdgpu_redox_main.c - $(CC) -fsyntax-only $(CFLAGS) redox_stubs.c - -clean: - rm -f *.o libamdgpu_dc_redox.so diff --git a/local/recipes/gpu/redox-drm/source/build.rs b/local/recipes/gpu/redox-drm/source/build.rs index 71063dfb..c5246159 100644 --- a/local/recipes/gpu/redox-drm/source/build.rs +++ b/local/recipes/gpu/redox-drm/source/build.rs @@ -5,6 +5,7 @@ const LIB_NAME: &str = "libamdgpu_dc_redox.so"; const ENV_HINTS: &[&str] = &[ "AMDGPU_DC_LIB_DIR", "COOKBOOK_STAGE", + "COOKBOOK_SYSROOT", "REDOX_SYSROOT", "SYSROOT", "TARGET_SYSROOT", diff --git a/local/recipes/gpu/redox-drm/source/src/scheme.rs b/local/recipes/gpu/redox-drm/source/src/scheme.rs index aa4cfe55..7179841f 100644 --- a/local/recipes/gpu/redox-drm/source/src/scheme.rs +++ b/local/recipes/gpu/redox-drm/source/src/scheme.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashSet}; +use std::collections::{BTreeMap, HashSet, VecDeque}; use std::mem::size_of; use std::sync::Arc; @@ -286,6 +286,7 @@ enum NodeKind { struct Handle { node: NodeKind, response: Vec, + event_queue: VecDeque>, mapped_gem: Option, mapped_gem_refs: usize, owned_fbs: Vec, @@ -548,6 +549,7 @@ impl DrmScheme { Handle { node, response: Vec::new(), + event_queue: VecDeque::new(), mapped_gem: None, mapped_gem_refs: 0, owned_fbs: Vec::new(), @@ -636,8 +638,34 @@ impl DrmScheme { pub fn handle_driver_event(&mut self, event: DriverEvent) { match event { - DriverEvent::Vblank { crtc_id, count } => self.retire_vblank(crtc_id, 
count), - DriverEvent::Hotplug { .. } => {} + DriverEvent::Vblank { crtc_id, count } => { + self.retire_vblank(crtc_id, count); + self.queue_card_event(format!("vblank:{crtc_id}:{count}\n").into_bytes()); + } + DriverEvent::Hotplug { connector_id } => self.queue_hotplug_event(connector_id), + } + } + + fn queue_card_event(&mut self, payload: Vec) { + for handle in self.handles.values_mut() { + if let NodeKind::Card = handle.node { + handle.event_queue.push_back(payload.clone()); + } + } + } + + fn queue_hotplug_event(&mut self, connector_id: u32) { + let payload = format!("hotplug:{}\n", connector_id).into_bytes(); + for handle in self.handles.values_mut() { + match handle.node { + NodeKind::Card => { + handle.event_queue.push_back(payload.clone()); + } + NodeKind::Connector(id) if id == connector_id => { + handle.event_queue.push_back(payload.clone()); + } + _ => {} + } } } @@ -1394,9 +1422,19 @@ impl SchemeBlockMut for DrmScheme { fn read(&mut self, id: usize, buf: &mut [u8]) -> Result> { let handle = self.handles.get_mut(&id).ok_or_else(|| Error::new(EBADF))?; - let len = handle.response.len().min(buf.len()); - buf[..len].copy_from_slice(&handle.response[..len]); - Ok(Some(len)) + if !handle.response.is_empty() { + let len = handle.response.len().min(buf.len()); + buf[..len].copy_from_slice(&handle.response[..len]); + return Ok(Some(len)); + } + + if let Some(event) = handle.event_queue.pop_front() { + let len = event.len().min(buf.len()); + buf[..len].copy_from_slice(&event[..len]); + return Ok(Some(len)); + } + + Ok(Some(0)) } fn write(&mut self, id: usize, buf: &[u8]) -> Result> { @@ -1428,7 +1466,11 @@ impl SchemeBlockMut for DrmScheme { fn fstat(&mut self, id: usize, stat: &mut Stat) -> Result> { let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?; stat.st_mode = MODE_FILE | 0o666; - stat.st_size = handle.response.len() as u64; + stat.st_size = if !handle.response.is_empty() { + handle.response.len() as u64 + } else { + 
handle.event_queue.front().map(|payload| payload.len()).unwrap_or(0) as u64 + }; stat.st_blksize = 4096; Ok(Some(0)) } @@ -1441,9 +1483,14 @@ impl SchemeBlockMut for DrmScheme { Err(Error::new(EOPNOTSUPP)) } - fn fevent(&mut self, id: usize, _flags: EventFlags) -> Result> { - let _ = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?; - Ok(Some(EventFlags::empty())) + fn fevent(&mut self, id: usize, flags: EventFlags) -> Result> { + let handle = self.handles.get(&id).ok_or_else(|| Error::new(EBADF))?; + let readiness = if handle.event_queue.is_empty() { + EventFlags::empty() + } else { + flags & EventFlags::EVENT_READ + }; + Ok(Some(readiness)) } fn close(&mut self, id: usize) -> Result> { @@ -1785,6 +1832,13 @@ mod tests { scheme.open("card0", 0, 0, 0).unwrap().unwrap() } + fn open_connector(scheme: &mut DrmScheme, connector_id: u32) -> usize { + scheme + .open(&format!("card0Connector/{connector_id}"), 0, 0, 0) + .unwrap() + .unwrap() + } + fn write_ioctl(scheme: &mut DrmScheme, id: usize, request: usize, payload: &T) -> Result { let mut buf = request.to_le_bytes().to_vec(); buf.extend_from_slice(&bytes_of(payload)); @@ -1998,6 +2052,7 @@ mod tests { #[test] fn non_vblank_driver_event_does_not_retire_pending_page_flip() { let mut scheme = DrmScheme::new(Arc::new(FakeDriver::new(false))); + let card = open_card(&mut scheme); scheme.fb_registry.insert( 9, @@ -2015,6 +2070,73 @@ mod tests { assert_eq!(scheme.pending_flip_fb.get(&1), Some(&(2, 9))); assert!(scheme.fb_registry.contains_key(&9)); + assert_eq!( + scheme.fevent(card, EventFlags::EVENT_READ).unwrap(), + Some(EventFlags::EVENT_READ) + ); + } + + #[test] + fn hotplug_event_is_readable_from_card_handle() { + let mut scheme = DrmScheme::new(Arc::new(FakeDriver::new(false))); + let card = open_card(&mut scheme); + + scheme.handle_driver_event(DriverEvent::Hotplug { connector_id: 7 }); + + assert_eq!( + scheme.fevent(card, EventFlags::EVENT_READ).unwrap(), + Some(EventFlags::EVENT_READ) + ); + + let mut 
buf = [0u8; 32]; + let len = scheme.read(card, &mut buf).unwrap().unwrap(); + assert_eq!(&buf[..len], b"hotplug:7\n"); + assert_eq!( + scheme.fevent(card, EventFlags::EVENT_READ).unwrap(), + Some(EventFlags::empty()) + ); + } + + #[test] + fn hotplug_event_targets_matching_connector_handle_only() { + let mut scheme = DrmScheme::new(Arc::new(FakeDriver::new(false))); + let connector_a = open_connector(&mut scheme, 1); + let connector_b = open_connector(&mut scheme, 2); + + scheme.handle_driver_event(DriverEvent::Hotplug { connector_id: 2 }); + + assert_eq!( + scheme.fevent(connector_a, EventFlags::EVENT_READ).unwrap(), + Some(EventFlags::empty()) + ); + assert_eq!( + scheme.fevent(connector_b, EventFlags::EVENT_READ).unwrap(), + Some(EventFlags::EVENT_READ) + ); + } + + #[test] + fn vblank_event_is_readable_from_card_handle() { + let mut scheme = DrmScheme::new(Arc::new(FakeDriver::new(false))); + let card = open_card(&mut scheme); + + scheme.handle_driver_event(DriverEvent::Vblank { + crtc_id: 4, + count: 12, + }); + + assert_eq!( + scheme.fevent(card, EventFlags::EVENT_READ).unwrap(), + Some(EventFlags::EVENT_READ) + ); + + let mut buf = [0u8; 32]; + let len = scheme.read(card, &mut buf).unwrap().unwrap(); + assert_eq!(&buf[..len], b"vblank:4:12\n"); + assert_eq!( + scheme.fevent(card, EventFlags::EVENT_READ).unwrap(), + Some(EventFlags::empty()) + ); } #[test] diff --git a/local/recipes/kde/kf6-kcmutils/source/CMakeLists.txt b/local/recipes/kde/kf6-kcmutils/source/CMakeLists.txt index 712cd631..b09896f1 100644 --- a/local/recipes/kde/kf6-kcmutils/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kcmutils/source/CMakeLists.txt @@ -24,7 +24,7 @@ include(ECMQtDeclareLoggingCategory) include(ECMDeprecationSettings) include(ECMMarkNonGuiExecutable) include(KDEGitCommitHooks) -########include(ECMQmlModule) +####################include(ECMQmlModule) include(CMakeDependentOption) @@ -41,6 +41,18 @@ find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) 
find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) # shall we use DBus? # enabled per default on Linux & BSD systems @@ -63,7 +75,7 @@ ecm_setup_version(PROJECT VARIABLE_PREFIX KCMUTILS PACKAGE_VERSION_FILE "${CMAKE_CURRENT_BINARY_DIR}/KF6KCMUtilsConfigVersion.cmake" SOVERSION 6) -#######find_package(KF6KIO ${KF_DEP_VERSION} REQUIRED) +###################find_package(KF6KIO ${KF_DEP_VERSION} REQUIRED) find_package(KF6ItemViews ${KF_DEP_VERSION} REQUIRED) find_package(KF6ConfigWidgets ${KF_DEP_VERSION} REQUIRED) find_package(KF6CoreAddons ${KF_DEP_VERSION} REQUIRED) diff --git a/local/recipes/kde/kf6-kcmutils/source/src/CMakeLists.txt b/local/recipes/kde/kf6-kcmutils/source/src/CMakeLists.txt index 4c99dd13..78457f7c 100644 --- a/local/recipes/kde/kf6-kcmutils/source/src/CMakeLists.txt +++ b/local/recipes/kde/kf6-kcmutils/source/src/CMakeLists.txt @@ -14,8 +14,8 @@ ecm_qt_declare_logging_category(kcmutils_logging_STATIC add_subdirectory(core) -#######add_subdirectory(qml) -#######add_subdirectory(quick) +###################add_subdirectory(qml) +###################add_subdirectory(quick) ########### kcmutils 
############### set(kcmutils_LIB_SRCS @@ -118,4 +118,4 @@ ecm_qt_install_logging_categories( DESTINATION "${KDE_INSTALL_LOGGINGCATEGORIESDIR}" ) -#######add_subdirectory(kcmshell) +###################add_subdirectory(kcmshell) diff --git a/local/recipes/kde/kf6-kcolorscheme/source/CMakeLists.txt b/local/recipes/kde/kf6-kcolorscheme/source/CMakeLists.txt index e1aee8e3..4dd00e9c 100644 --- a/local/recipes/kde/kf6-kcolorscheme/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kcolorscheme/source/CMakeLists.txt @@ -34,6 +34,22 @@ find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) set(EXCLUDE_DEPRECATED_BEFORE_AND_AT 0 CACHE STRING "Control the range of deprecated API excluded from the build [default=0].") diff --git a/local/recipes/kde/kf6-kcompletion/source/CMakeLists.txt 
b/local/recipes/kde/kf6-kcompletion/source/CMakeLists.txt index 8242c8ed..0429834e 100644 --- a/local/recipes/kde/kf6-kcompletion/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kcompletion/source/CMakeLists.txt @@ -42,6 +42,22 @@ find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(KF6Codecs ${KF_DEP_VERSION} REQUIRED) find_package(KF6Config ${KF_DEP_VERSION} REQUIRED) diff --git a/local/recipes/kde/kf6-kconfigwidgets/source/CMakeLists.txt b/local/recipes/kde/kf6-kconfigwidgets/source/CMakeLists.txt index 9563df95..74d2e9e2 100644 --- a/local/recipes/kde/kf6-kconfigwidgets/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kconfigwidgets/source/CMakeLists.txt @@ -29,6 +29,22 @@ find_package(Qt6 ${REQUIRED_QT_VERSION} CONFIG REQUIRED Widgets) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) 
find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) # shall we use DBus? # enabled per default on Linux & BSD systems diff --git a/local/recipes/kde/kf6-kcrash/recipe.toml b/local/recipes/kde/kf6-kcrash/recipe.toml index 8a78c910..356d8343 100644 --- a/local/recipes/kde/kf6-kcrash/recipe.toml +++ b/local/recipes/kde/kf6-kcrash/recipe.toml @@ -49,8 +49,8 @@ QString glRenderer()\ return QString();\ }' "${KCRASH_SRC}" -if ! grep -q "sys/wait.h" "${COOKBOOK_SOURCE}/src/kcrash.cpp"; then - printf "%s\n" "#include " "#include " > /tmp/wait_h.txt +if ! 
grep -q "" "${COOKBOOK_SOURCE}/src/kcrash.cpp"; then + printf "%s\n" "#include " "#include " "#include " > /tmp/wait_h.txt sed -i "\\//{ r /tmp/wait_h.txt d @@ -78,4 +78,4 @@ for lib in "${COOKBOOK_STAGE}/usr/lib/"libKF6*.so.*; do [ -f "${lib}" ] || continue patchelf --remove-rpath "${lib}" 2>/dev/null || true done -''' \ No newline at end of file +''' diff --git a/local/recipes/kde/kf6-kcrash/source/CMakeLists.txt b/local/recipes/kde/kf6-kcrash/source/CMakeLists.txt index a9f016a6..a01d32e6 100644 --- a/local/recipes/kde/kf6-kcrash/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kcrash/source/CMakeLists.txt @@ -33,7 +33,7 @@ if(WITH_X11) find_package(X11 REQUIRED) set(HAVE_X11 TRUE) endif() -#######find_package(Qt6Test REQUIRED) +#########################find_package(Qt6Test REQUIRED) include(ECMGenerateExportHeader) include(ECMSetupVersion) include(ECMGenerateHeaders) diff --git a/local/recipes/kde/kf6-kcrash/source/src/kcrash.cpp b/local/recipes/kde/kf6-kcrash/source/src/kcrash.cpp index 5e110df7..80c241bd 100644 --- a/local/recipes/kde/kf6-kcrash/source/src/kcrash.cpp +++ b/local/recipes/kde/kf6-kcrash/source/src/kcrash.cpp @@ -24,8 +24,10 @@ #include #ifndef Q_OS_WIN #include +#include #include #include +#include #include #else #include diff --git a/local/recipes/kde/kf6-kdeclarative/source/CMakeLists.txt b/local/recipes/kde/kf6-kdeclarative/source/CMakeLists.txt index c61caff6..ca2d5837 100644 --- a/local/recipes/kde/kf6-kdeclarative/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kdeclarative/source/CMakeLists.txt @@ -32,7 +32,7 @@ find_package(KF6GuiAddons ${KF_DEP_VERSION} REQUIRED) if(NOT WIN32 AND NOT APPLE AND NOT ANDROID AND NOT REDOX) -#### find_package(KF6GlobalAccel ${KF_DEP_VERSION} REQUIRED) +################# find_package(KF6GlobalAccel ${KF_DEP_VERSION} REQUIRED) set(HAVE_KGLOBALACCEL TRUE) else() set(HAVE_KGLOBALACCEL FALSE) diff --git a/local/recipes/kde/kf6-kiconthemes/source/CMakeLists.txt 
b/local/recipes/kde/kf6-kiconthemes/source/CMakeLists.txt index 3baf9189..18fa8a23 100644 --- a/local/recipes/kde/kf6-kiconthemes/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kiconthemes/source/CMakeLists.txt @@ -25,7 +25,7 @@ include(ECMQtDeclareLoggingCategory) include(ECMDeprecationSettings) include(ECMAddQch) include(CMakeDependentOption) -##############################include(ECMQmlModule) +##############################################include(ECMQmlModule) set(EXCLUDE_DEPRECATED_BEFORE_AND_AT 0 CACHE STRING "Control the range of deprecated API excluded from the build [default=0].") @@ -70,10 +70,26 @@ find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6Svg ${REQUIRED_QT_VERSION} REQUIRED NO_MODULE) if (FALSE) -########################### find_package(Qt6 ${REQUIRED_QT_VERSION} NO_MODULE REQUIRED 
Qml Quick) +########################################### find_package(Qt6 ${REQUIRED_QT_VERSION} NO_MODULE REQUIRED Qml Quick) endif() # shall we use DBus? diff --git a/local/recipes/kde/kf6-kiconthemes/source/src/CMakeLists.txt b/local/recipes/kde/kf6-kiconthemes/source/src/CMakeLists.txt index a875c008..62f9863f 100644 --- a/local/recipes/kde/kf6-kiconthemes/source/src/CMakeLists.txt +++ b/local/recipes/kde/kf6-kiconthemes/source/src/CMakeLists.txt @@ -2,7 +2,7 @@ configure_file(config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h) add_subdirectory(tools/kiconfinder) if (KICONTHEMES_USE_QTQUICK) -######################### add_subdirectory(qml) +######################################### add_subdirectory(qml) endif() if (APPLE) add_subdirectory(tools/ksvg2icns) diff --git a/local/recipes/kde/kf6-kio/source/src/core/workerinterface.cpp b/local/recipes/kde/kf6-kio/source/src/core/workerinterface.cpp index c92d6543..4b5ff38d 100644 --- a/local/recipes/kde/kf6-kio/source/src/core/workerinterface.cpp +++ b/local/recipes/kde/kf6-kio/source/src/core/workerinterface.cpp @@ -17,6 +17,30 @@ #include +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + +#include + #include #include "usernotificationhandler_p.h" #include "workerbase.h" diff --git a/local/recipes/kde/kf6-kitemviews/source/CMakeLists.txt b/local/recipes/kde/kf6-kitemviews/source/CMakeLists.txt index bb0c5257..2c32e6f0 100644 --- a/local/recipes/kde/kf6-kitemviews/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kitemviews/source/CMakeLists.txt @@ -30,6 +30,22 @@ find_package(Qt6 ${REQUIRED_QT_VERSION} CONFIG REQUIRED Widgets) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) 
+find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) set(EXCLUDE_DEPRECATED_BEFORE_AND_AT 0 CACHE STRING "Control the range of deprecated API excluded from the build [default=0].") diff --git a/local/recipes/kde/kf6-kjobwidgets/source/CMakeLists.txt b/local/recipes/kde/kf6-kjobwidgets/source/CMakeLists.txt index 36b62944..1fccfbf3 100644 --- a/local/recipes/kde/kf6-kjobwidgets/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kjobwidgets/source/CMakeLists.txt @@ -35,6 +35,18 @@ find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate 
${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) if(NOT WIN32 AND NOT APPLE AND NOT ANDROID AND NOT HAIKU) option(WITH_X11 "Build with support for QX11Info::appUserTime()" ON) diff --git a/local/recipes/kde/kf6-kjobwidgets/source/src/CMakeLists.txt b/local/recipes/kde/kf6-kjobwidgets/source/src/CMakeLists.txt index 2bd37a56..ba789cc6 100644 --- a/local/recipes/kde/kf6-kjobwidgets/source/src/CMakeLists.txt +++ b/local/recipes/kde/kf6-kjobwidgets/source/src/CMakeLists.txt @@ -75,7 +75,7 @@ target_link_libraries(KF6JobWidgets KF6::CoreAddons # KJob PRIVATE KF6::WidgetsAddons # KSqueezedTextLabel - ##KF6::Notifications + ##############KF6::Notifications ) if (HAVE_QTDBUS) target_link_libraries(KF6JobWidgets PRIVATE Qt6::DBus) @@ -93,7 +93,7 @@ ecm_generate_headers(KJobWidgets_HEADERS KUiServerV2JobTracker KStatusBarJobTracker KWidgetJobTracker - ##KNotificationJobUiDelegate + ##############KNotificationJobUiDelegate REQUIRED_HEADERS KJobWidgets_HEADERS ) diff --git a/local/recipes/kde/kf6-knotifications/recipe.toml b/local/recipes/kde/kf6-knotifications/recipe.toml index 18a26a34..a3d5d215 100644 --- a/local/recipes/kde/kf6-knotifications/recipe.toml +++ b/local/recipes/kde/kf6-knotifications/recipe.toml @@ -16,6 +16,7 @@ script = """ DYNAMIC_INIT HOST_BUILD="${COOKBOOK_ROOT}/build/qt-host-build" +QDBUSXML2CPP="${HOST_BUILD}/bin/qdbusxml2cpp" for qtdir in plugins mkspecs metatypes modules; do if [ -d "${COOKBOOK_SYSROOT}/usr/${qtdir}" ] && [ ! 
-e "${COOKBOOK_SYSROOT}/${qtdir}" ]; then @@ -36,9 +37,19 @@ sed -i 's/^include(ECMQmlModule)/#include(ECMQmlModule)/' \ "${COOKBOOK_SOURCE}/CMakeLists.txt" 2>/dev/null || true sed -i 's/^ add_subdirectory(qml)/# add_subdirectory(qml)/' \ "${COOKBOOK_SOURCE}/src/CMakeLists.txt" 2>/dev/null || true -sed -i 's/^#add_subdirectory(src)/add_subdirectory(src)/' \ +sed -i 's/^#\\+add_subdirectory(src)/add_subdirectory(src)/' \ "${COOKBOOK_SOURCE}/CMakeLists.txt" 2>/dev/null || true +if [ -x "${QDBUSXML2CPP}" ]; then + "${QDBUSXML2CPP}" -m -p "${COOKBOOK_SOURCE}/src/notifications_interface" \ + "${COOKBOOK_SOURCE}/src/org.freedesktop.Notifications.xml" + sed -i '/notifications_interface\\.moc/d' "${COOKBOOK_SOURCE}/src/notifications_interface.cpp" +fi +sed -i 's/^ qt_add_dbus_interface(knotifications_dbus_SRCS .*$/ set(knotifications_dbus_SRCS notifications_interface.cpp)/' \ + "${COOKBOOK_SOURCE}/src/CMakeLists.txt" 2>/dev/null || true +sed -i 's/^ target_sources(KF6Notifications PRIVATE ${knotifications_dbus_SRCS})$/ target_sources(KF6Notifications PRIVATE ${knotifications_dbus_SRCS})/' \ + "${COOKBOOK_SOURCE}/src/CMakeLists.txt" 2>/dev/null || true + rm -f CMakeCache.txt rm -rf CMakeFiles diff --git a/local/recipes/kde/kf6-knotifications/source/CMakeLists.txt b/local/recipes/kde/kf6-knotifications/source/CMakeLists.txt index e120aebd..8133f4cf 100644 --- a/local/recipes/kde/kf6-knotifications/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-knotifications/source/CMakeLists.txt @@ -78,52 +78,52 @@ endif() find_package(KF6Config ${KF_DEP_VERSION} REQUIRED) -##################if (NOT APPLE AND NOT ANDROID AND NOT WIN32 AND NOT HAIKU OR (WIN32 AND NOT WITH_SNORETOAST)) -################## find_package(Qt6 ${REQUIRED_QT_VERSION} CONFIG REQUIRED DBus) -################## find_package(Canberra REQUIRED) -################## set_package_properties(Canberra PROPERTIES -################## PURPOSE "Needed to build audio notification support") -################## if (TARGET 
Canberra::Canberra) -################## add_definitions(-DHAVE_CANBERRA) -################## endif() -##################endif() -################# -################## For the Python bindings -#################find_package(Python3 3.10 COMPONENTS Interpreter Development) -#################find_package(Shiboken6) -#################find_package(PySide6) -################# -################## Python Bindings -#################cmake_dependent_option(BUILD_PYTHON_BINDINGS "Generate Python Bindings" ON "TARGET Shiboken6::libshiboken AND TARGET PySide6::pyside6" OFF) -#################add_feature_info(PYTHON_BINDINGS ${BUILD_PYTHON_BINDINGS} "Python bindings") -################# -################## FreeBSD CI is missing required packages -#################if (CMAKE_SYSTEM_NAME MATCHES FreeBSD) -################# set(BUILD_PYTHON_BINDINGS OFF) -#################endif() -################ -################remove_definitions(-DQT_NO_CAST_FROM_BYTEARRAY) -################ -#################ecm_install_po_files_as_qm(poqm) -################ -################ecm_set_disabled_deprecation_versions( -################ QT 6.8 -################ KF 6.8 -################) -################ -################add_subdirectory(src) -################if (BUILD_TESTING) -################ add_subdirectory(tests) -################ add_subdirectory(autotests) -################ add_subdirectory(examples) -################endif() -############### -###############if (BUILD_PYTHON_BINDINGS) -############### include(ECMGeneratePythonBindings) -############### add_subdirectory(python) -###############endif() -############## -## create a Config.cmake and a ConfigVersion.cmake file and install them +###################################if (NOT APPLE AND NOT ANDROID AND NOT WIN32 AND NOT HAIKU OR (WIN32 AND NOT WITH_SNORETOAST)) +################################### find_package(Qt6 ${REQUIRED_QT_VERSION} CONFIG REQUIRED DBus) +################################### find_package(Canberra REQUIRED) 
+################################### set_package_properties(Canberra PROPERTIES +################################### PURPOSE "Needed to build audio notification support") +################################### if (TARGET Canberra::Canberra) +################################### add_definitions(-DHAVE_CANBERRA) +################################### endif() +###################################endif() +################################## +################################### For the Python bindings +##################################find_package(Python3 3.10 COMPONENTS Interpreter Development) +##################################find_package(Shiboken6) +##################################find_package(PySide6) +################################## +################################### Python Bindings +##################################cmake_dependent_option(BUILD_PYTHON_BINDINGS "Generate Python Bindings" ON "TARGET Shiboken6::libshiboken AND TARGET PySide6::pyside6" OFF) +##################################add_feature_info(PYTHON_BINDINGS ${BUILD_PYTHON_BINDINGS} "Python bindings") +################################## +################################### FreeBSD CI is missing required packages +##################################if (CMAKE_SYSTEM_NAME MATCHES FreeBSD) +################################## set(BUILD_PYTHON_BINDINGS OFF) +##################################endif() +################################# +#################################remove_definitions(-DQT_NO_CAST_FROM_BYTEARRAY) +################################# +##################################ecm_install_po_files_as_qm(poqm) +################################# +#################################ecm_set_disabled_deprecation_versions( +################################# QT 6.8 +################################# KF 6.8 +#################################) +################################# +add_subdirectory(src) +#################################if (BUILD_TESTING) +################################# add_subdirectory(tests) 
+################################# add_subdirectory(autotests) +################################# add_subdirectory(examples) +#################################endif() +################################ +################################if (BUILD_PYTHON_BINDINGS) +################################ include(ECMGeneratePythonBindings) +################################ add_subdirectory(python) +################################endif() +############################### +################### create a Config.cmake and a ConfigVersion.cmake file and install them set(CMAKECONFIG_INSTALL_DIR "${KDE_INSTALL_CMAKEPACKAGEDIR}/KF6Notifications") if (BUILD_QCH) diff --git a/local/recipes/kde/kf6-knotifications/source/src/CMakeLists.txt b/local/recipes/kde/kf6-knotifications/source/src/CMakeLists.txt index 4d6746f2..079cb124 100644 --- a/local/recipes/kde/kf6-knotifications/source/src/CMakeLists.txt +++ b/local/recipes/kde/kf6-knotifications/source/src/CMakeLists.txt @@ -62,7 +62,7 @@ endif() if (HAVE_DBUS) set(notifications_xml org.freedesktop.Notifications.xml) - qt_add_dbus_interface(knotifications_dbus_SRCS ${notifications_xml} notifications_interface) + set(knotifications_dbus_SRCS notifications_interface.cpp) target_sources(KF6Notifications PRIVATE ${knotifications_dbus_SRCS}) endif() diff --git a/local/recipes/kde/kf6-knotifications/source/src/notifications_interface.cpp b/local/recipes/kde/kf6-knotifications/source/src/notifications_interface.cpp new file mode 100644 index 00000000..30370e76 --- /dev/null +++ b/local/recipes/kde/kf6-knotifications/source/src/notifications_interface.cpp @@ -0,0 +1,27 @@ +/* + * This file was generated by qdbusxml2cpp version 0.8 + * Command line was: qdbusxml2cpp -m -p /mnt/data/homes/kellito/Builds/rbos/local/recipes/kde/kf6-knotifications/source/src/notifications_interface /mnt/data/homes/kellito/Builds/rbos/local/recipes/kde/kf6-knotifications/source/src/org.freedesktop.Notifications.xml + * + * qdbusxml2cpp is Copyright (C) 2023 The Qt Company 
Ltd. + * + * This is an auto-generated file. + * This file may have been hand-edited. Look for HAND-EDIT comments + * before re-generating it. + */ + +#include "/mnt/data/homes/kellito/Builds/rbos/local/recipes/kde/kf6-knotifications/source/src/notifications_interface.h" + +/* + * Implementation of interface class OrgFreedesktopNotificationsInterface + */ + +OrgFreedesktopNotificationsInterface::OrgFreedesktopNotificationsInterface(const QString &service, const QString &path, const QDBusConnection &connection, QObject *parent) + : QDBusAbstractInterface(service, path, staticInterfaceName(), connection, parent) +{ +} + +OrgFreedesktopNotificationsInterface::~OrgFreedesktopNotificationsInterface() +{ +} + + diff --git a/local/recipes/kde/kf6-knotifications/source/src/notifications_interface.h b/local/recipes/kde/kf6-knotifications/source/src/notifications_interface.h new file mode 100644 index 00000000..e8358a6e --- /dev/null +++ b/local/recipes/kde/kf6-knotifications/source/src/notifications_interface.h @@ -0,0 +1,106 @@ +/* + * This file was generated by qdbusxml2cpp version 0.8 + * Command line was: qdbusxml2cpp -m -p /mnt/data/homes/kellito/Builds/rbos/local/recipes/kde/kf6-knotifications/source/src/notifications_interface /mnt/data/homes/kellito/Builds/rbos/local/recipes/kde/kf6-knotifications/source/src/org.freedesktop.Notifications.xml + * + * qdbusxml2cpp is Copyright (C) 2023 The Qt Company Ltd. + * + * This is an auto-generated file. + * Do not edit! All changes made to it will be lost. 
+ */ + +#ifndef NOTIFICATIONS_INTERFACE_H +#define NOTIFICATIONS_INTERFACE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Proxy class for interface org.freedesktop.Notifications + */ +class OrgFreedesktopNotificationsInterface: public QDBusAbstractInterface +{ + Q_OBJECT +public: + static inline const char *staticInterfaceName() + { return "org.freedesktop.Notifications"; } + +public: + OrgFreedesktopNotificationsInterface(const QString &service, const QString &path, const QDBusConnection &connection, QObject *parent = nullptr); + + ~OrgFreedesktopNotificationsInterface(); + + Q_PROPERTY(bool Inhibited READ inhibited) + inline bool inhibited() const + { return qvariant_cast< bool >(property("Inhibited")); } + +public Q_SLOTS: // METHODS + inline QDBusPendingReply<> CloseNotification(uint id) + { + QList argumentList; + argumentList << QVariant::fromValue(id); + return asyncCallWithArgumentList(QStringLiteral("CloseNotification"), argumentList); + } + + inline QDBusPendingReply GetCapabilities() + { + QList argumentList; + return asyncCallWithArgumentList(QStringLiteral("GetCapabilities"), argumentList); + } + + inline QDBusPendingReply GetServerInformation() + { + QList argumentList; + return asyncCallWithArgumentList(QStringLiteral("GetServerInformation"), argumentList); + } + inline QDBusReply GetServerInformation(QString &vendor, QString &version, QString &spec_version) + { + QList argumentList; + QDBusMessage reply = callWithArgumentList(QDBus::Block, QStringLiteral("GetServerInformation"), argumentList); + if (reply.type() == QDBusMessage::ReplyMessage && reply.arguments().count() == 4) { + vendor = qdbus_cast(reply.arguments().at(1)); + version = qdbus_cast(reply.arguments().at(2)); + spec_version = qdbus_cast(reply.arguments().at(3)); + } + return reply; + } + + inline QDBusPendingReply Inhibit(const QString &desktop_entry, const QString &reason, const QVariantMap &hints) + { + QList argumentList; + argumentList 
<< QVariant::fromValue(desktop_entry) << QVariant::fromValue(reason) << QVariant::fromValue(hints); + return asyncCallWithArgumentList(QStringLiteral("Inhibit"), argumentList); + } + + inline QDBusPendingReply Notify(const QString &app_name, uint replaces_id, const QString &app_icon, const QString &summary, const QString &body, const QStringList &actions, const QVariantMap &hints, int timeout) + { + QList argumentList; + argumentList << QVariant::fromValue(app_name) << QVariant::fromValue(replaces_id) << QVariant::fromValue(app_icon) << QVariant::fromValue(summary) << QVariant::fromValue(body) << QVariant::fromValue(actions) << QVariant::fromValue(hints) << QVariant::fromValue(timeout); + return asyncCallWithArgumentList(QStringLiteral("Notify"), argumentList); + } + + inline QDBusPendingReply<> UnInhibit(uint in0) + { + QList argumentList; + argumentList << QVariant::fromValue(in0); + return asyncCallWithArgumentList(QStringLiteral("UnInhibit"), argumentList); + } + +Q_SIGNALS: // SIGNALS + void ActionInvoked(uint id, const QString &action_key); + void ActivationToken(uint id, const QString &activation_token); + void NotificationClosed(uint id, uint reason); + void NotificationReplied(uint id, const QString &text); +}; + +namespace org { + namespace freedesktop { + typedef ::OrgFreedesktopNotificationsInterface Notifications; + } +} +#endif diff --git a/local/recipes/kde/kf6-ktextwidgets/source/CMakeLists.txt b/local/recipes/kde/kf6-ktextwidgets/source/CMakeLists.txt index 27d842d1..73d4d7d6 100644 --- a/local/recipes/kde/kf6-ktextwidgets/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-ktextwidgets/source/CMakeLists.txt @@ -44,6 +44,22 @@ find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate 
${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) +find_package(Qt6GuiPrivate ${REQUIRED_QT_VERSION} REQUIRED) if (WITH_TEXT_TO_SPEECH) find_package(Qt6 ${REQUIRED_QT_VERSION} CONFIG REQUIRED TextToSpeech) diff --git a/local/recipes/kde/kf6-kwayland/source/CMakeLists.txt b/local/recipes/kde/kf6-kwayland/source/CMakeLists.txt index 33ac2e04..d672bec6 100644 --- a/local/recipes/kde/kf6-kwayland/source/CMakeLists.txt +++ b/local/recipes/kde/kf6-kwayland/source/CMakeLists.txt @@ -52,6 +52,18 @@ find_package(Qt6WaylandClientPrivate REQUIRED) find_package(Qt6WaylandClientPrivate REQUIRED) find_package(Qt6WaylandClientPrivate REQUIRED) find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) +find_package(Qt6WaylandClientPrivate REQUIRED) 
+find_package(Qt6WaylandClientPrivate REQUIRED) set_package_properties(Wayland PROPERTIES TYPE REQUIRED ) diff --git a/local/recipes/kde/kf6-kxmlgui/source/src/CMakeLists.txt b/local/recipes/kde/kf6-kxmlgui/source/src/CMakeLists.txt index 60f248e2..6f149f64 100644 --- a/local/recipes/kde/kf6-kxmlgui/source/src/CMakeLists.txt +++ b/local/recipes/kde/kf6-kxmlgui/source/src/CMakeLists.txt @@ -99,7 +99,7 @@ PUBLIC PRIVATE #QNetworkAccessManager in kaboutapplicationpersonmodel_p #QPrinter in kshortcutseditor -### Qt6::CorePrivate #QSystemLocale in initializeLanguages +################ Qt6::CorePrivate #QSystemLocale in initializeLanguages KF6::CoreAddons #KAboutData KF6::GuiAddons KF6::WidgetsAddons diff --git a/local/recipes/kde/kf6-kxmlgui/source/src/kswitchlanguagedialog_p.cpp b/local/recipes/kde/kf6-kxmlgui/source/src/kswitchlanguagedialog_p.cpp index 7edfc5d0..0cd2a78b 100644 --- a/local/recipes/kde/kf6-kxmlgui/source/src/kswitchlanguagedialog_p.cpp +++ b/local/recipes/kde/kf6-kxmlgui/source/src/kswitchlanguagedialog_p.cpp @@ -75,10 +75,10 @@ void initializeLanguages() // Ideally setting the LANGUAGE would change the default QLocale too // but unfortunately this is too late since the QCoreApplication constructor // already created a QLocale at this stage so we need to set the reset it -////// // by triggering the creation and destruction of a QSystemLocale +//////////////////////////////// // by triggering the creation and destruction of a QSystemLocale // this is highly dependent on Qt internals, so may break, but oh well -////// QSystemLocale *dummy = new QSystemLocale(); -////// delete dummy; +//////////////////////////////// QSystemLocale *dummy = new QSystemLocale(); +//////////////////////////////// delete dummy; } } diff --git a/local/recipes/kde/kf6-solid/source/CMakeLists.txt b/local/recipes/kde/kf6-solid/source/CMakeLists.txt index 4d50e335..9f3f728e 100644 --- a/local/recipes/kde/kf6-solid/source/CMakeLists.txt +++ 
b/local/recipes/kde/kf6-solid/source/CMakeLists.txt @@ -78,7 +78,7 @@ set_package_properties(PList PROPERTIES if (CMAKE_SYSTEM_NAME MATCHES Linux) # Used by the UDisks backend on Linux - ###find_package(LibMount) + ################find_package(LibMount) set_package_properties(LibMount PROPERTIES TYPE REQUIRED) endif() diff --git a/local/recipes/kde/kwin/inspect/kwin-v6.3.4/src/plugins/CMakeLists.txt b/local/recipes/kde/kwin/inspect/kwin-v6.3.4/src/plugins/CMakeLists.txt index 0b193dea..a2240cad 100644 --- a/local/recipes/kde/kwin/inspect/kwin-v6.3.4/src/plugins/CMakeLists.txt +++ b/local/recipes/kde/kwin/inspect/kwin-v6.3.4/src/plugins/CMakeLists.txt @@ -45,6 +45,8 @@ function(kwin_add_script name source) file(COPY ${source}/contents ${source}/metadata.json DESTINATION ${CMAKE_BINARY_DIR}/bin/kwin/scripts/${name}) endfunction() +add_subdirectory(qpa) + add_subdirectory(idletime) if (KWIN_BUILD_EFFECTS) diff --git a/local/recipes/kde/kwin/recipe.toml b/local/recipes/kde/kwin/recipe.toml index 47106199..e91e875b 100644 --- a/local/recipes/kde/kwin/recipe.toml +++ b/local/recipes/kde/kwin/recipe.toml @@ -133,9 +133,9 @@ if [ -f "${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage.tmp/usr/inc "${COOKBOOK_ROOT}/recipes/wip/qt/qtdeclarative/target/${TARGET}/sysroot/include/stdlib.h" cp -f "${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage.tmp/usr/include/stdlib.h" \ "${COOKBOOK_ROOT}/recipes/wip/qt/qtdeclarative/target/${TARGET}/sysroot/usr/include/stdlib.h" 2>/dev/null || true - sed -i '/^long double strtold(const char \\*nptr, char \\*\\*endptr);$/d' \ + sed -i '/strtold[[:space:]]*(/d' \ "${COOKBOOK_ROOT}/recipes/wip/qt/qtdeclarative/target/${TARGET}/sysroot/include/stdlib.h" 2>/dev/null || true - sed -i '/^long double strtold(const char \\*nptr, char \\*\\*endptr);$/d' \ + sed -i '/strtold[[:space:]]*(/d' \ "${COOKBOOK_ROOT}/recipes/wip/qt/qtdeclarative/target/${TARGET}/sysroot/usr/include/stdlib.h" 2>/dev/null || true fi @@ -159,7 +159,7 @@ fi 
mkdir -p "${COOKBOOK_SYSROOT}/include/QtGui/private" "${COOKBOOK_SYSROOT}/include/QtCore/private" mkdir -p "${COOKBOOK_SYSROOT}/usr/include/QtGui/private" "${COOKBOOK_SYSROOT}/usr/include/QtCore/private" -for hdr in "${COOKBOOK_ROOT}"/recipes/wip/qt/qtbase/source/src/corelib/global/*_p.h; do +find "${COOKBOOK_ROOT}/recipes/wip/qt/qtbase/source/src/corelib" -name '*_p.h' | while read -r hdr; do [ -f "$hdr" ] || continue base=$(basename "$hdr") ln -sf "$hdr" "${COOKBOOK_SYSROOT}/include/QtCore/private/$base" @@ -170,7 +170,17 @@ if [ -f "${qt_qconfig_priv}" ]; then ln -sf "${qt_qconfig_priv}" "${COOKBOOK_SYSROOT}/include/QtCore/private/qconfig_p.h" ln -sf "${qt_qconfig_priv}" "${COOKBOOK_SYSROOT}/usr/include/QtCore/private/qconfig_p.h" fi -for hdr in "${COOKBOOK_ROOT}"/recipes/wip/qt/qtbase/source/src/gui/kernel/*_p.h "${COOKBOOK_ROOT}"/recipes/wip/qt/qtbase/source/src/gui/platform/unix/*_p.h; do +qt_qtcore_config_priv="${COOKBOOK_ROOT}/recipes/wip/qt/qtdeclarative/target/${TARGET}/sysroot/usr/include/QtCore/6.11.0/QtCore/private/qtcore-config_p.h" +if [ -f "${qt_qtcore_config_priv}" ]; then + ln -sf "${qt_qtcore_config_priv}" "${COOKBOOK_SYSROOT}/include/QtCore/private/qtcore-config_p.h" + ln -sf "${qt_qtcore_config_priv}" "${COOKBOOK_SYSROOT}/usr/include/QtCore/private/qtcore-config_p.h" +fi +qt_qtgui_config_priv="${COOKBOOK_ROOT}/recipes/wip/qt/qtdeclarative/target/${TARGET}/sysroot/usr/include/QtGui/6.11.0/QtGui/private/qtgui-config_p.h" +if [ -f "${qt_qtgui_config_priv}" ]; then + ln -sf "${qt_qtgui_config_priv}" "${COOKBOOK_SYSROOT}/include/QtGui/private/qtgui-config_p.h" + ln -sf "${qt_qtgui_config_priv}" "${COOKBOOK_SYSROOT}/usr/include/QtGui/private/qtgui-config_p.h" +fi +find "${COOKBOOK_ROOT}/recipes/wip/qt/qtbase/source/src/gui" -name '*_p.h' | while read -r hdr; do [ -f "$hdr" ] || continue base=$(basename "$hdr") ln -sf "$hdr" "${COOKBOOK_SYSROOT}/include/QtGui/private/$base" @@ -394,5 +404,16 @@ PY cmake --build . 
-j"${COOKBOOK_MAKE_JOBS}" cmake --install . --prefix "${COOKBOOK_STAGE}/usr" -find "${COOKBOOK_STAGE}" -name '*.so*' -exec patchelf --remove-rpath {} ";" 2>/dev/null || true +find "${COOKBOOK_STAGE}/usr/lib" -name '*.so*' -exec patchelf --remove-rpath {} ";" 2>/dev/null || true +find "${COOKBOOK_STAGE}/usr/plugins" -name '*.so' -exec patchelf --set-rpath '$ORIGIN/../../lib' {} + 2>/dev/null || true +for bin in "${COOKBOOK_STAGE}/usr/bin/kwin_wayland" "${COOKBOOK_STAGE}/usr/bin/kwin_wayland_wrapper"; do + [ -f "${bin}" ] || continue + patchelf --set-rpath '$ORIGIN/../lib' "${bin}" 2>/dev/null || true +done """ + +[package] +dependencies = [ + "fontconfig", + "freetype2", +] diff --git a/local/recipes/kde/kwin/source/src/core/syncobjtimeline.cpp b/local/recipes/kde/kwin/source/src/core/syncobjtimeline.cpp index cb8296a8..cf7c21b6 100644 --- a/local/recipes/kde/kwin/source/src/core/syncobjtimeline.cpp +++ b/local/recipes/kde/kwin/source/src/core/syncobjtimeline.cpp @@ -6,7 +6,45 @@ #include "syncobjtimeline.h" #include +#ifdef __redox__ +#include +#include + +#ifndef EFD_CLOEXEC +#define EFD_CLOEXEC O_CLOEXEC +#endif + +#ifndef EFD_NONBLOCK +#define EFD_NONBLOCK O_NONBLOCK +#endif + +#ifndef EFD_SEMAPHORE +#define EFD_SEMAPHORE 0x1 +#endif + +static int eventfd(unsigned int initval, int flags) +{ + const int supported = EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE; + int oflag = O_RDWR; + char path[64]; + + if ((flags & ~supported) != 0) { + errno = EINVAL; + return -1; + } + if (flags & EFD_CLOEXEC) { + oflag |= O_CLOEXEC; + } + if (flags & EFD_NONBLOCK) { + oflag |= O_NONBLOCK; + } + + snprintf(path, sizeof(path), "/scheme/event/eventfd/%u/%d", initval, (flags & EFD_SEMAPHORE) ? 
1 : 0); + return open(path, oflag); +} +#else #include +#endif #include #include diff --git a/local/recipes/kde/kwin/source/src/plugins/CMakeLists.txt b/local/recipes/kde/kwin/source/src/plugins/CMakeLists.txt index 0b193dea..a2240cad 100644 --- a/local/recipes/kde/kwin/source/src/plugins/CMakeLists.txt +++ b/local/recipes/kde/kwin/source/src/plugins/CMakeLists.txt @@ -45,6 +45,8 @@ function(kwin_add_script name source) file(COPY ${source}/contents ${source}/metadata.json DESTINATION ${CMAKE_BINARY_DIR}/bin/kwin/scripts/${name}) endfunction() +add_subdirectory(qpa) + add_subdirectory(idletime) if (KWIN_BUILD_EFFECTS) diff --git a/local/recipes/kde/kwin/source/src/plugins/qpa/CMakeLists.txt b/local/recipes/kde/kwin/source/src/plugins/qpa/CMakeLists.txt index 62b7a12a..ff7c140f 100644 --- a/local/recipes/kde/kwin/source/src/plugins/qpa/CMakeLists.txt +++ b/local/recipes/kde/kwin/source/src/plugins/qpa/CMakeLists.txt @@ -1,5 +1,4 @@ -add_library(KWinQpaPlugin OBJECT) -target_sources(KWinQpaPlugin PRIVATE +add_library(KWinQpaPlugin MODULE backingstore.cpp clipboard.cpp eglhelpers.cpp @@ -13,6 +12,13 @@ target_sources(KWinQpaPlugin PRIVATE window.cpp ) +set_target_properties(KWinQpaPlugin PROPERTIES + AUTOMOC ON + LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/plugins/platforms" + RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/plugins/platforms" + OUTPUT_NAME "qwayland-org.kde.kwin.qpa" +) + ecm_qt_declare_logging_category(KWinQpaPlugin HEADER logging.h IDENTIFIER KWIN_QPA @@ -20,8 +26,6 @@ ecm_qt_declare_logging_category(KWinQpaPlugin DEFAULT_SEVERITY Critical ) -target_compile_definitions(KWinQpaPlugin PRIVATE QT_STATICPLUGIN) - target_link_libraries(KWinQpaPlugin PRIVATE Qt::Concurrent Qt::CorePrivate @@ -30,3 +34,5 @@ target_link_libraries(KWinQpaPlugin PRIVATE Fontconfig::Fontconfig kwin ) + +install(TARGETS KWinQpaPlugin DESTINATION plugins/platforms) diff --git a/local/recipes/kde/kwin/source/src/plugins/qpa/integration.cpp 
b/local/recipes/kde/kwin/source/src/plugins/qpa/integration.cpp index d6c156b3..b0b2cc43 100644 --- a/local/recipes/kde/kwin/source/src/plugins/qpa/integration.cpp +++ b/local/recipes/kde/kwin/source/src/plugins/qpa/integration.cpp @@ -137,7 +137,7 @@ QPlatformWindow *Integration::createPlatformWindow(QWindow *window) const QPlatformOffscreenSurface *Integration::createPlatformOffscreenSurface(QOffscreenSurface *surface) const { - return new OffscreenSurface(surface); + return new KWin::QPA::OffscreenSurface(surface); } QPlatformFontDatabase *Integration::fontDatabase() const diff --git a/local/recipes/qt/redox-toolchain.cmake b/local/recipes/qt/redox-toolchain.cmake index e26e459e..c5a4498f 100644 --- a/local/recipes/qt/redox-toolchain.cmake +++ b/local/recipes/qt/redox-toolchain.cmake @@ -101,6 +101,23 @@ set(CMAKE_PREFIX_PATH "${COOKBOOK_SYSROOT}") set(CMAKE_LIBRARY_PATH "${COOKBOOK_SYSROOT}/lib") set(CMAKE_INCLUDE_PATH "${COOKBOOK_SYSROOT}/include") +if(DEFINED ENV{COOKBOOK_SYSROOT} AND EXISTS "$ENV{COOKBOOK_SYSROOT}/lib") + set(_redbear_sysroot_link_flags "-L$ENV{COOKBOOK_SYSROOT}/lib -Wl,-rpath-link,$ENV{COOKBOOK_SYSROOT}/lib") + set(CMAKE_EXE_LINKER_FLAGS_INIT "${CMAKE_EXE_LINKER_FLAGS_INIT} ${_redbear_sysroot_link_flags}") + set(CMAKE_SHARED_LINKER_FLAGS_INIT "${CMAKE_SHARED_LINKER_FLAGS_INIT} ${_redbear_sysroot_link_flags}") + set(CMAKE_MODULE_LINKER_FLAGS_INIT "${CMAKE_MODULE_LINKER_FLAGS_INIT} ${_redbear_sysroot_link_flags}") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${_redbear_sysroot_link_flags}" CACHE STRING "" FORCE) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${_redbear_sysroot_link_flags}" CACHE STRING "" FORCE) + set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${_redbear_sysroot_link_flags}" CACHE STRING "" FORCE) +endif() + +if(DEFINED ENV{COOKBOOK_SYSROOT} AND EXISTS "$ENV{COOKBOOK_SYSROOT}/lib/libredbear-qt-strtold-compat.so") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed 
-L$ENV{COOKBOOK_SYSROOT}/lib -lredbear-qt-strtold-compat" CACHE STRING "" FORCE) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed -L$ENV{COOKBOOK_SYSROOT}/lib -lredbear-qt-strtold-compat" CACHE STRING "" FORCE) + set(CMAKE_C_STANDARD_LIBRARIES_INIT "${CMAKE_C_STANDARD_LIBRARIES_INIT} -Wl,--no-as-needed -L$ENV{COOKBOOK_SYSROOT}/lib -lredbear-qt-strtold-compat") + set(CMAKE_CXX_STANDARD_LIBRARIES_INIT "${CMAKE_CXX_STANDARD_LIBRARIES_INIT} -Wl,--no-as-needed -L$ENV{COOKBOOK_SYSROOT}/lib -lredbear-qt-strtold-compat") +endif() + # Install prefix — matches the cookbook convention (see cookbook_cmake in script.rs) set(CMAKE_INSTALL_PREFIX "/usr") diff --git a/local/recipes/system/iommu/source/src/main.rs b/local/recipes/system/iommu/source/src/main.rs index 4f734f11..fd05c095 100644 --- a/local/recipes/system/iommu/source/src/main.rs +++ b/local/recipes/system/iommu/source/src/main.rs @@ -28,6 +28,7 @@ struct DiscoveryResult { source: DiscoverySource, kernel_acpi_status: &'static str, ivrs_path: Option, + dmar_present: bool, } #[cfg_attr(not(target_os = "redox"), allow(dead_code))] @@ -141,7 +142,7 @@ fn read_sdt_from_physical(phys_addr: u64) -> Result, String> { } #[cfg(target_os = "redox")] -fn detect_units_from_kernel_acpi() -> Result, String> { +fn find_kernel_acpi_table(signature: &[u8; 4]) -> Result>, String> { let rxsdt = match fs::read("/scheme/kernel.acpi/rxsdt") { Ok(bytes) => bytes, Err(err) => { @@ -150,14 +151,14 @@ fn detect_units_from_kernel_acpi() -> Result, String> { }; if rxsdt.len() < ACPI_HEADER_LEN { - return Ok(Vec::new()); + return Ok(None); } - let signature = &rxsdt[0..4]; - let entry_size = match signature { + let root_signature = &rxsdt[0..4]; + let entry_size = match root_signature { b"RSDT" => 4, b"XSDT" => 8, - _ => return Ok(Vec::new()), + _ => return Ok(None), }; let mut offset = ACPI_HEADER_LEN; @@ -169,24 +170,46 @@ fn detect_units_from_kernel_acpi() -> Result, String> { }; let table = 
read_sdt_from_physical(phys_addr)?; - if table.len() >= 4 && &table[0..4] == b"IVRS" { - return AmdViUnit::detect(&table).map_err(|err| format!("failed to parse IVRS: {err}")); + if table.len() >= 4 && &table[0..4] == signature { + return Ok(Some(table)); } offset += entry_size; } - Ok(Vec::new()) + Ok(None) +} + +#[cfg(target_os = "redox")] +fn detect_units_from_kernel_acpi() -> Result, String> { + match find_kernel_acpi_table(b"IVRS")? { + Some(table) => AmdViUnit::detect(&table).map_err(|err| format!("failed to parse IVRS: {err}")), + None => Ok(Vec::new()), + } +} + +#[cfg(target_os = "redox")] +fn detect_dmar_from_kernel_acpi() -> Result { + Ok(find_kernel_acpi_table(b"DMAR")?.is_some()) } #[cfg(target_os = "redox")] fn discover_units() -> Result { + let dmar_present = match detect_dmar_from_kernel_acpi() { + Ok(present) => present, + Err(err) => { + info!("iommu: kernel ACPI DMAR discovery unavailable: {err}"); + false + } + }; + match detect_units_from_kernel_acpi() { Ok(units) if !units.is_empty() => Ok(DiscoveryResult { units, source: DiscoverySource::KernelAcpi, kernel_acpi_status: "ok", ivrs_path: None, + dmar_present, }), Ok(_units) => { let (units, ivrs_path) = detect_units_from_discovered_ivrs()?; @@ -199,6 +222,7 @@ fn discover_units() -> Result { units, kernel_acpi_status: "empty", ivrs_path, + dmar_present, }) } Err(err) => { @@ -213,6 +237,7 @@ fn discover_units() -> Result { units, kernel_acpi_status: "error", ivrs_path, + dmar_present, }) } } @@ -230,6 +255,7 @@ fn discover_units() -> Result { units, kernel_acpi_status: "unsupported", ivrs_path, + dmar_present: false, }) } @@ -254,6 +280,11 @@ fn run() -> Result<(), String> { discovery.source.as_str() ); } + if discovery.dmar_present { + info!( + "iommu: detected kernel ACPI DMAR table; Intel VT-d runtime ownership should converge here rather than remain in acpid" + ); + } for (index, unit) in discovery.units.iter().enumerate() { info!( "iommu: discovered unit {} at MMIO {:#x}; initialization is 
deferred until first use", @@ -308,6 +339,7 @@ fn run_self_test() -> Result<(), String> { println!("discovery_source={}", discovery.source.as_str()); println!("kernel_acpi_status={}", discovery.kernel_acpi_status); + println!("dmar_present={}", if discovery.dmar_present { 1 } else { 0 }); println!( "ivrs_path={}", discovery @@ -430,4 +462,10 @@ mod tests { assert_eq!(DiscoverySource::Filesystem.as_str(), "filesystem"); assert_eq!(DiscoverySource::None.as_str(), "none"); } + + #[test] + fn host_discovery_defaults_to_no_dmar() { + let discovery = super::discover_units().expect("host discovery should succeed"); + assert!(!discovery.dmar_present); + } } diff --git a/local/recipes/system/redbear-authd/recipe.toml b/local/recipes/system/redbear-authd/recipe.toml new file mode 100644 index 00000000..0e4bfe8b --- /dev/null +++ b/local/recipes/system/redbear-authd/recipe.toml @@ -0,0 +1,8 @@ +[source] +path = "source" + +[build] +template = "cargo" + +[package.files] +"/usr/bin/redbear-authd" = "redbear-authd" diff --git a/local/recipes/system/redbear-authd/source/Cargo.toml b/local/recipes/system/redbear-authd/source/Cargo.toml new file mode 100644 index 00000000..6eb1c16c --- /dev/null +++ b/local/recipes/system/redbear-authd/source/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "redbear-authd" +version = "0.1.0" +edition = "2024" + +[[bin]] +name = "redbear-authd" +path = "src/main.rs" + +[dependencies] +serde = { version = "1", features = ["derive"] } +serde_json = "1" +# Pure-Rust SHA-256/SHA-512 crypt verifier for /etc/shadow entries. +# Free/open-source (`MIT OR Apache-2.0` upstream; acceptable under the project's free-software policy). 
+sha-crypt = "0.6.0-rc.4" diff --git a/local/recipes/system/redbear-authd/source/src/main.rs b/local/recipes/system/redbear-authd/source/src/main.rs new file mode 100644 index 00000000..f186c63e --- /dev/null +++ b/local/recipes/system/redbear-authd/source/src/main.rs @@ -0,0 +1,741 @@ +use std::{ + collections::HashMap, + env, + fs, + io::{BufRead, BufReader, Write}, + os::unix::{fs::PermissionsExt, net::{UnixListener, UnixStream}}, + path::Path, + process::{self, Command}, + sync::{Arc, Mutex}, + time::{Duration, Instant}, +}; + +use serde::{Deserialize, Serialize}; +use sha_crypt::{PasswordVerifier, ShaCrypt}; + +#[derive(Debug, PartialEq, Eq)] +enum VerifyError { + UnsupportedHashFormat, +} + +const AUTH_SOCKET_PATH: &str = "/run/redbear-authd.sock"; +const SESSIOND_SOCKET_PATH: &str = "/run/redbear-sessiond-control.sock"; +const FAILURE_WINDOW: Duration = Duration::from_secs(60); +const LOCKOUT_DURATION: Duration = Duration::from_secs(30); + +#[derive(Clone, Debug)] +struct Account { + username: String, + password: String, + uid: u32, + shell: String, +} + +#[derive(Clone, Debug)] +struct Approval { + expires_at: Instant, + vt: u32, +} + +#[derive(Clone, Debug, Default)] +struct FailureState { + attempts: Vec, + locked_until: Option, +} + +#[derive(Clone, Debug, Default)] +struct RuntimeState { + approvals: Arc>>, + failures: Arc>>, +} + +#[derive(Debug, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum AuthRequest { + Authenticate { + request_id: u64, + username: String, + password: String, + vt: u32, + }, + StartSession { + request_id: u64, + username: String, + session: String, + vt: u32, + }, + PowerAction { + request_id: u64, + action: String, + }, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum AuthResponse { + AuthenticateResult { + request_id: u64, + ok: bool, + message: String, + }, + SessionResult { + request_id: u64, + ok: bool, + exit_code: Option, + message: String, + }, 
+ PowerResult { + request_id: u64, + ok: bool, + message: String, + }, + Error { + message: String, + }, +} + +#[derive(Debug, Serialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum SessiondUpdate { + SetSession { + username: String, + uid: u32, + vt: u32, + leader: u32, + state: String, + }, + ResetSession { + vt: u32, + }, +} + +fn usage() -> &'static str { + "Usage: redbear-authd [--help]" +} + +fn parse_args() -> Result<(), String> { + let mut args = env::args().skip(1); + match args.next() { + None => Ok(()), + Some(arg) if arg == "--help" || arg == "-h" => Err(String::new()), + Some(arg) => Err(format!("unrecognized argument '{arg}'")), + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum AccountFormat { + Redox, + Unix, +} + +fn split_account_fields(line: &str) -> (AccountFormat, Vec) { + let format = if line.contains(';') { + AccountFormat::Redox + } else { + AccountFormat::Unix + }; + let delimiter = match format { + AccountFormat::Redox => ';', + AccountFormat::Unix => ':', + }; + (format, line.split(delimiter).map(str::to_string).collect::>()) +} + +fn load_shadow_passwords() -> Result, String> { + if !Path::new("/etc/shadow").exists() { + return Ok(HashMap::new()); + } + + let mut passwords = HashMap::new(); + let contents = fs::read_to_string("/etc/shadow") + .map_err(|err| format!("failed to read /etc/shadow: {err}"))?; + for (index, raw_line) in contents.lines().enumerate() { + let line = raw_line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + + let (_format, parts) = split_account_fields(line); + if parts.len() < 2 { + return Err(format!("invalid shadow entry on line {}", index + 1)); + } + passwords.insert(parts[0].clone(), parts[1].clone()); + } + Ok(passwords) +} + +fn load_account(username: &str) -> Result { + let shadow_passwords = load_shadow_passwords()?; + let contents = fs::read_to_string("/etc/passwd") + .map_err(|err| format!("failed to read /etc/passwd: {err}"))?; + for (index, raw_line) 
in contents.lines().enumerate() { + let line = raw_line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + + let (format, parts) = split_account_fields(line); + if parts[0] != username { + continue; + } + + let (uid_index, gid_index, shell_index, passwd_index) = match format { + AccountFormat::Redox if parts.len() >= 6 => (1, 2, 5, None), + AccountFormat::Unix if parts.len() >= 7 => (2, 3, 6, Some(1)), + AccountFormat::Redox => { + return Err(format!("invalid Redox passwd entry for user '{username}' on line {}", index + 1)) + } + AccountFormat::Unix => { + return Err(format!("invalid passwd entry for user '{username}' on line {}", index + 1)) + } + }; + + let uid = parts[uid_index] + .parse::() + .map_err(|_| format!("invalid uid for user '{username}'"))?; + let _gid = parts[gid_index] + .parse::() + .map_err(|_| format!("invalid gid for user '{username}'"))?; + let password = shadow_passwords + .get(username) + .cloned() + .unwrap_or_else(|| passwd_index.map(|index| parts[index].clone()).unwrap_or_default()); + + return Ok(Account { + username: parts[0].clone(), + password, + uid, + shell: parts[shell_index].clone(), + }); + } + + Err(format!("unknown user '{username}'")) +} + +fn trim_failures(entries: &mut Vec, now: Instant) { + entries.retain(|entry| now.saturating_duration_since(*entry) <= FAILURE_WINDOW); +} + +fn login_allowed(account: &Account) -> bool { + if account.uid != 0 && account.uid < 1000 { + return false; + } + !account.shell.is_empty() +} + +fn verify_shadow_password(password: &str, shadow_hash: &str) -> Result { + if shadow_hash.starts_with("$6$") || shadow_hash.starts_with("$5$") { + return Ok(ShaCrypt::default() + .verify_password(password.as_bytes(), shadow_hash) + .is_ok()); + } + Err(VerifyError::UnsupportedHashFormat) +} + +fn verify_password(account: &Account, password: &str) -> bool { + if account.password.is_empty() || account.password.starts_with('!') || account.password.starts_with('*') { + return false; + } + + 
if account.password.starts_with('$') { + match verify_shadow_password(password, &account.password) { + Ok(ok) => return ok, + Err(VerifyError::UnsupportedHashFormat) => { + eprintln!( + "redbear-authd: password hash for user {} uses an unsupported shadow format", + account.username + ); + return false; + } + } + } + + account.password == password +} + +fn remember_success(state: &RuntimeState, username: &str, vt: u32) -> Result<(), String> { + let mut approvals = state + .approvals + .lock() + .map_err(|_| String::from("approval state is poisoned"))?; + approvals.insert( + username.to_string(), + Approval { + expires_at: Instant::now() + Duration::from_secs(15), + vt, + }, + ); + + let mut failures = state + .failures + .lock() + .map_err(|_| String::from("failure state is poisoned"))?; + failures.remove(username); + Ok(()) +} + +fn remember_failure(state: &RuntimeState, username: &str) -> Result { + let mut failures = state + .failures + .lock() + .map_err(|_| String::from("failure state is poisoned"))?; + let now = Instant::now(); + let entry = failures.entry(username.to_string()).or_default(); + trim_failures(&mut entry.attempts, now); + entry.attempts.push(now); + if entry.attempts.len() >= 5 { + entry.locked_until = Some(now + LOCKOUT_DURATION); + Ok(String::from("Too many failed attempts. Try again shortly.")) + } else { + Ok(String::from("Invalid username or password.")) + } +} + +fn check_lockout(state: &RuntimeState, username: &str) -> Result, String> { + let mut failures = state + .failures + .lock() + .map_err(|_| String::from("failure state is poisoned"))?; + let now = Instant::now(); + if let Some(entry) = failures.get_mut(username) { + trim_failures(&mut entry.attempts, now); + if let Some(locked_until) = entry.locked_until { + if locked_until > now { + return Ok(Some(String::from("Too many failed attempts. 
Try again shortly."))); + } + entry.locked_until = None; + } + } + Ok(None) +} + +fn take_approval(state: &RuntimeState, username: &str, vt: u32) -> Result<(), String> { + let mut approvals = state + .approvals + .lock() + .map_err(|_| String::from("approval state is poisoned"))?; + let Some(approval) = approvals.remove(username) else { + return Err(String::from("No recent authentication approval exists for this user.")); + }; + if approval.expires_at < Instant::now() { + return Err(String::from("Authentication approval expired. Please log in again.")); + } + if approval.vt != vt { + return Err(String::from("Authentication approval does not match the requested VT.")); + } + Ok(()) +} + +fn send_sessiond_update(message: &SessiondUpdate) { + let Ok(mut stream) = UnixStream::connect(SESSIOND_SOCKET_PATH) else { + return; + }; + let Ok(json) = serde_json::to_string(message) else { + return; + }; + let _ = stream.write_all(json.as_bytes()); + let _ = stream.write_all(b"\n"); +} + +fn launch_session(account: &Account, session: &str, vt: u32) -> Result, String> { + if session != "kde-wayland" { + return Err(format!("unsupported session '{session}'")); + } + + let mut child = Command::new("/usr/bin/redbear-session-launch") + .arg("--username") + .arg(&account.username) + .arg("--mode") + .arg("session") + .arg("--session") + .arg(session) + .arg("--vt") + .arg(vt.to_string()) + .spawn() + .map_err(|err| format!("failed to launch session for {}: {err}", account.username))?; + + send_sessiond_update(&SessiondUpdate::SetSession { + username: account.username.clone(), + uid: account.uid, + vt, + leader: child.id(), + state: String::from("online"), + }); + + let status = child + .wait() + .map_err(|err| format!("failed while waiting for session process: {err}"))?; + + send_sessiond_update(&SessiondUpdate::ResetSession { vt }); + Ok(status.code()) +} + +fn run_power_action(action: &str) -> Result { + let candidates: &[&[&str]] = match action { + "shutdown" => 
&[&["/usr/bin/shutdown"], &["shutdown"], &["poweroff"]], + "reboot" => &[&["/usr/bin/reboot"], &["reboot"]], + other => return Err(format!("unsupported power action '{other}'")), + }; + + for candidate in candidates { + let program = candidate[0]; + let args = &candidate[1..]; + let Ok(status) = Command::new(program).args(args).status() else { + continue; + }; + if status.success() { + return Ok(format!("{action} requested")); + } + } + + Err(format!("failed to execute {action} command")) +} + +fn handle_request(request: AuthRequest, state: &RuntimeState) -> AuthResponse { + match request { + AuthRequest::Authenticate { + request_id, + username, + password, + vt, + } => { + match check_lockout(state, &username) { + Ok(Some(message)) => { + return AuthResponse::AuthenticateResult { + request_id, + ok: false, + message, + }; + } + Ok(None) => {} + Err(message) => return AuthResponse::Error { message }, + } + + match load_account(&username) { + Ok(account) if login_allowed(&account) && verify_password(&account, &password) => { + if let Err(message) = remember_success(state, &username, vt) { + return AuthResponse::Error { message }; + } + AuthResponse::AuthenticateResult { + request_id, + ok: true, + message: String::from("Authentication successful."), + } + } + Ok(_) | Err(_) => { + let message = remember_failure(state, &username) + .unwrap_or_else(|_| String::from("Invalid username or password.")); + AuthResponse::AuthenticateResult { + request_id, + ok: false, + message, + } + } + } + } + AuthRequest::StartSession { + request_id, + username, + session, + vt, + } => { + if let Err(message) = take_approval(state, &username, vt) { + return AuthResponse::SessionResult { + request_id, + ok: false, + exit_code: None, + message, + }; + } + + match load_account(&username).and_then(|account| { + let exit_code = launch_session(&account, &session, vt)?; + Ok((account, exit_code)) + }) { + Ok((_account, exit_code)) => AuthResponse::SessionResult { + request_id, + ok: true, + 
exit_code, + message: String::from("Session completed."), + }, + Err(message) => AuthResponse::SessionResult { + request_id, + ok: false, + exit_code: None, + message, + }, + } + } + AuthRequest::PowerAction { request_id, action } => match run_power_action(&action) { + Ok(message) => AuthResponse::PowerResult { + request_id, + ok: true, + message, + }, + Err(message) => AuthResponse::PowerResult { + request_id, + ok: false, + message, + }, + }, + } +} + +fn handle_connection(stream: UnixStream, state: RuntimeState) { + let mut reader = BufReader::new(stream); + let mut line = String::new(); + if reader.read_line(&mut line).is_err() { + return; + } + + let response = match serde_json::from_str::(line.trim()) { + Ok(request) => handle_request(request, &state), + Err(err) => AuthResponse::Error { + message: format!("invalid request: {err}"), + }, + }; + + let Ok(payload) = serde_json::to_string(&response) else { + return; + }; + let mut stream = reader.into_inner(); + let _ = stream.write_all(payload.as_bytes()); + let _ = stream.write_all(b"\n"); +} + +fn run() -> Result<(), String> { + match parse_args() { + Ok(()) => {} + Err(err) if err.is_empty() => { + println!("{}", usage()); + return Ok(()); + } + Err(err) => return Err(err), + } + + if Path::new(AUTH_SOCKET_PATH).exists() { + fs::remove_file(AUTH_SOCKET_PATH) + .map_err(|err| format!("failed to remove stale auth socket {AUTH_SOCKET_PATH}: {err}"))?; + } + + let listener = UnixListener::bind(AUTH_SOCKET_PATH) + .map_err(|err| format!("failed to bind auth socket {AUTH_SOCKET_PATH}: {err}"))?; + fs::set_permissions(AUTH_SOCKET_PATH, fs::Permissions::from_mode(0o600)) + .map_err(|err| format!("failed to set permissions on {AUTH_SOCKET_PATH}: {err}"))?; + let state = RuntimeState::default(); + + eprintln!("redbear-authd: listening on {AUTH_SOCKET_PATH}"); + for stream in listener.incoming() { + match stream { + Ok(stream) => handle_connection(stream, state.clone()), + Err(err) => eprintln!("redbear-authd: failed 
to accept connection: {err}"), + } + } + + Ok(()) +} + +fn main() { + if let Err(err) = run() { + eprintln!("redbear-authd: {err}"); + eprintln!("{}", usage()); + process::exit(1); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::{BufRead, BufReader, Write}; + + fn send_handle_connection_request(request: &str) -> AuthResponse { + let state = RuntimeState::default(); + let (mut client, server) = UnixStream::pair().expect("socket pair should open"); + client + .write_all(request.as_bytes()) + .and_then(|_| client.write_all(b"\n")) + .expect("request should write"); + handle_connection(server, state); + let mut line = String::new(); + BufReader::new(client) + .read_line(&mut line) + .expect("response should read"); + serde_json::from_str(line.trim()).expect("response should parse") + } + + #[test] + fn verify_password_accepts_plain_passwords() { + let account = Account { + username: String::from("root"), + password: String::from("password"), + uid: 0, + shell: String::from("/usr/bin/ion"), + }; + assert!(verify_password(&account, "password")); + assert!(!verify_password(&account, "wrong")); + } + + #[test] + fn verify_shadow_password_accepts_sha512_crypt() { + let hash = "$6$saltstring$adDbXsJjcDlq2662QPgd.tkSOVmnG9Tt3oXl4HR60SusC3AGjirnDenVZp3DGwLwqy6iYKCzannhaX9DR72nN1"; + assert_eq!(verify_shadow_password("password", hash), Ok(true)); + assert_eq!(verify_shadow_password("wrong", hash), Ok(false)); + } + + #[test] + fn verify_shadow_password_accepts_sha256_crypt() { + let hash = "$5$saltstring$OH4IDuTlsuTYPdED1gsuiRMyTAwNlRWyA6Xr3I4/dQ5"; + assert_eq!(verify_shadow_password("password", hash), Ok(true)); + assert_eq!(verify_shadow_password("wrong", hash), Ok(false)); + } + + #[test] + fn verify_shadow_password_rejects_unknown_hash_prefix() { + assert_eq!(verify_shadow_password("password", "$1$legacy$hash"), Err(VerifyError::UnsupportedHashFormat)); + } + + #[test] + fn verify_password_rejects_locked_accounts() { + let account = Account { + username: 
String::from("greeter"), + password: String::from("!"), + uid: 101, + shell: String::from("/usr/bin/ion"), + }; + assert!(!verify_password(&account, "anything")); + } + + #[test] + fn login_allowed_rejects_low_uid_non_root_accounts() { + let account = Account { + username: String::from("greeter"), + password: String::from("password"), + uid: 101, + shell: String::from("/usr/bin/ion"), + }; + assert!(!login_allowed(&account)); + } + + #[test] + fn remember_failure_locks_after_five_attempts() { + let state = RuntimeState::default(); + for _ in 0..4 { + let message = remember_failure(&state, "user").expect("failure tracking should succeed"); + assert_eq!(message, "Invalid username or password."); + } + + let message = remember_failure(&state, "user").expect("lockout tracking should succeed"); + assert_eq!(message, "Too many failed attempts. Try again shortly."); + assert_eq!( + check_lockout(&state, "user").expect("lockout lookup should succeed"), + Some(String::from("Too many failed attempts. 
Try again shortly.")) + ); + } + + #[test] + fn take_approval_rejects_vt_mismatch() { + let state = RuntimeState::default(); + remember_success(&state, "user", 3).expect("approval should be recorded"); + assert_eq!( + take_approval(&state, "user", 4), + Err(String::from("Authentication approval does not match the requested VT.")) + ); + } + + #[test] + fn start_session_request_rejects_missing_approval() { + let state = RuntimeState::default(); + let response = handle_request( + AuthRequest::StartSession { + request_id: 7, + username: String::from("user"), + session: String::from("kde-wayland"), + vt: 3, + }, + &state, + ); + + match response { + AuthResponse::SessionResult { + request_id, + ok, + exit_code, + message, + } => { + assert_eq!(request_id, 7); + assert!(!ok); + assert_eq!(exit_code, None); + assert_eq!(message, "No recent authentication approval exists for this user."); + } + _ => panic!("expected session_result response"), + } + } + + #[test] + fn authenticate_request_rejects_locked_account_marker() { + let account = Account { + username: String::from("greeter"), + password: String::from("!"), + uid: 101, + shell: String::from("/usr/bin/ion"), + }; + + assert!(!login_allowed(&account) || !verify_password(&account, "anything")); + } + + #[test] + fn power_action_request_rejects_unsupported_action() { + let state = RuntimeState::default(); + let response = handle_request( + AuthRequest::PowerAction { + request_id: 11, + action: String::from("hibernate"), + }, + &state, + ); + + match response { + AuthResponse::PowerResult { + request_id, + ok, + message, + } => { + assert_eq!(request_id, 11); + assert!(!ok); + assert_eq!(message, "unsupported power action 'hibernate'"); + } + _ => panic!("expected power_result response"), + } + } + + #[test] + fn handle_connection_returns_error_for_invalid_json() { + match send_handle_connection_request("not-json") { + AuthResponse::Error { message } => { + assert!(message.contains("invalid request:")); + } + _ => 
panic!("expected error response"), + } + } + + #[test] + fn split_account_fields_detects_redox_layout() { + let (format, parts) = split_account_fields("greeter;101;101;Greeter;/nonexistent;/usr/bin/ion"); + assert_eq!(format, AccountFormat::Redox); + assert_eq!(parts[0], "greeter"); + assert_eq!(parts[1], "101"); + } + + #[test] + fn split_account_fields_detects_unix_layout() { + let (format, parts) = split_account_fields("root:x:0:0:root:/root:/usr/bin/ion"); + assert_eq!(format, AccountFormat::Unix); + assert_eq!(parts[2], "0"); + } + + #[test] + fn split_account_fields_keeps_empty_redox_shadow_hash() { + let (_format, parts) = split_account_fields("greeter;"); + assert_eq!(parts, vec![String::from("greeter"), String::new()]); + } +} diff --git a/local/recipes/system/redbear-greeter/recipe.toml b/local/recipes/system/redbear-greeter/recipe.toml new file mode 100644 index 00000000..b73f19b5 --- /dev/null +++ b/local/recipes/system/redbear-greeter/recipe.toml @@ -0,0 +1,47 @@ +[source] +path = "source" + +[build] +template = "custom" +dependencies = ["qtbase", "qtdeclarative", "qtwayland"] +script = """ +set -ex + +DYNAMIC_INIT + +for qtdir in plugins mkspecs metatypes modules; do + if [ -d "${COOKBOOK_SYSROOT}/usr/${qtdir}" ] && [ -d "${COOKBOOK_SYSROOT}/${qtdir}" ] && [ ! -L "${COOKBOOK_SYSROOT}/${qtdir}" ]; then + rm -rf "${COOKBOOK_SYSROOT}/${qtdir}" + fi + if [ -d "${COOKBOOK_SYSROOT}/usr/${qtdir}" ] && [ ! -e "${COOKBOOK_SYSROOT}/${qtdir}" ]; then + ln -s "usr/${qtdir}" "${COOKBOOK_SYSROOT}/${qtdir}" + fi +done + +cookbook_cargo + +rm -f CMakeCache.txt +rm -rf CMakeFiles +cmake "${COOKBOOK_SOURCE}/ui" \ + -DCMAKE_TOOLCHAIN_FILE="${COOKBOOK_ROOT}/local/recipes/qt/redox-toolchain.cmake" \ + -DCMAKE_INSTALL_PREFIX=/usr \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_PREFIX_PATH="${COOKBOOK_SYSROOT}" \ + -DQT_NO_PRIVATE_MODULE_WARNING=ON \ + -Wno-dev + +cmake --build . -j${COOKBOOK_MAKE_JOBS} +cmake --install . 
--prefix "${COOKBOOK_STAGE}/usr" + +mkdir -pv "$COOKBOOK_STAGE/usr/bin" +mkdir -pv "$COOKBOOK_STAGE/usr/share/redbear/greeter" +cp -v "$COOKBOOK_SOURCE/redbear-greeter-compositor" "$COOKBOOK_STAGE/usr/share/redbear/greeter/redbear-greeter-compositor" +chmod 0755 "$COOKBOOK_STAGE/usr/share/redbear/greeter/redbear-greeter-compositor" +cp -v "$COOKBOOK_RECIPE/../../../../local/Assets/images/Red Bear OS loading background.png" "$COOKBOOK_STAGE/usr/share/redbear/greeter/background.png" +cp -v "$COOKBOOK_RECIPE/../../../../local/Assets/images/Red Bear OS icon.png" "$COOKBOOK_STAGE/usr/share/redbear/greeter/icon.png" +ln -svf ../share/redbear/greeter/redbear-greeter-compositor "$COOKBOOK_STAGE/usr/bin/redbear-greeter-compositor" +""" + +[package.files] +"/usr/bin/redbear-greeterd" = "redbear-greeterd" +"/usr/bin/redbear-greeter-ui" = "redbear-greeter-ui" diff --git a/local/recipes/system/redbear-greeter/source/Cargo.toml b/local/recipes/system/redbear-greeter/source/Cargo.toml new file mode 100644 index 00000000..d1715f61 --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "redbear-greeter" +version = "0.1.0" +edition = "2024" + +[[bin]] +name = "redbear-greeterd" +path = "src/main.rs" + +[dependencies] +libc = "0.2" +serde = { version = "1", features = ["derive"] } +serde_json = "1" diff --git a/local/recipes/system/redbear-greeter/source/redbear-greeter-compositor b/local/recipes/system/redbear-greeter/source/redbear-greeter-compositor new file mode 100755 index 00000000..698b9c3a --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/redbear-greeter-compositor @@ -0,0 +1,24 @@ +#!/usr/bin/env sh + +export DISPLAY="" +export WAYLAND_DISPLAY="${WAYLAND_DISPLAY:-wayland-0}" +export XDG_SESSION_TYPE=wayland +export LIBSEAT_BACKEND=seatd +export SEATD_SOCK=/run/seatd.sock +export QT_PLUGIN_PATH="${QT_PLUGIN_PATH:-/usr/plugins}" +export 
QT_QPA_PLATFORM_PLUGIN_PATH="${QT_QPA_PLATFORM_PLUGIN_PATH:-/usr/plugins/platforms}" +export QML2_IMPORT_PATH="${QML2_IMPORT_PATH:-/usr/qml}" +export XCURSOR_THEME="${XCURSOR_THEME:-Pop}" +export XKB_CONFIG_ROOT="${XKB_CONFIG_ROOT:-/usr/share/X11/xkb}" + +if [ -z "${XDG_RUNTIME_DIR:-}" ]; then + export XDG_RUNTIME_DIR="/tmp/run/greeter" +fi + +mkdir -p "$XDG_RUNTIME_DIR" + +if [ -z "${DBUS_SESSION_BUS_ADDRESS:-}" ] && command -v dbus-launch >/scheme/null 2>&1; then + eval "$(dbus-launch --sh-syntax)" +fi + +exec kwin_wayland --replace diff --git a/local/recipes/system/redbear-greeter/source/src/main.rs b/local/recipes/system/redbear-greeter/source/src/main.rs new file mode 100644 index 00000000..d74b9394 --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/src/main.rs @@ -0,0 +1,699 @@ +use std::{ + env, + fs, + io::{self, BufRead, BufReader, Write}, + os::unix::{fs::PermissionsExt, net::{UnixListener, UnixStream}}, + path::{Path, PathBuf}, + process::{self, Child, Command, ExitStatus}, + thread, + time::{Duration, Instant}, +}; + +use serde::{Deserialize, Serialize}; + +const GREETER_SOCKET_PATH: &str = "/run/redbear-greeterd.sock"; +const AUTH_SOCKET_PATH: &str = "/run/redbear-authd.sock"; +const BACKGROUND_PATH: &str = "/usr/share/redbear/greeter/background.png"; +const ICON_PATH: &str = "/usr/share/redbear/greeter/icon.png"; + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum GreeterState { + Starting, + GreeterReady, + Authenticating, + LaunchingSession, + SessionRunning, + ReturningToGreeter, + PowerAction, + FatalError, +} + +impl GreeterState { + fn as_str(self) -> &'static str { + match self { + GreeterState::Starting => "starting", + GreeterState::GreeterReady => "greeter_ready", + GreeterState::Authenticating => "authenticating", + GreeterState::LaunchingSession => "launching_session", + GreeterState::SessionRunning => "session_running", + GreeterState::ReturningToGreeter => "returning_to_greeter", + GreeterState::PowerAction => 
"power_action", + GreeterState::FatalError => "fatal_error", + } + } +} + +#[derive(Debug)] +struct GreeterDaemon { + listener: UnixListener, + vt: u32, + greeter_user: String, + runtime_dir: PathBuf, + wayland_display: String, + state: GreeterState, + message: String, + compositor: Option, + ui: Option, + restart_attempts: Vec, +} + +#[derive(Debug, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum GreeterRequest { + Hello { version: u32 }, + SubmitLogin { username: String, password: String }, + RequestShutdown, + RequestReboot, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum GreeterResponse { + HelloOk { + background: String, + icon: String, + session_name: String, + state: String, + message: String, + }, + LoginResult { + ok: bool, + state: String, + message: String, + }, + ActionResult { + ok: bool, + message: String, + }, + Error { + message: String, + }, +} + +#[derive(Debug, Serialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum AuthRequest<'a> { + Authenticate { + request_id: u64, + username: &'a str, + password: &'a str, + vt: u32, + }, + StartSession { + request_id: u64, + username: &'a str, + session: &'a str, + vt: u32, + }, + PowerAction { + request_id: u64, + action: &'a str, + }, +} + +#[derive(Debug, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum AuthResponse { + AuthenticateResult { + ok: bool, + message: String, + #[allow(dead_code)] + request_id: u64, + }, + SessionResult { + ok: bool, + message: String, + #[allow(dead_code)] + request_id: u64, + #[allow(dead_code)] + exit_code: Option, + }, + PowerResult { + ok: bool, + message: String, + #[allow(dead_code)] + request_id: u64, + }, + Error { + message: String, + }, +} + +fn usage() -> &'static str { + "Usage: redbear-greeterd [--help]" +} + +fn parse_args() -> Result<(), String> { + let mut args = env::args().skip(1); + match args.next() { + None => Ok(()), + Some(arg) if arg == 
"--help" || arg == "-h" => Err(String::new()), + Some(arg) => Err(format!("unrecognized argument '{arg}'")), + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum AccountFormat { + Redox, + Unix, +} + +fn split_account_fields(line: &str) -> (AccountFormat, Vec<&str>) { + let format = if line.contains(';') { + AccountFormat::Redox + } else { + AccountFormat::Unix + }; + let delimiter = match format { + AccountFormat::Redox => ';', + AccountFormat::Unix => ':', + }; + (format, line.split(delimiter).collect()) +} + +fn parse_uid_gid(parts: &[&str], format: AccountFormat) -> Option<(u32, u32)> { + let (uid_index, gid_index) = match format { + AccountFormat::Redox if parts.len() >= 3 => (1, 2), + AccountFormat::Unix if parts.len() >= 4 => (2, 3), + _ => return None, + }; + + let uid = parts[uid_index].parse::().ok()?; + let gid = parts[gid_index].parse::().ok()?; + Some((uid, gid)) +} + +fn load_uid_gid(username: &str) -> Result<(u32, u32), String> { + let passwd = fs::read_to_string("/etc/passwd").map_err(|err| format!("failed to read /etc/passwd: {err}"))?; + for line in passwd.lines() { + if line.trim().is_empty() || line.starts_with('#') { + continue; + } + let (format, parts) = split_account_fields(line); + if parts.len() < 3 || parts[0] != username { + continue; + } + if let Some((uid, gid)) = parse_uid_gid(&parts, format) { + return Ok((uid, gid)); + } + return Err(format!("invalid uid/gid for user '{username}'")); + } + Err(format!("unknown greeter user '{username}'")) +} + +fn change_socket_ownership(path: &Path, uid: u32, gid: u32) -> Result<(), String> { + let c_path = std::ffi::CString::new(path.as_os_str().as_encoded_bytes()) + .map_err(|_| format!("socket path {} contains interior NUL", path.display()))?; + let result = unsafe { libc::chown(c_path.as_ptr(), uid, gid) }; + if result == 0 { + Ok(()) + } else { + Err(format!("failed to chown {}: {}", path.display(), io::Error::last_os_error())) + } +} + +fn send_auth_request(request: &AuthRequest<'_>) 
-> Result { + let mut stream = UnixStream::connect(AUTH_SOCKET_PATH) + .map_err(|err| format!("failed to connect to {AUTH_SOCKET_PATH}: {err}"))?; + let payload = serde_json::to_string(request).map_err(|err| format!("failed to serialize auth request: {err}"))?; + stream + .write_all(payload.as_bytes()) + .and_then(|_| stream.write_all(b"\n")) + .map_err(|err| format!("failed to write auth request: {err}"))?; + + let mut reader = BufReader::new(stream); + let mut line = String::new(); + reader + .read_line(&mut line) + .map_err(|err| format!("failed to read auth response: {err}"))?; + serde_json::from_str(line.trim()).map_err(|err| format!("failed to parse auth response: {err}")) +} + +impl GreeterDaemon { + fn hello_response(&self) -> GreeterResponse { + GreeterResponse::HelloOk { + background: String::from(BACKGROUND_PATH), + icon: String::from(ICON_PATH), + session_name: String::from("KDE on Wayland"), + state: String::from(self.state.as_str()), + message: self.message.clone(), + } + } + + fn new() -> Result { + let vt = env::var("VT") + .ok() + .and_then(|value| value.parse::().ok()) + .unwrap_or(3); + let greeter_user = env::var("REDBEAR_GREETER_USER").unwrap_or_else(|_| String::from("greeter")); + + if Path::new(GREETER_SOCKET_PATH).exists() { + fs::remove_file(GREETER_SOCKET_PATH) + .map_err(|err| format!("failed to remove stale greeter socket: {err}"))?; + } + let listener = UnixListener::bind(GREETER_SOCKET_PATH) + .map_err(|err| format!("failed to bind {GREETER_SOCKET_PATH}: {err}"))?; + listener + .set_nonblocking(true) + .map_err(|err| format!("failed to set nonblocking socket mode: {err}"))?; + let (uid, gid) = load_uid_gid(&greeter_user)?; + fs::set_permissions(GREETER_SOCKET_PATH, fs::Permissions::from_mode(0o660)) + .map_err(|err| format!("failed to chmod {GREETER_SOCKET_PATH}: {err}"))?; + change_socket_ownership(Path::new(GREETER_SOCKET_PATH), uid, gid)?; + + Ok(Self { + listener, + vt, + greeter_user, + runtime_dir: 
PathBuf::from("/tmp/run/redbear-greeter"), + wayland_display: String::from("wayland-0"), + state: GreeterState::Starting, + message: String::from("Starting greeter"), + compositor: None, + ui: None, + restart_attempts: Vec::new(), + }) + } + + fn set_state(&mut self, state: GreeterState, message: impl Into) { + self.state = state; + self.message = message.into(); + } + + fn configure_command(&self, command: &mut Command) { + command.env("QT_PLUGIN_PATH", "/usr/plugins"); + command.env("QT_QPA_PLATFORM_PLUGIN_PATH", "/usr/plugins/platforms"); + command.env("QML2_IMPORT_PATH", "/usr/qml"); + command.env("XCURSOR_THEME", "Pop"); + command.env("XKB_CONFIG_ROOT", "/usr/share/X11/xkb"); + command.env("WAYLAND_DISPLAY", &self.wayland_display); + } + + fn spawn_as_greeter(&self, program: &str) -> Result { + let mut command = Command::new("/usr/bin/redbear-session-launch"); + command + .arg("--username") + .arg(&self.greeter_user) + .arg("--mode") + .arg("command") + .arg("--vt") + .arg(self.vt.to_string()) + .arg("--runtime-dir") + .arg(&self.runtime_dir) + .arg("--wayland-display") + .arg(&self.wayland_display) + .arg("--command") + .arg(program); + self.configure_command(&mut command); + command + .spawn() + .map_err(|err| format!("failed to spawn {program} as {}: {err}", self.greeter_user)) + } + + fn wait_for_wayland_socket(&self) -> Result<(), String> { + let socket_path = self.runtime_dir.join(&self.wayland_display); + for _ in 0..60 { + if socket_path.exists() { + return Ok(()); + } + thread::sleep(Duration::from_millis(250)); + } + Err(format!("timed out waiting for compositor socket {}", socket_path.display())) + } + + fn start_surface(&mut self) -> Result<(), String> { + self.set_state(GreeterState::Starting, "Starting greeter surface"); + self.compositor = Some(self.spawn_as_greeter("/usr/bin/redbear-greeter-compositor")?); + self.wait_for_wayland_socket()?; + self.ui = Some(self.spawn_as_greeter("/usr/bin/redbear-greeter-ui")?); + 
self.set_state(GreeterState::GreeterReady, "Ready"); + Ok(()) + } + + fn kill_child(child: &mut Option) { + if let Some(process) = child.as_mut() { + let _ = process.kill(); + let _ = process.wait(); + } + *child = None; + } + + fn note_restart(&mut self) -> Result<(), String> { + let now = Instant::now(); + self.restart_attempts + .retain(|attempt| now.saturating_duration_since(*attempt) <= Duration::from_secs(60)); + self.restart_attempts.push(now); + if self.restart_attempts.len() > 3 { + self.set_state(GreeterState::FatalError, "Greeter restart limit reached"); + return Err(String::from("greeter restart limit reached; leaving fallback consoles available")); + } + Ok(()) + } + + fn handle_surface_exit(&mut self, status: ExitStatus) -> Result<(), String> { + self.ui = None; + if status.success() { + self.message = String::from("Greeter UI exited"); + } else { + self.message = format!("Greeter UI exited unexpectedly: {status}"); + } + self.note_restart()?; + Self::kill_child(&mut self.compositor); + self.start_surface() + } + + fn launch_session(&mut self, username: &str) -> Result<(), String> { + self.set_state(GreeterState::LaunchingSession, "Starting session"); + Self::kill_child(&mut self.ui); + Self::kill_child(&mut self.compositor); + self.set_state(GreeterState::SessionRunning, "Session running"); + + let response = send_auth_request(&AuthRequest::StartSession { + request_id: 2, + username, + session: "kde-wayland", + vt: self.vt, + })?; + + self.set_state(GreeterState::ReturningToGreeter, "Returning to greeter"); + match response { + AuthResponse::SessionResult { ok, message, .. 
} => { + if !ok { + self.set_state(GreeterState::GreeterReady, message.clone()); + } + self.message = message; + } + AuthResponse::Error { message } => self.message = message, + _ => self.message = String::from("Unexpected auth response while starting session"), + } + self.start_surface() + } + + fn handle_connection(&mut self, stream: UnixStream) -> Result<(), String> { + let mut reader = BufReader::new(stream); + let mut line = String::new(); + reader + .read_line(&mut line) + .map_err(|err| format!("failed to read greeter request: {err}"))?; + + let request = serde_json::from_str::(line.trim()) + .map_err(|err| format!("invalid greeter request: {err}"))?; + let mut launch_username = None; + let response = match request { + GreeterRequest::Hello { version } => { + if version != 1 { + GreeterResponse::Error { + message: format!("unsupported greeter protocol version {version}"), + } + } else { + self.hello_response() + } + } + GreeterRequest::SubmitLogin { username, password } => { + self.set_state(GreeterState::Authenticating, "Authenticating"); + match send_auth_request(&AuthRequest::Authenticate { + request_id: 1, + username: &username, + password: &password, + vt: self.vt, + })? { + AuthResponse::AuthenticateResult { ok, message, .. 
} => { + if ok { + self.set_state(GreeterState::LaunchingSession, "Starting session"); + launch_username = Some(username); + } else { + self.set_state(GreeterState::GreeterReady, message.clone()); + } + GreeterResponse::LoginResult { + ok, + state: String::from(self.state.as_str()), + message, + } + } + AuthResponse::Error { message } => { + self.set_state(GreeterState::GreeterReady, message.clone()); + GreeterResponse::Error { message } + } + _ => GreeterResponse::Error { + message: String::from("unexpected auth response"), + }, + } + } + GreeterRequest::RequestShutdown => { + self.set_state(GreeterState::PowerAction, "Requesting shutdown"); + match send_auth_request(&AuthRequest::PowerAction { + request_id: 3, + action: "shutdown", + })? { + AuthResponse::PowerResult { ok, message, .. } => GreeterResponse::ActionResult { ok, message }, + AuthResponse::Error { message } => GreeterResponse::Error { message }, + _ => GreeterResponse::Error { + message: String::from("unexpected power-action response"), + }, + } + } + GreeterRequest::RequestReboot => { + self.set_state(GreeterState::PowerAction, "Requesting reboot"); + match send_auth_request(&AuthRequest::PowerAction { + request_id: 4, + action: "reboot", + })? { + AuthResponse::PowerResult { ok, message, .. 
} => GreeterResponse::ActionResult { ok, message }, + AuthResponse::Error { message } => GreeterResponse::Error { message }, + _ => GreeterResponse::Error { + message: String::from("unexpected power-action response"), + }, + } + } + }; + + let payload = serde_json::to_string(&response) + .map_err(|err| format!("failed to serialize greeter response: {err}"))?; + let mut stream = reader.into_inner(); + stream + .write_all(payload.as_bytes()) + .and_then(|_| stream.write_all(b"\n")) + .map_err(|err| format!("failed to write greeter response: {err}"))?; + + if let Some(username) = launch_username { + self.launch_session(&username)?; + } + Ok(()) + } + + fn check_children(&mut self) -> Result<(), String> { + if let Some(process) = self.compositor.as_mut() { + if let Some(status) = process.try_wait().map_err(|err| format!("failed to poll compositor: {err}"))? { + self.compositor = None; + self.note_restart()?; + self.message = format!("Greeter compositor exited unexpectedly: {status}"); + Self::kill_child(&mut self.ui); + self.start_surface()?; + return Ok(()); + } + } + + if let Some(process) = self.ui.as_mut() { + if let Some(status) = process.try_wait().map_err(|err| format!("failed to poll greeter UI: {err}"))? 
{ + return self.handle_surface_exit(status); + } + } + + Ok(()) + } + + fn run(&mut self) -> Result<(), String> { + self.start_surface()?; + loop { + self.check_children()?; + match self.listener.accept() { + Ok((stream, _)) => { + if let Err(err) = self.handle_connection(stream) { + eprintln!("redbear-greeterd: {err}"); + } + } + Err(err) if err.kind() == io::ErrorKind::WouldBlock => { + thread::sleep(Duration::from_millis(100)); + } + Err(err) => return Err(format!("failed to accept greeter connection: {err}")), + } + } + } +} + +fn run() -> Result<(), String> { + match parse_args() { + Ok(()) => {} + Err(err) if err.is_empty() => { + println!("{}", usage()); + return Ok(()); + } + Err(err) => return Err(err), + } + + let mut daemon = GreeterDaemon::new()?; + daemon.run() +} + +fn main() { + if let Err(err) = run() { + eprintln!("redbear-greeterd: {err}"); + eprintln!("{}", usage()); + process::exit(1); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::{BufRead, BufReader, Write}; + use std::sync::atomic::{AtomicU64, Ordering}; + + static TEST_SOCKET_COUNTER: AtomicU64 = AtomicU64::new(0); + + fn test_daemon() -> GreeterDaemon { + let unique = TEST_SOCKET_COUNTER.fetch_add(1, Ordering::Relaxed); + let socket_path = std::env::temp_dir().join(format!( + "redbear-greeterd-test-{}-{}.sock", + process::id(), + unique + )); + let _ = fs::remove_file(&socket_path); + let listener = UnixListener::bind(&socket_path).expect("test listener should bind"); + listener + .set_nonblocking(true) + .expect("test listener should become nonblocking"); + + GreeterDaemon { + listener, + vt: 3, + greeter_user: String::from("greeter"), + runtime_dir: PathBuf::from("/tmp/run/redbear-greeter-test"), + wayland_display: String::from("wayland-0"), + state: GreeterState::Starting, + message: String::from("Starting greeter"), + compositor: None, + ui: None, + restart_attempts: Vec::new(), + } + } + + fn send_daemon_request(daemon: &mut GreeterDaemon, request: &str) -> 
GreeterResponse { + let (mut client, server) = UnixStream::pair().expect("socket pair should open"); + client + .write_all(request.as_bytes()) + .and_then(|_| client.write_all(b"\n")) + .expect("request should write"); + daemon.handle_connection(server).expect("handler should succeed"); + let mut line = String::new(); + BufReader::new(client) + .read_line(&mut line) + .expect("response should read"); + serde_json::from_str(line.trim()).expect("response should parse") + } + + #[test] + fn greeter_state_strings_match_protocol_contract() { + assert_eq!(GreeterState::Starting.as_str(), "starting"); + assert_eq!(GreeterState::GreeterReady.as_str(), "greeter_ready"); + assert_eq!(GreeterState::Authenticating.as_str(), "authenticating"); + assert_eq!(GreeterState::LaunchingSession.as_str(), "launching_session"); + assert_eq!(GreeterState::SessionRunning.as_str(), "session_running"); + assert_eq!(GreeterState::ReturningToGreeter.as_str(), "returning_to_greeter"); + assert_eq!(GreeterState::PowerAction.as_str(), "power_action"); + assert_eq!(GreeterState::FatalError.as_str(), "fatal_error"); + } + + #[test] + fn hello_response_uses_installed_asset_paths() { + let mut daemon = test_daemon(); + daemon.set_state(GreeterState::GreeterReady, "Ready"); + + match daemon.hello_response() { + GreeterResponse::HelloOk { + background, + icon, + session_name, + state, + message, + } => { + assert_eq!(background, BACKGROUND_PATH); + assert_eq!(icon, ICON_PATH); + assert_eq!(session_name, "KDE on Wayland"); + assert_eq!(state, "greeter_ready"); + assert_eq!(message, "Ready"); + } + _ => panic!("expected hello_ok response"), + } + } + + #[test] + fn note_restart_bounds_repeated_failures() { + let mut daemon = test_daemon(); + + for _ in 0..3 { + daemon.note_restart().expect("restart should remain bounded"); + assert_ne!(daemon.state, GreeterState::FatalError); + } + + let error = daemon.note_restart().expect_err("fourth restart should fail"); + assert!(error.contains("restart limit")); + 
assert_eq!(daemon.state, GreeterState::FatalError); + assert_eq!(daemon.message, "Greeter restart limit reached"); + } + + #[test] + fn handle_connection_rejects_unsupported_protocol_version() { + let mut daemon = test_daemon(); + + match send_daemon_request(&mut daemon, r#"{"type":"hello","version":99}"#) { + GreeterResponse::Error { message } => { + assert_eq!(message, "unsupported greeter protocol version 99"); + } + _ => panic!("expected error response"), + } + } + + #[test] + fn handle_connection_rejects_invalid_json_request() { + let mut daemon = test_daemon(); + let (mut client, server) = UnixStream::pair().expect("socket pair should open"); + client + .write_all(b"not-json\n") + .expect("request should write"); + let error = daemon + .handle_connection(server) + .expect_err("invalid request should fail"); + assert!(error.contains("invalid greeter request")); + } + + #[test] + fn parse_uid_gid_accepts_redox_style_layout() { + assert_eq!( + parse_uid_gid( + &["greeter", "101", "101", "Greeter", "/nonexistent", "/usr/bin/ion"], + AccountFormat::Redox, + ), + Some((101, 101)) + ); + } + + #[test] + fn parse_uid_gid_accepts_unix_style_layout() { + assert_eq!( + parse_uid_gid( + &["root", "x", "0", "0", "root", "/root", "/usr/bin/ion"], + AccountFormat::Unix, + ), + Some((0, 0)) + ); + } + + #[test] + fn split_account_fields_detects_redox_layout() { + let (format, parts) = split_account_fields("greeter;101;101;Greeter;/nonexistent;/usr/bin/ion"); + assert_eq!(format, AccountFormat::Redox); + assert_eq!(parts[0], "greeter"); + assert_eq!(parts[2], "101"); + } +} diff --git a/local/recipes/system/redbear-greeter/source/ui/CMakeLists.txt b/local/recipes/system/redbear-greeter/source/ui/CMakeLists.txt new file mode 100644 index 00000000..a5f85a2b --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/ui/CMakeLists.txt @@ -0,0 +1,29 @@ +cmake_minimum_required(VERSION 3.20) +project(redbear-greeter-ui LANGUAGES CXX) + +set(CMAKE_AUTOMOC ON) +set(CMAKE_AUTORCC 
ON) +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +find_package(Qt6 REQUIRED COMPONENTS Core Gui Qml Quick QuickControls2) + +qt_add_executable(redbear-greeter-ui + main.cpp + greeter_backend.cpp + greeter_backend.h + resources.qrc +) + +target_compile_options(redbear-greeter-ui PRIVATE -fcf-protection=none) +target_link_options(redbear-greeter-ui PRIVATE -fcf-protection=none) + +target_link_libraries(redbear-greeter-ui PRIVATE + Qt6::Core + Qt6::Gui + Qt6::Qml + Qt6::Quick + Qt6::QuickControls2 +) + +install(TARGETS redbear-greeter-ui RUNTIME DESTINATION bin) diff --git a/local/recipes/system/redbear-greeter/source/ui/Main.qml b/local/recipes/system/redbear-greeter/source/ui/Main.qml new file mode 100644 index 00000000..727b7592 --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/ui/Main.qml @@ -0,0 +1,152 @@ +import QtQuick +import QtQuick.Controls +import QtQuick.Layouts + +ApplicationWindow { + id: root + visible: true + visibility: Window.FullScreen + color: "#11090a" + title: "Red Bear Greeter" + + function submitLogin() { + greeterBackend.submitLogin(usernameField.text, passwordField.text) + } + + Rectangle { + anchors.fill: parent + color: "#11090a" + + Image { + anchors.fill: parent + source: greeterBackend.backgroundUrl + fillMode: Image.PreserveAspectCrop + asynchronous: true + opacity: 0.88 + } + + Rectangle { + anchors.fill: parent + color: "#230a0d" + opacity: 0.45 + } + } + + Pane { + width: Math.min(parent.width * 0.42, 620) + anchors.centerIn: parent + padding: 28 + + background: Rectangle { + radius: 18 + color: "#cc150c0f" + border.color: "#66f7d7d7" + border.width: 1 + } + + ColumnLayout { + anchors.fill: parent + spacing: 18 + + Item { + Layout.fillWidth: true + Layout.preferredHeight: 156 + + Image { + anchors.horizontalCenter: parent.horizontalCenter + anchors.top: parent.top + anchors.topMargin: 2 + source: greeterBackend.iconUrl + width: 108 + height: 108 + fillMode: Image.PreserveAspectFit + asynchronous: 
true + } + + Column { + anchors.horizontalCenter: parent.horizontalCenter + anchors.bottom: parent.bottom + spacing: 4 + + Label { + anchors.horizontalCenter: parent.horizontalCenter + text: "Red Bear OS" + font.pixelSize: 26 + font.bold: true + color: "#fff4f4" + } + + Label { + anchors.horizontalCenter: parent.horizontalCenter + text: greeterBackend.sessionName + font.pixelSize: 15 + color: "#f1c5c5" + } + } + } + + TextField { + id: usernameField + Layout.fillWidth: true + placeholderText: "Username" + enabled: !greeterBackend.busy + selectByMouse: true + color: "#fff8f8" + font.pixelSize: 18 + onAccepted: passwordField.forceActiveFocus() + } + + TextField { + id: passwordField + Layout.fillWidth: true + placeholderText: "Password" + enabled: !greeterBackend.busy + selectByMouse: true + echoMode: TextInput.Password + color: "#fff8f8" + font.pixelSize: 18 + onAccepted: root.submitLogin() + } + + Label { + Layout.fillWidth: true + wrapMode: Text.Wrap + text: greeterBackend.message + color: greeterBackend.state === "fatal_error" ? "#ffb4b4" : "#ffe7e7" + font.pixelSize: 15 + } + + BusyIndicator { + Layout.alignment: Qt.AlignHCenter + running: greeterBackend.busy + visible: running + } + + RowLayout { + Layout.fillWidth: true + spacing: 12 + + Button { + Layout.fillWidth: true + text: greeterBackend.busy ? 
"Working…" : "Log In" + enabled: !greeterBackend.busy + onClicked: root.submitLogin() + } + + Button { + text: "Shutdown" + enabled: !greeterBackend.busy + onClicked: greeterBackend.requestShutdown() + } + + Button { + text: "Reboot" + enabled: !greeterBackend.busy + onClicked: greeterBackend.requestReboot() + } + } + } + } + + Component.onCompleted: usernameField.forceActiveFocus() +} diff --git a/local/recipes/system/redbear-greeter/source/ui/greeter_backend.cpp b/local/recipes/system/redbear-greeter/source/ui/greeter_backend.cpp new file mode 100644 index 00000000..996d4539 --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/ui/greeter_backend.cpp @@ -0,0 +1,296 @@ +#include "greeter_backend.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +namespace { +constexpr auto kGreeterSocketPath = "/run/redbear-greeterd.sock"; +constexpr auto kConnectTimeoutMs = 1500; +constexpr auto kReadTimeoutMs = 5000; + +bool waitForReadable(int fd, int timeoutMs, QString *error) { + pollfd descriptor{}; + descriptor.fd = fd; + descriptor.events = POLLIN; + + const auto pollResult = ::poll(&descriptor, 1, timeoutMs); + if (pollResult > 0) { + return true; + } + if (pollResult == 0) { + *error = QStringLiteral("timed out waiting for greeter response"); + return false; + } + + *error = QStringLiteral("failed while waiting for greeter response: %1").arg(QString::fromLocal8Bit(std::strerror(errno))); + return false; +} +} + +GreeterBackend::GreeterBackend(QObject *parent) : QObject(parent) {} + +QUrl GreeterBackend::backgroundUrl() const { + return m_backgroundUrl; +} + +QUrl GreeterBackend::iconUrl() const { + return m_iconUrl; +} + +QString GreeterBackend::sessionName() const { + return m_sessionName; +} + +QString GreeterBackend::state() const { + return m_state; +} + +QString GreeterBackend::message() const { + return m_message; +} + +bool GreeterBackend::busy() const { + return m_busy; +} 
+ +void GreeterBackend::initialize() { + const auto response = sendRequest(QJsonDocument(QJsonObject{{QStringLiteral("type"), QStringLiteral("hello")}, + {QStringLiteral("version"), 1}}) + .toJson(QJsonDocument::Compact)); + if (!response.transportOk) { + applyError(response.transportError); + return; + } + + if (response.type != QStringLiteral("hello_ok")) { + applyError(response.message.isEmpty() ? QStringLiteral("unexpected greeter hello response") : response.message); + return; + } + + setGreeting(response.backgroundPath, response.iconPath, response.sessionName); + setStatus(response.state, response.message); +} + +void GreeterBackend::submitLogin(const QString &username, const QString &password) { + if (m_busy) { + return; + } + if (username.trimmed().isEmpty() || password.isEmpty()) { + setStatus(QStringLiteral("greeter_ready"), QStringLiteral("Enter both username and password.")); + return; + } + + setBusy(true); + setStatus(QStringLiteral("authenticating"), QStringLiteral("Authenticating")); + + const auto response = sendRequest(QJsonDocument(QJsonObject{{QStringLiteral("type"), QStringLiteral("submit_login")}, + {QStringLiteral("username"), username}, + {QStringLiteral("password"), password}}) + .toJson(QJsonDocument::Compact)); + setBusy(false); + if (!response.transportOk) { + applyError(response.transportError); + return; + } + + if (response.type == QStringLiteral("login_result")) { + setStatus(response.state, response.message); + if (response.ok) { + QTimer::singleShot(0, qApp, &QCoreApplication::quit); + } + return; + } + + applyError(response.message.isEmpty() ? 
QStringLiteral("unexpected login response") : response.message); +} + +void GreeterBackend::requestShutdown() { + if (m_busy) { + return; + } + + setBusy(true); + setStatus(QStringLiteral("power_action"), QStringLiteral("Requesting shutdown")); + const auto response = sendRequest( + QJsonDocument(QJsonObject{{QStringLiteral("type"), QStringLiteral("request_shutdown")}}) + .toJson(QJsonDocument::Compact)); + setBusy(false); + + if (!response.transportOk) { + applyError(response.transportError); + return; + } + + if (response.type == QStringLiteral("action_result")) { + setStatus(response.ok ? QStringLiteral("power_action") : QStringLiteral("greeter_ready"), response.message); + return; + } + + applyError(response.message.isEmpty() ? QStringLiteral("unexpected shutdown response") : response.message); +} + +void GreeterBackend::requestReboot() { + if (m_busy) { + return; + } + + setBusy(true); + setStatus(QStringLiteral("power_action"), QStringLiteral("Requesting reboot")); + const auto response = sendRequest( + QJsonDocument(QJsonObject{{QStringLiteral("type"), QStringLiteral("request_reboot")}}) + .toJson(QJsonDocument::Compact)); + setBusy(false); + + if (!response.transportOk) { + applyError(response.transportError); + return; + } + + if (response.type == QStringLiteral("action_result")) { + setStatus(response.ok ? QStringLiteral("power_action") : QStringLiteral("greeter_ready"), response.message); + return; + } + + applyError(response.message.isEmpty() ? 
QStringLiteral("unexpected reboot response") : response.message); +} + +GreeterBackend::Response GreeterBackend::sendRequest(const QByteArray &payload) const { + Response response; + + const int fd = ::socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0); + if (fd < 0) { + response.transportError = QStringLiteral("failed to create greeter socket: %1") + .arg(QString::fromLocal8Bit(std::strerror(errno))); + return response; + } + + sockaddr_un address{}; + address.sun_family = AF_UNIX; + std::strncpy(address.sun_path, kGreeterSocketPath, sizeof(address.sun_path) - 1); + const auto addressSize = static_cast<socklen_t>(offsetof(sockaddr_un, sun_path) + std::strlen(address.sun_path) + 1); + if (::connect(fd, reinterpret_cast<const sockaddr *>(&address), addressSize) != 0) { + response.transportError = QStringLiteral("failed to connect to %1: %2") + .arg(QString::fromLatin1(kGreeterSocketPath), + QString::fromLocal8Bit(std::strerror(errno))); + ::close(fd); + return response; + } + + const auto fullPayload = payload + '\n'; + qsizetype written = 0; + while (written < fullPayload.size()) { + const auto chunk = ::write(fd, fullPayload.constData() + written, static_cast<size_t>(fullPayload.size() - written)); + if (chunk < 0) { + response.transportError = QStringLiteral("failed to write greeter request: %1") + .arg(QString::fromLocal8Bit(std::strerror(errno))); + ::close(fd); + return response; + } + written += chunk; + } + + QString waitError; + if (!waitForReadable(fd, kReadTimeoutMs, &waitError)) { + response.transportError = waitError; + ::close(fd); + return response; + } + + QByteArray reply; + char buffer[1024]; + while (reply.indexOf('\n') < 0) { + const auto chunk = ::read(fd, buffer, sizeof(buffer)); + if (chunk < 0) { + response.transportError = QStringLiteral("failed to read greeter response: %1") + .arg(QString::fromLocal8Bit(std::strerror(errno))); + ::close(fd); + return response; + } + if (chunk == 0) { + break; + } + reply.append(buffer, static_cast<qsizetype>(chunk)); + if (reply.indexOf('\n') < 0 && 
!waitForReadable(fd, kConnectTimeoutMs, &waitError)) { + response.transportError = waitError; + ::close(fd); + return response; + } + } + ::close(fd); + + const auto newlineIndex = reply.indexOf('\n'); + if (newlineIndex >= 0) { + reply.truncate(newlineIndex); + } + + const auto document = QJsonDocument::fromJson(reply); + if (!document.isObject()) { + response.transportError = QStringLiteral("invalid greeter response payload"); + return response; + } + + const auto object = document.object(); + response.transportOk = true; + response.type = object.value(QStringLiteral("type")).toString(); + response.ok = object.value(QStringLiteral("ok")).toBool(); + response.state = object.value(QStringLiteral("state")).toString(); + response.message = object.value(QStringLiteral("message")).toString(); + response.sessionName = object.value(QStringLiteral("session_name")).toString(); + response.backgroundPath = object.value(QStringLiteral("background")).toString(); + response.iconPath = object.value(QStringLiteral("icon")).toString(); + if (response.type == QStringLiteral("error") && response.message.isEmpty()) { + response.message = QStringLiteral("greeter returned an unspecified error"); + } + return response; +} + +void GreeterBackend::setGreeting(const QString &backgroundPath, const QString &iconPath, const QString &sessionName) { + const auto nextBackground = backgroundPath.isEmpty() ? QUrl() : QUrl::fromLocalFile(backgroundPath); + const auto nextIcon = iconPath.isEmpty() ? QUrl() : QUrl::fromLocalFile(iconPath); + const auto nextSessionName = sessionName.isEmpty() ? 
QStringLiteral("KDE on Wayland") : sessionName; + + if (m_backgroundUrl == nextBackground && m_iconUrl == nextIcon && m_sessionName == nextSessionName) { + return; + } + + m_backgroundUrl = nextBackground; + m_iconUrl = nextIcon; + m_sessionName = nextSessionName; + emit greetingChanged(); +} + +void GreeterBackend::setStatus(const QString &state, const QString &message) { + const auto nextState = state.isEmpty() ? QStringLiteral("greeter_ready") : state; + if (m_state == nextState && m_message == message) { + return; + } + + m_state = nextState; + m_message = message; + emit statusChanged(); +} + +void GreeterBackend::setBusy(bool busy) { + if (m_busy == busy) { + return; + } + + m_busy = busy; + emit busyChanged(); +} + +void GreeterBackend::applyError(const QString &message) { + setStatus(QStringLiteral("fatal_error"), message); +} diff --git a/local/recipes/system/redbear-greeter/source/ui/greeter_backend.h b/local/recipes/system/redbear-greeter/source/ui/greeter_backend.h new file mode 100644 index 00000000..fa3a10f3 --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/ui/greeter_backend.h @@ -0,0 +1,60 @@ +#pragma once + +#include <QObject> +#include <QUrl> + +class GreeterBackend final : public QObject { + Q_OBJECT + Q_PROPERTY(QUrl backgroundUrl READ backgroundUrl NOTIFY greetingChanged) + Q_PROPERTY(QUrl iconUrl READ iconUrl NOTIFY greetingChanged) + Q_PROPERTY(QString sessionName READ sessionName NOTIFY greetingChanged) + Q_PROPERTY(QString state READ state NOTIFY statusChanged) + Q_PROPERTY(QString message READ message NOTIFY statusChanged) + Q_PROPERTY(bool busy READ busy NOTIFY busyChanged) + +public: + explicit GreeterBackend(QObject *parent = nullptr); + + [[nodiscard]] QUrl backgroundUrl() const; + [[nodiscard]] QUrl iconUrl() const; + [[nodiscard]] QString sessionName() const; + [[nodiscard]] QString state() const; + [[nodiscard]] QString message() const; + [[nodiscard]] bool busy() const; + + Q_INVOKABLE void initialize(); + Q_INVOKABLE void 
submitLogin(const QString &username, const QString &password); + Q_INVOKABLE void requestShutdown(); + Q_INVOKABLE void requestReboot(); + +signals: + void greetingChanged(); + void statusChanged(); + void busyChanged(); + +private: + struct Response { + bool transportOk = false; + QString transportError; + QString type; + bool ok = false; + QString state; + QString message; + QString sessionName; + QString backgroundPath; + QString iconPath; + }; + + [[nodiscard]] Response sendRequest(const QByteArray &payload) const; + void setGreeting(const QString &backgroundPath, const QString &iconPath, const QString &sessionName); + void setStatus(const QString &state, const QString &message); + void setBusy(bool busy); + void applyError(const QString &message); + + QUrl m_backgroundUrl; + QUrl m_iconUrl; + QString m_sessionName = QStringLiteral("KDE on Wayland"); + QString m_state = QStringLiteral("starting"); + QString m_message = QStringLiteral("Connecting to greeter"); + bool m_busy = false; +}; diff --git a/local/recipes/system/redbear-greeter/source/ui/main.cpp b/local/recipes/system/redbear-greeter/source/ui/main.cpp new file mode 100644 index 00000000..1a65f1fb --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/ui/main.cpp @@ -0,0 +1,25 @@ +#include <QGuiApplication> +#include <QQmlApplicationEngine> +#include <QQmlContext> +#include <QQuickStyle> + +#include "greeter_backend.h" + +int main(int argc, char *argv[]) { + qputenv("QT_QUICK_CONTROLS_STYLE", QByteArrayLiteral("Basic")); + + QGuiApplication app(argc, argv); + QQuickStyle::setStyle(QStringLiteral("Basic")); + + GreeterBackend backend; + QQmlApplicationEngine engine; + engine.rootContext()->setContextProperty(QStringLiteral("greeterBackend"), &backend); + engine.load(QUrl(QStringLiteral("qrc:/Main.qml"))); + + if (engine.rootObjects().isEmpty()) { + return 1; + } + + backend.initialize(); + return app.exec(); +} diff --git a/local/recipes/system/redbear-greeter/source/ui/resources.qrc b/local/recipes/system/redbear-greeter/source/ui/resources.qrc new file mode 
100644 index 00000000..f907b18e --- /dev/null +++ b/local/recipes/system/redbear-greeter/source/ui/resources.qrc @@ -0,0 +1,5 @@ +<RCC> + <qresource prefix="/"> + <file>Main.qml</file> + </qresource> +</RCC> diff --git a/local/recipes/system/redbear-hwutils/recipe.toml b/local/recipes/system/redbear-hwutils/recipe.toml index 341937f1..f04b2c27 100644 --- a/local/recipes/system/redbear-hwutils/recipe.toml +++ b/local/recipes/system/redbear-hwutils/recipe.toml @@ -10,6 +10,7 @@ template = "cargo" "/usr/bin/redbear-usb-check" = "redbear-usb-check" "/usr/bin/redbear-bluetooth-battery-check" = "redbear-bluetooth-battery-check" "/usr/bin/redbear-drm-display-check" = "redbear-drm-display-check" +"/usr/bin/redbear-greeter-check" = "redbear-greeter-check" "/usr/bin/redbear-phase4-wayland-check" = "redbear-phase4-wayland-check" "/usr/bin/redbear-phase5-network-check" = "redbear-phase5-network-check" "/usr/bin/redbear-phase5-wifi-check" = "redbear-phase5-wifi-check" diff --git a/local/recipes/system/redbear-hwutils/source/Cargo.toml b/local/recipes/system/redbear-hwutils/source/Cargo.toml index ec1b6947..d75994f0 100644 --- a/local/recipes/system/redbear-hwutils/source/Cargo.toml +++ b/local/recipes/system/redbear-hwutils/source/Cargo.toml @@ -59,6 +59,10 @@ path = "src/bin/redbear-phase5-wifi-link-check.rs" name = "redbear-phase6-kde-check" path = "src/bin/redbear-phase6-kde-check.rs" +[[bin]] +name = "redbear-greeter-check" +path = "src/bin/redbear-greeter-check.rs" + [[bin]] name = "redbear-drm-display-check" path = "src/bin/redbear-drm-display-check.rs" diff --git a/local/recipes/system/redbear-hwutils/source/src/bin/redbear-greeter-check.rs b/local/recipes/system/redbear-hwutils/source/src/bin/redbear-greeter-check.rs new file mode 100644 index 00000000..17c51a47 --- /dev/null +++ b/local/recipes/system/redbear-hwutils/source/src/bin/redbear-greeter-check.rs @@ -0,0 +1,350 @@ +use std::{ + fs, + io::{BufRead, BufReader, Write}, + os::unix::net::UnixStream, + path::Path, + process, + thread, + time::{Duration, Instant}, +}; + +use 
serde::{Deserialize, Serialize}; + +const PROGRAM: &str = "redbear-greeter-check"; +const USAGE: &str = "Usage: redbear-greeter-check [--invalid USER PASSWORD | --valid USER PASSWORD]\n\nQuery the installed Red Bear greeter surface inside the guest."; +const GREETER_SOCKET: &str = "/run/redbear-greeterd.sock"; +const GREETERD_BIN: &str = "/usr/bin/redbear-greeterd"; +const GREETER_UI_BIN: &str = "/usr/bin/redbear-greeter-ui"; +const AUTHD_BIN: &str = "/usr/bin/redbear-authd"; +const SESSION_LAUNCH_BIN: &str = "/usr/bin/redbear-session-launch"; +const GREETER_BACKGROUND: &str = "/usr/share/redbear/greeter/background.png"; +const GREETER_ICON: &str = "/usr/share/redbear/greeter/icon.png"; +const AUTHD_SERVICE: &str = "/usr/lib/init.d/19_redbear-authd.service"; +const DISPLAY_SHIM_SERVICE: &str = "/usr/lib/init.d/20_display.service"; +const GREETER_SERVICE: &str = "/usr/lib/init.d/20_greeter.service"; +const ACTIVATE_CONSOLE_SERVICE: &str = "/usr/lib/init.d/29_activate_console.service"; +const CONSOLE_SERVICE: &str = "/usr/lib/init.d/30_console.service"; +const DEBUG_CONSOLE_SERVICE: &str = "/usr/lib/init.d/31_debug_console.service"; +const VALIDATION_REQUEST: &str = "/run/redbear-kde-session.validation-request"; +const VALIDATION_SUCCESS: &str = "/run/redbear-kde-session.validation-success"; + +#[derive(Debug, Serialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum Request<'a> { + Hello { version: u32 }, + SubmitLogin { username: &'a str, password: &'a str }, +} + +#[derive(Debug, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum Response { + HelloOk { + background: String, + icon: String, + session_name: String, + state: String, + message: String, + }, + LoginResult { + ok: bool, + state: String, + message: String, + }, + Error { + message: String, + }, + #[serde(other)] + Other, +} + +#[derive(Debug, PartialEq, Eq)] +enum Mode { + Status, + Invalid { username: String, password: String }, + Valid { username: String, password: String 
}, +} + +fn parse_mode_from_args<I>(args: I) -> Result<Mode, String> +where + I: IntoIterator<Item = String>, +{ + let mut args = args.into_iter(); + match args.next() { + None => Ok(Mode::Status), + Some(flag) if flag == "--help" || flag == "-h" => Err(String::new()), + Some(flag) if flag == "--invalid" => { + let username = args.next().ok_or_else(|| String::from("missing username after --invalid"))?; + let password = args.next().ok_or_else(|| String::from("missing password after --invalid"))?; + if args.next().is_some() { + return Err(String::from("unexpected extra arguments after --invalid USER PASSWORD")); + } + Ok(Mode::Invalid { username, password }) + } + Some(flag) if flag == "--valid" => { + let username = args.next().ok_or_else(|| String::from("missing username after --valid"))?; + let password = args.next().ok_or_else(|| String::from("missing password after --valid"))?; + if args.next().is_some() { + return Err(String::from("unexpected extra arguments after --valid USER PASSWORD")); + } + Ok(Mode::Valid { username, password }) + } + Some(other) => Err(format!("unsupported argument '{other}'")), + } +} + +fn parse_mode() -> Result<Mode, String> { + parse_mode_from_args(std::env::args().skip(1)) +} + +fn send_request(request: &Request<'_>) -> Result<Response, String> { + let mut stream = UnixStream::connect(GREETER_SOCKET) + .map_err(|err| format!("failed to connect to {GREETER_SOCKET}: {err}"))?; + let payload = serde_json::to_string(request) + .map_err(|err| format!("failed to serialize greeter request: {err}"))?; + stream + .write_all(payload.as_bytes()) + .and_then(|_| stream.write_all(b"\n")) + .map_err(|err| format!("failed to write greeter request: {err}"))?; + + let mut reader = BufReader::new(stream); + let mut line = String::new(); + reader + .read_line(&mut line) + .map_err(|err| format!("failed to read greeter response: {err}"))?; + serde_json::from_str(line.trim()).map_err(|err| format!("failed to parse greeter response: {err}")) +} + +fn require_path(path: &str) -> Result<(), String> { + if 
Path::new(path).exists() { + println!("{path}"); + Ok(()) + } else { + Err(format!("missing {path}")) + } +} + +fn wait_for_validation_marker(path: &str, timeout: Duration) -> Result<(), String> { + let start = Instant::now(); + while start.elapsed() <= timeout { + if Path::new(path).exists() { + return Ok(()); + } + thread::sleep(Duration::from_millis(250)); + } + + Err(format!("timed out waiting for {path}")) +} + +fn wait_for_greeter_ready(timeout: Duration) -> Result<(), String> { + let start = Instant::now(); + while start.elapsed() <= timeout { + match send_request(&Request::Hello { version: 1 }) { + Ok(Response::HelloOk { state, message, .. }) if state == "greeter_ready" => { + println!("GREETER_VALID_READY_MESSAGE={message}"); + return Ok(()); + } + Ok(_) => {} + Err(_) => {} + } + thread::sleep(Duration::from_millis(250)); + } + + Err(String::from("timed out waiting for greeter to return to greeter_ready")) +} + +fn run_status() -> Result<(), String> { + println!("=== Red Bear Greeter Runtime Check ==="); + require_path(GREETERD_BIN)?; + require_path(GREETER_UI_BIN)?; + require_path(AUTHD_BIN)?; + require_path(SESSION_LAUNCH_BIN)?; + require_path(GREETER_BACKGROUND)?; + require_path(GREETER_ICON)?; + require_path(AUTHD_SERVICE)?; + require_path(DISPLAY_SHIM_SERVICE)?; + require_path(GREETER_SERVICE)?; + require_path(ACTIVATE_CONSOLE_SERVICE)?; + require_path(CONSOLE_SERVICE)?; + require_path(DEBUG_CONSOLE_SERVICE)?; + require_path(GREETER_SOCKET)?; + + match send_request(&Request::Hello { version: 1 })? 
{ + Response::HelloOk { + background, + icon, + session_name, + state, + message, + } => { + println!("GREETER_BACKGROUND={background}"); + println!("GREETER_ICON={icon}"); + println!("GREETER_SESSION={session_name}"); + println!("GREETER_STATE={state}"); + println!("GREETER_MESSAGE={message}"); + println!("GREETER_HELLO=ok"); + Ok(()) + } + Response::Error { message } => Err(format!("greeter hello failed: {message}")), + Response::Other => Err(String::from("unexpected greeter hello response")), + Response::LoginResult { .. } => Err(String::from("unexpected login result when greeting greeter")), + } +} + +fn run_invalid(username: &str, password: &str) -> Result<(), String> { + match send_request(&Request::SubmitLogin { username, password })? { + Response::LoginResult { ok, state, message } => { + println!("GREETER_INVALID_STATE={state}"); + println!("GREETER_INVALID_MESSAGE={message}"); + if ok { + Err(String::from("invalid login unexpectedly succeeded")) + } else { + println!("GREETER_INVALID=ok"); + Ok(()) + } + } + Response::Error { message } => Err(format!("invalid-login request failed: {message}")), + Response::Other => Err(String::from("unexpected greeter response for invalid login")), + Response::HelloOk { .. } => Err(String::from("unexpected hello response for invalid login")), + } +} + +fn run_valid(username: &str, password: &str) -> Result<(), String> { + let _ = fs::remove_file(VALIDATION_REQUEST); + let _ = fs::remove_file(VALIDATION_SUCCESS); + fs::write(VALIDATION_REQUEST, b"bounded-session\n") + .map_err(|err| format!("failed to create validation request: {err}"))?; + + match send_request(&Request::SubmitLogin { username, password })? 
{ + Response::LoginResult { ok, state, message } => { + println!("GREETER_VALID_STATE={state}"); + println!("GREETER_VALID_MESSAGE={message}"); + if !ok { + let _ = fs::remove_file(VALIDATION_REQUEST); + return Err(String::from("valid login unexpectedly failed")); + } + } + Response::Error { message } => { + let _ = fs::remove_file(VALIDATION_REQUEST); + return Err(format!("valid-login request failed: {message}")); + } + Response::Other => { + let _ = fs::remove_file(VALIDATION_REQUEST); + return Err(String::from("unexpected greeter response for valid login")); + } + Response::HelloOk { .. } => { + let _ = fs::remove_file(VALIDATION_REQUEST); + return Err(String::from("unexpected hello response for valid login")); + } + } + + wait_for_validation_marker(VALIDATION_SUCCESS, Duration::from_secs(30))?; + println!("GREETER_VALID_SESSION=started"); + wait_for_greeter_ready(Duration::from_secs(30))?; + + let _ = fs::remove_file(VALIDATION_REQUEST); + let _ = fs::remove_file(VALIDATION_SUCCESS); + println!("GREETER_VALID=ok"); + Ok(()) +} + +fn main() { + let mode = match parse_mode() { + Ok(mode) => mode, + Err(err) if err.is_empty() => { + println!("{USAGE}"); + process::exit(0); + } + Err(err) => { + eprintln!("{PROGRAM}: {err}"); + eprintln!("{USAGE}"); + process::exit(1); + } + }; + + let result = match mode { + Mode::Status => run_status(), + Mode::Invalid { username, password } => run_invalid(&username, &password), + Mode::Valid { username, password } => run_valid(&username, &password), + }; + + if let Err(err) = result { + eprintln!("{PROGRAM}: {err}"); + process::exit(1); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_mode_defaults_to_status() { + assert_eq!(parse_mode_from_args(Vec::::new()).expect("status mode should parse"), Mode::Status); + } + + #[test] + fn parse_mode_accepts_invalid_login_arguments() { + assert_eq!( + parse_mode_from_args(vec![ + String::from("--invalid"), + String::from("alice"), + String::from("wrong"), + ]) + 
.expect("invalid-login mode should parse"), + Mode::Invalid { + username: String::from("alice"), + password: String::from("wrong"), + } + ); + } + + #[test] + fn parse_mode_accepts_valid_login_arguments() { + assert_eq!( + parse_mode_from_args(vec![ + String::from("--valid"), + String::from("alice"), + String::from("password"), + ]) + .expect("valid-login mode should parse"), + Mode::Valid { + username: String::from("alice"), + password: String::from("password"), + } + ); + } + + #[test] + fn parse_mode_rejects_extra_valid_arguments() { + assert_eq!( + parse_mode_from_args(vec![ + String::from("--valid"), + String::from("alice"), + String::from("password"), + String::from("extra"), + ]), + Err(String::from("unexpected extra arguments after --valid USER PASSWORD")) + ); + } + + #[test] + fn parse_mode_rejects_extra_invalid_arguments() { + assert_eq!( + parse_mode_from_args(vec![ + String::from("--invalid"), + String::from("alice"), + String::from("wrong"), + String::from("extra"), + ]), + Err(String::from("unexpected extra arguments after --invalid USER PASSWORD")) + ); + } + + #[test] + fn parse_mode_rejects_unknown_flags() { + assert_eq!( + parse_mode_from_args(vec![String::from("--bogus")]), + Err(String::from("unsupported argument '--bogus'")) + ); + } +} diff --git a/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase-iommu-check.rs b/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase-iommu-check.rs index 34656410..cfa95724 100644 --- a/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase-iommu-check.rs +++ b/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase-iommu-check.rs @@ -49,6 +49,9 @@ fn run() -> Result<(), String> { if !stdout.contains("discovery_source=") { return Err("iommu self-test did not report discovery source".to_string()); } + if !stdout.contains("dmar_present=") { + return Err("iommu self-test did not report DMAR presence state".to_string()); + } if 
!stdout.contains("units_initialized_now=") { return Err("iommu self-test did not report initialized unit count".to_string()); } diff --git a/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase-ps2-check.rs b/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase-ps2-check.rs index b6fb31ee..d77d5e48 100644 --- a/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase-ps2-check.rs +++ b/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase-ps2-check.rs @@ -1,6 +1,10 @@ +use std::fs::OpenOptions; +use std::os::unix::fs::OpenOptionsExt; use std::path::Path; use std::process::{self, Command}; +use syscall::O_NONBLOCK; + use redbear_hwutils::parse_args; const PROGRAM: &str = "redbear-phase-ps2-check"; @@ -8,7 +12,14 @@ const USAGE: &str = "Usage: redbear-phase-ps2-check\n\nRun the bounded PS/2 and serio proof check inside the guest."; fn require_path(path: &str) -> Result<(), String> { - if Path::new(path).exists() { + if Path::new(path).exists() + || OpenOptions::new() + .read(true) + .write(true) + .custom_flags(O_NONBLOCK as i32) + .open(path) + .is_ok() + { println!("present={path}"); Ok(()) } else { diff --git a/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase4-wayland-check.rs b/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase4-wayland-check.rs index ed402e0d..0bee0c4c 100644 --- a/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase4-wayland-check.rs +++ b/local/recipes/system/redbear-hwutils/source/src/bin/redbear-phase4-wayland-check.rs @@ -1,4 +1,4 @@ -use std::path::Path; +use std::{env, path::{Path, PathBuf}}; use std::process::{self, Command}; use redbear_hwutils::parse_args; @@ -51,6 +51,25 @@ fn require_wayland_smoke_marker() -> Result<(), String> { Err("qt6-wayland-smoke did not leave a success marker".to_string()) } +fn require_wayland_socket() -> Result<(), String> { + let runtime_dir = env::var("XDG_RUNTIME_DIR") + .ok() + .filter(|value| !value.is_empty()) + 
.unwrap_or_else(|| "/tmp/run/user/0".to_string()); + let display = env::var("WAYLAND_DISPLAY") + .ok() + .filter(|value| !value.is_empty()) + .unwrap_or_else(|| "wayland-0".to_string()); + let socket = PathBuf::from(runtime_dir).join(display); + + if socket.exists() { + println!("{}", socket.display()); + Ok(()) + } else { + Err(format!("missing Wayland socket {}", socket.display())) + } +} + fn run() -> Result<(), String> { parse_args(PROGRAM, USAGE, std::env::args()).map_err(|err| { if err.is_empty() { @@ -66,6 +85,7 @@ fn run() -> Result<(), String> { require_path("/usr/bin/qt6-plugin-check")?; require_path("/usr/bin/qt6-wayland-smoke")?; require_path("/home/root/.wayland-session.started")?; + require_wayland_socket()?; require_wayland_smoke_marker()?; let status = Command::new("redbear-info") diff --git a/local/recipes/system/redbear-session-launch/recipe.toml b/local/recipes/system/redbear-session-launch/recipe.toml new file mode 100644 index 00000000..b2692488 --- /dev/null +++ b/local/recipes/system/redbear-session-launch/recipe.toml @@ -0,0 +1,8 @@ +[source] +path = "source" + +[build] +template = "cargo" + +[package.files] +"/usr/bin/redbear-session-launch" = "redbear-session-launch" diff --git a/local/recipes/system/redbear-session-launch/source/Cargo.toml b/local/recipes/system/redbear-session-launch/source/Cargo.toml new file mode 100644 index 00000000..b561faf6 --- /dev/null +++ b/local/recipes/system/redbear-session-launch/source/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "redbear-session-launch" +version = "0.1.0" +edition = "2024" + +[[bin]] +name = "redbear-session-launch" +path = "src/main.rs" + +[dependencies] +libc = "0.2" diff --git a/local/recipes/system/redbear-session-launch/source/src/main.rs b/local/recipes/system/redbear-session-launch/source/src/main.rs new file mode 100644 index 00000000..a78bdeef --- /dev/null +++ b/local/recipes/system/redbear-session-launch/source/src/main.rs @@ -0,0 +1,536 @@ +use std::{ + 
collections::{BTreeMap, HashMap}, + env, + ffi::CString, + fs, + io, + os::unix::process::CommandExt, + path::{Path, PathBuf}, + process::{self, Command}, +}; + +#[derive(Clone, Debug, PartialEq, Eq)] +struct Account { + username: String, + uid: u32, + gid: u32, + home: String, + shell: String, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +struct GroupEntry { + gid: u32, + members: Vec<String>, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +enum LaunchMode { + Session, + Command { program: String, args: Vec<String> }, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +struct Args { + username: String, + vt: u32, + session: String, + runtime_dir: Option<PathBuf>, + wayland_display: String, + mode: LaunchMode, +} + +fn usage() -> &'static str { + "Usage: redbear-session-launch --username USER [--mode session|command] [--session kde-wayland] [--vt N] [--runtime-dir PATH] [--wayland-display NAME] [--command PROGRAM [ARGS...]]" +} + +fn parse_args_from<I>(args: I) -> Result<Args, String> +where + I: IntoIterator<Item = String>, +{ + let mut args = args.into_iter(); + let mut username = None; + let mut vt = 3_u32; + let mut session = String::from("kde-wayland"); + let mut runtime_dir = None; + let mut wayland_display = String::from("wayland-0"); + let mut mode = String::from("session"); + let mut command = None; + + while let Some(arg) = args.next() { + match arg.as_str() { + "--help" | "-h" => return Err(String::new()), + "--username" => username = Some(args.next().ok_or_else(|| String::from("missing value after --username"))?), + "--vt" => { + let value = args.next().ok_or_else(|| String::from("missing value after --vt"))?; + vt = value.parse().map_err(|_| format!("invalid VT '{value}'"))?; + } + "--session" => session = args.next().ok_or_else(|| String::from("missing value after --session"))?, + "--runtime-dir" => { + runtime_dir = Some(PathBuf::from( + args.next().ok_or_else(|| String::from("missing value after --runtime-dir"))?, + )); + } + "--wayland-display" => { + wayland_display = args + .next() + .ok_or_else(|| 
String::from("missing value after --wayland-display"))?; + } + "--mode" => mode = args.next().ok_or_else(|| String::from("missing value after --mode"))?, + "--command" => { + let program = args.next().ok_or_else(|| String::from("missing program after --command"))?; + let rest = args.collect::<Vec<_>>(); + command = Some((program, rest)); + break; + } + other => return Err(format!("unrecognized argument '{other}'")), + } + } + + let username = username.ok_or_else(|| String::from("--username is required"))?; + let mode = match mode.as_str() { + "session" => LaunchMode::Session, + "command" => { + let (program, args) = command.ok_or_else(|| String::from("--command is required when --mode=command"))?; + LaunchMode::Command { program, args } + } + other => return Err(format!("unsupported launch mode '{other}'")), + }; + + Ok(Args { + username, + vt, + session, + runtime_dir, + wayland_display, + mode, + }) +} + +fn parse_args() -> Result<Args, String> { + parse_args_from(env::args().skip(1)) +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum AccountFormat { + Redox, + Unix, +} + +fn split_account_fields(line: &str) -> (AccountFormat, Vec<&str>) { + let format = if line.contains(';') { + AccountFormat::Redox + } else { + AccountFormat::Unix + }; + let delimiter = match format { + AccountFormat::Redox => ';', + AccountFormat::Unix => ':', + }; + (format, line.split(delimiter).collect::<Vec<_>>()) +} + +fn parse_passwd(contents: &str) -> Result<HashMap<String, Account>, String> { + let mut accounts = HashMap::new(); + + for (index, raw_line) in contents.lines().enumerate() { + let line = raw_line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + + let (format, parts) = split_account_fields(line); + let (uid_index, gid_index, home_index, shell_index) = match format { + AccountFormat::Redox if parts.len() >= 6 => (1, 2, 4, 5), + AccountFormat::Unix if parts.len() >= 7 => (2, 3, 5, 6), + AccountFormat::Redox => return Err(format!("invalid Redox passwd entry on line {}", index + 1)), + 
AccountFormat::Unix => return Err(format!("invalid passwd entry on line {}", index + 1)), + }; + + let uid = parts[uid_index] + .parse::<u32>() + .map_err(|_| format!("invalid uid on line {}", index + 1))?; + let gid = parts[gid_index] + .parse::<u32>() + .map_err(|_| format!("invalid gid on line {}", index + 1))?; + + accounts.insert( + parts[0].to_string(), + Account { + username: parts[0].to_string(), + uid, + gid, + home: parts[home_index].to_string(), + shell: parts[shell_index].to_string(), + }, + ); + } + + Ok(accounts) +} + +fn parse_groups(contents: &str) -> Result<Vec<GroupEntry>, String> { + let mut groups = Vec::new(); + + for (index, raw_line) in contents.lines().enumerate() { + let line = raw_line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + + let (_format, parts) = split_account_fields(line); + if parts.len() < 4 { + return Err(format!("invalid group entry on line {}", index + 1)); + } + + let gid = parts[2] + .parse::<u32>() + .map_err(|_| format!("invalid group gid on line {}", index + 1))?; + let members = if parts[3].is_empty() { + Vec::new() + } else { + parts[3].split(',').map(str::to_string).collect::<Vec<_>>() + }; + + groups.push(GroupEntry { gid, members }); + } + + Ok(groups) +} + +fn load_account(username: &str) -> Result<Account, String> { + let passwd = fs::read_to_string("/etc/passwd").map_err(|err| format!("failed to read /etc/passwd: {err}"))?; + let accounts = parse_passwd(&passwd)?; + accounts + .get(username) + .cloned() + .ok_or_else(|| format!("unknown user '{username}'")) +} + +fn load_supplementary_groups(username: &str, primary_gid: u32) -> Result<Vec<u32>, String> { + let Ok(group_contents) = fs::read_to_string("/etc/group") else { + return Ok(vec![primary_gid]); + }; + + let mut groups = parse_groups(&group_contents)? 
+ .into_iter() + .filter(|entry| entry.gid == primary_gid || entry.members.iter().any(|member| member == username)) + .map(|entry| entry.gid) + .collect::>(); + groups.sort_unstable(); + groups.dedup(); + if groups.is_empty() { + groups.push(primary_gid); + } + Ok(groups) +} + +fn default_runtime_dir(uid: u32) -> PathBuf { + if Path::new("/run/user").exists() { + PathBuf::from(format!("/run/user/{uid}")) + } else { + PathBuf::from(format!("/tmp/run/user/{uid}")) + } +} + +fn ensure_runtime_dir(path: &Path, uid: u32, gid: u32) -> Result<(), String> { + fs::create_dir_all(path).map_err(|err| format!("failed to create runtime dir {}: {err}", path.display()))?; + let c_path = CString::new(path.as_os_str().as_encoded_bytes()) + .map_err(|_| format!("runtime dir {} contains interior NUL", path.display()))?; + let result = unsafe { libc::chown(c_path.as_ptr(), uid, gid) }; + if result != 0 { + return Err(format!("failed to chown runtime dir {}: {}", path.display(), io::Error::last_os_error())); + } + fs::set_permissions(path, std::os::unix::fs::PermissionsExt::from_mode(0o700)) + .map_err(|err| format!("failed to set runtime dir permissions on {}: {err}", path.display())) +} + +fn env_value(keys: &[&str]) -> Option { + keys.iter().find_map(|key| env::var(key).ok()) +} + +fn build_environment(account: &Account, args: &Args, runtime_dir: &Path) -> BTreeMap { + let mut values = BTreeMap::new(); + values.insert(String::from("HOME"), account.home.clone()); + values.insert(String::from("USER"), account.username.clone()); + values.insert(String::from("LOGNAME"), account.username.clone()); + values.insert(String::from("SHELL"), account.shell.clone()); + values.insert(String::from("PATH"), String::from("/usr/bin:/bin")); + values.insert(String::from("XDG_RUNTIME_DIR"), runtime_dir.display().to_string()); + values.insert(String::from("WAYLAND_DISPLAY"), args.wayland_display.clone()); + values.insert(String::from("XDG_SEAT"), String::from("seat0")); + 
values.insert(String::from("XDG_VTNR"), args.vt.to_string()); + values.insert(String::from("LIBSEAT_BACKEND"), String::from("seatd")); + values.insert(String::from("SEATD_SOCK"), String::from("/run/seatd.sock")); + values.insert(String::from("DISPLAY"), String::new()); + values.insert(String::from("XDG_SESSION_TYPE"), String::from("wayland")); + + if let Some(theme) = env_value(&["XCURSOR_THEME"]) { + values.insert(String::from("XCURSOR_THEME"), theme); + } + if let Some(root) = env_value(&["XKB_CONFIG_ROOT"]) { + values.insert(String::from("XKB_CONFIG_ROOT"), root); + } + if let Some(path) = env_value(&["QT_PLUGIN_PATH"]) { + values.insert(String::from("QT_PLUGIN_PATH"), path); + } + if let Some(path) = env_value(&["QT_QPA_PLATFORM_PLUGIN_PATH"]) { + values.insert(String::from("QT_QPA_PLATFORM_PLUGIN_PATH"), path); + } + if let Some(path) = env_value(&["QML2_IMPORT_PATH"]) { + values.insert(String::from("QML2_IMPORT_PATH"), path); + } + + match args.mode { + LaunchMode::Session => { + values.insert(String::from("XDG_CURRENT_DESKTOP"), String::from("KDE")); + values.insert(String::from("KDE_FULL_SESSION"), String::from("true")); + } + LaunchMode::Command { .. 
} => {} + } + + values +} + +#[cfg(not(target_os = "redox"))] +fn apply_groups(groups: &[u32]) -> io::Result<()> { + let raw_groups = groups.iter().map(|gid| *gid as libc::gid_t).collect::>(); + let result = unsafe { libc::setgroups(raw_groups.len(), raw_groups.as_ptr()) }; + if result == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } +} + +#[cfg(target_os = "redox")] +fn apply_groups(_groups: &[u32]) -> io::Result<()> { + Ok(()) +} + +fn command_for(args: &Args) -> Result<(String, Vec), String> { + match &args.mode { + LaunchMode::Session => { + if args.session != "kde-wayland" { + return Err(format!("unsupported session '{}'", args.session)); + } + + if Path::new("/usr/bin/dbus-run-session").exists() { + Ok(( + String::from("/usr/bin/dbus-run-session"), + vec![String::from("--"), String::from("/usr/bin/redbear-kde-session")], + )) + } else { + Ok((String::from("/usr/bin/redbear-kde-session"), Vec::new())) + } + } + LaunchMode::Command { program, args } => Ok((program.clone(), args.clone())), + } +} + +fn run() -> Result<(), String> { + let args = match parse_args() { + Ok(parsed) => parsed, + Err(err) if err.is_empty() => { + println!("{}", usage()); + return Ok(()); + } + Err(err) => return Err(err), + }; + + let account = load_account(&args.username)?; + let groups = load_supplementary_groups(&account.username, account.gid)?; + let runtime_dir = args + .runtime_dir + .clone() + .unwrap_or_else(|| default_runtime_dir(account.uid)); + ensure_runtime_dir(&runtime_dir, account.uid, account.gid)?; + let envs = build_environment(&account, &args, &runtime_dir); + let (program, program_args) = command_for(&args)?; + + let group_clone = groups.clone(); + let mut command = Command::new(&program); + command.args(&program_args); + command.env_clear(); + command.envs(&envs); + command.uid(account.uid); + command.gid(account.gid); + unsafe { + command.pre_exec(move || apply_groups(&group_clone)); + } + + let error = command.exec(); + Err(format!("failed to 
exec {program}: {error}")) +} + +fn main() { + if let Err(err) = run() { + eprintln!("redbear-session-launch: {err}"); + eprintln!("{}", usage()); + process::exit(1); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_args_accepts_command_mode() { + let parsed = parse_args_from(vec![ + String::from("--username"), + String::from("greeter"), + String::from("--mode"), + String::from("command"), + String::from("--vt"), + String::from("7"), + String::from("--runtime-dir"), + String::from("/tmp/greeter"), + String::from("--wayland-display"), + String::from("wayland-7"), + String::from("--command"), + String::from("/usr/bin/redbear-greeter-ui"), + String::from("--fullscreen"), + ]) + .expect("command mode should parse"); + + assert_eq!(parsed.username, "greeter"); + assert_eq!(parsed.vt, 7); + assert_eq!(parsed.runtime_dir, Some(PathBuf::from("/tmp/greeter"))); + assert_eq!(parsed.wayland_display, "wayland-7"); + assert_eq!( + parsed.mode, + LaunchMode::Command { + program: String::from("/usr/bin/redbear-greeter-ui"), + args: vec![String::from("--fullscreen")], + } + ); + } + + #[test] + fn parse_args_requires_command_when_mode_is_command() { + assert_eq!( + parse_args_from(vec![ + String::from("--username"), + String::from("greeter"), + String::from("--mode"), + String::from("command"), + ]), + Err(String::from("--command is required when --mode=command")) + ); + } + + #[test] + fn parse_args_rejects_unknown_mode() { + assert_eq!( + parse_args_from(vec![ + String::from("--username"), + String::from("user"), + String::from("--mode"), + String::from("bogus"), + ]), + Err(String::from("unsupported launch mode 'bogus'")) + ); + } + + #[test] + fn parse_passwd_accepts_basic_entries() { + let accounts = parse_passwd("root:x:0:0:root:/root:/usr/bin/ion\nuser:x:1000:1000:User:/home/user:/usr/bin/ion\n") + .expect("passwd should parse"); + assert_eq!(accounts["root"].uid, 0); + assert_eq!(accounts["user"].home, "/home/user"); + } + + #[test] + fn 
parse_passwd_accepts_redox_style_layout() { + let accounts = parse_passwd("greeter;101;101;Greeter;/nonexistent;/usr/bin/ion\n") + .expect("redox passwd layout should parse"); + let greeter = accounts.get("greeter").expect("greeter entry should exist"); + assert_eq!(greeter.uid, 101); + assert_eq!(greeter.gid, 101); + assert_eq!(greeter.home, "/nonexistent"); + assert_eq!(greeter.shell, "/usr/bin/ion"); + } + + #[test] + fn parse_groups_collects_members() { + let groups = parse_groups("sudo:x:1:user,root\nusers:x:1000:user\n").expect("group should parse"); + assert_eq!(groups[0].gid, 1); + assert_eq!(groups[0].members, vec![String::from("user"), String::from("root")]); + } + + #[test] + fn parse_groups_accepts_redox_style_layout() { + let groups = parse_groups("greeter;x;101;greeter\n").expect("redox group should parse"); + assert_eq!(groups[0].gid, 101); + assert_eq!(groups[0].members, vec![String::from("greeter")]); + } + + #[test] + fn build_environment_sets_kde_session_values() { + let account = Account { + username: String::from("user"), + uid: 1000, + gid: 1000, + home: String::from("/home/user"), + shell: String::from("/usr/bin/ion"), + }; + let args = Args { + username: String::from("user"), + vt: 3, + session: String::from("kde-wayland"), + runtime_dir: None, + wayland_display: String::from("wayland-0"), + mode: LaunchMode::Session, + }; + + let envs = build_environment(&account, &args, Path::new("/run/user/1000")); + assert_eq!(envs["XDG_CURRENT_DESKTOP"], "KDE"); + assert_eq!(envs["KDE_FULL_SESSION"], "true"); + assert_eq!(envs["XDG_VTNR"], "3"); + } + + #[test] + fn build_environment_omits_kde_session_values_for_command_mode() { + let account = Account { + username: String::from("greeter"), + uid: 101, + gid: 101, + home: String::from("/nonexistent"), + shell: String::from("/usr/bin/ion"), + }; + let args = Args { + username: String::from("greeter"), + vt: 3, + session: String::from("kde-wayland"), + runtime_dir: None, + wayland_display: 
String::from("wayland-0"), + mode: LaunchMode::Command { + program: String::from("/usr/bin/redbear-greeter-ui"), + args: Vec::new(), + }, + }; + + let envs = build_environment(&account, &args, Path::new("/tmp/run/greeter")); + assert!(!envs.contains_key("XDG_CURRENT_DESKTOP")); + assert!(!envs.contains_key("KDE_FULL_SESSION")); + assert_eq!(envs["XDG_SESSION_TYPE"], "wayland"); + } + + #[test] + fn command_for_rejects_unknown_session_name() { + let args = Args { + username: String::from("user"), + vt: 3, + session: String::from("plasma-x11"), + runtime_dir: None, + wayland_display: String::from("wayland-0"), + mode: LaunchMode::Session, + }; + + assert_eq!( + command_for(&args), + Err(String::from("unsupported session 'plasma-x11'")) + ); + } +} diff --git a/local/recipes/system/redbear-sessiond/source/Cargo.toml b/local/recipes/system/redbear-sessiond/source/Cargo.toml index 949c9ecc..92f674c6 100644 --- a/local/recipes/system/redbear-sessiond/source/Cargo.toml +++ b/local/recipes/system/redbear-sessiond/source/Cargo.toml @@ -11,5 +11,6 @@ path = "src/main.rs" zbus = { version = "5", default-features = false, features = ["tokio"] } tokio = { version = "1", features = ["full"] } serde = { version = "1", features = ["derive"] } +serde_json = "1" libredox = "0.1" redox-syscall = { package = "redox_syscall", version = "0.7" } diff --git a/local/recipes/system/redbear-sessiond/source/src/control.rs b/local/recipes/system/redbear-sessiond/source/src/control.rs new file mode 100644 index 00000000..dd5fa332 --- /dev/null +++ b/local/recipes/system/redbear-sessiond/source/src/control.rs @@ -0,0 +1,176 @@ +use std::{ + fs, + io::{BufRead, BufReader}, + os::unix::{fs::PermissionsExt, net::UnixListener}, + path::Path, +}; + +use serde::Deserialize; + +use crate::runtime_state::SharedRuntime; + +pub const CONTROL_SOCKET_PATH: &str = "/run/redbear-sessiond-control.sock"; + +#[derive(Debug, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum ControlMessage { + 
SetSession { + username: String, + uid: u32, + vt: u32, + leader: u32, + state: String, + }, + ResetSession { + vt: u32, + }, +} + +fn apply_message(runtime: &SharedRuntime, message: ControlMessage) { + let Ok(mut runtime) = runtime.write() else { + eprintln!("redbear-sessiond: runtime state is poisoned"); + return; + }; + + match message { + ControlMessage::SetSession { + username, + uid, + vt, + leader, + state, + } => { + runtime.username = username; + runtime.uid = uid; + runtime.vt = vt; + runtime.leader = leader; + runtime.state = state; + runtime.active = true; + } + ControlMessage::ResetSession { vt } => { + runtime.username = String::from("root"); + runtime.uid = 0; + runtime.vt = vt; + runtime.leader = std::process::id(); + runtime.state = String::from("closing"); + runtime.active = true; + } + } +} + +pub fn start_control_socket(runtime: SharedRuntime) { + std::thread::spawn(move || { + if Path::new(CONTROL_SOCKET_PATH).exists() { + if let Err(err) = fs::remove_file(CONTROL_SOCKET_PATH) { + eprintln!("redbear-sessiond: failed to remove stale control socket: {err}"); + return; + } + } + + let listener = match UnixListener::bind(CONTROL_SOCKET_PATH) { + Ok(listener) => listener, + Err(err) => { + eprintln!("redbear-sessiond: failed to bind control socket: {err}"); + return; + } + }; + + if let Err(err) = fs::set_permissions(CONTROL_SOCKET_PATH, fs::Permissions::from_mode(0o600)) { + eprintln!("redbear-sessiond: failed to chmod control socket: {err}"); + } + + for stream in listener.incoming() { + let Ok(stream) = stream else { + continue; + }; + let mut reader = BufReader::new(stream); + let mut line = String::new(); + if reader.read_line(&mut line).is_err() { + continue; + } + match serde_json::from_str::(line.trim()) { + Ok(message) => apply_message(&runtime, message), + Err(err) => eprintln!("redbear-sessiond: invalid control message: {err}"), + } + } + }); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::runtime_state::shared_runtime; + + 
#[test] + fn set_session_message_updates_runtime_state() { + let runtime = shared_runtime(); + + apply_message( + &runtime, + ControlMessage::SetSession { + username: String::from("user"), + uid: 1000, + vt: 7, + leader: 4242, + state: String::from("active"), + }, + ); + + let runtime = runtime.read().expect("runtime lock should remain healthy"); + assert_eq!(runtime.username, "user"); + assert_eq!(runtime.uid, 1000); + assert_eq!(runtime.vt, 7); + assert_eq!(runtime.leader, 4242); + assert_eq!(runtime.state, "active"); + assert!(runtime.active); + } + + #[test] + fn reset_session_message_restores_root_scaffold() { + let runtime = shared_runtime(); + + apply_message( + &runtime, + ControlMessage::SetSession { + username: String::from("user"), + uid: 1000, + vt: 7, + leader: 4242, + state: String::from("active"), + }, + ); + apply_message(&runtime, ControlMessage::ResetSession { vt: 3 }); + + let runtime = runtime.read().expect("runtime lock should remain healthy"); + assert_eq!(runtime.username, "root"); + assert_eq!(runtime.uid, 0); + assert_eq!(runtime.vt, 3); + assert_eq!(runtime.state, "closing"); + assert!(runtime.active); + } + + #[test] + fn control_message_json_matches_expected_shape() { + let message = serde_json::from_str::( + r#"{"type":"set_session","username":"user","uid":1000,"vt":3,"leader":99,"state":"online"}"#, + ) + .expect("control message json should parse"); + + match message { + ControlMessage::SetSession { + username, + uid, + vt, + leader, + state, + } => { + assert_eq!(username, "user"); + assert_eq!(uid, 1000); + assert_eq!(vt, 3); + assert_eq!(leader, 99); + assert_eq!(state, "online"); + } + ControlMessage::ResetSession { .. 
} => panic!("expected set_session message"), + } + } +} diff --git a/local/recipes/system/redbear-sessiond/source/src/device_map.rs b/local/recipes/system/redbear-sessiond/source/src/device_map.rs index 01a8019c..7938046d 100644 --- a/local/recipes/system/redbear-sessiond/source/src/device_map.rs +++ b/local/recipes/system/redbear-sessiond/source/src/device_map.rs @@ -1,4 +1,12 @@ -use std::{collections::HashMap, fs::File, io}; +use std::{ + collections::HashMap, + fs::{self, File, OpenOptions}, + io, + path::{Path, PathBuf}, +}; + +#[cfg(unix)] +use std::os::unix::fs::MetadataExt; #[derive(Clone, Debug)] pub struct DeviceMap { @@ -28,13 +36,11 @@ impl DeviceMap { return Some(path.clone()); } - match (major, minor) { - (13, minor) if minor >= 68 => Some(format!("/dev/input/event{}", minor - 64)), - _ => None, - } + self.find_dynamic_path(major, minor) + .or_else(|| self.fallback_path(major, minor)) } - pub fn open_device(&self, major: u32, minor: u32) -> io::Result { + pub fn open_device(&self, major: u32, minor: u32) -> io::Result<(String, File)> { let Some(path) = self.resolve(major, minor) else { return Err(io::Error::new( io::ErrorKind::NotFound, @@ -42,6 +48,118 @@ impl DeviceMap { )); }; - File::open(path) + let file = OpenOptions::new() + .read(true) + .write(true) + .open(&path) + .or_else(|_| OpenOptions::new().read(true).open(&path)) + .or_else(|_| OpenOptions::new().write(true).open(&path))?; + + Ok((path, file)) + } + + fn fallback_path(&self, major: u32, minor: u32) -> Option { + match (major, minor) { + (13, minor) if minor >= 64 => { + let path = format!("/dev/input/event{}", minor - 64); + Path::new(&path).exists().then_some(path) + } + (226, minor) => { + let path = format!("/scheme/drm/card{minor}"); + Path::new(&path).exists().then_some(path) + } + _ => None, + } + } + + fn find_dynamic_path(&self, major: u32, minor: u32) -> Option { + for path in candidate_paths() { + if path_matches_device(&path, major, minor) { + return 
Some(path.to_string_lossy().into_owned()); + } + } + + None + } +} + +fn candidate_paths() -> Vec { + let mut paths = Vec::new(); + + paths.extend(read_dir_paths("/dev/input", |name| name.starts_with("event"))); + paths.extend(read_dir_paths("/scheme/drm", |name| name.starts_with("card"))); + + for direct in ["/dev/fb0", "/scheme/null", "/scheme/zero", "/scheme/rand"] { + let path = PathBuf::from(direct); + if path.exists() { + paths.push(path); + } + } + + paths +} + +fn read_dir_paths(dir: &str, include: impl Fn(&str) -> bool) -> Vec { + let mut paths = Vec::new(); + let Ok(entries) = fs::read_dir(dir) else { + return paths; + }; + + for entry in entries.flatten() { + let path = entry.path(); + let Some(name) = path.file_name().and_then(|name| name.to_str()) else { + continue; + }; + if include(name) { + paths.push(path); + } + } + + paths.sort(); + paths +} + +#[cfg(unix)] +fn path_matches_device(path: &Path, major: u32, minor: u32) -> bool { + let Ok(metadata) = fs::metadata(path) else { + return false; + }; + let rdev = metadata.rdev(); + dev_major(rdev) == major && dev_minor(rdev) == minor +} + +#[cfg(not(unix))] +fn path_matches_device(_path: &Path, _major: u32, _minor: u32) -> bool { + false +} + +fn dev_major(device: u64) -> u32 { + (((device >> 31 >> 1) & 0xfffff000) | ((device >> 8) & 0x00000fff)) as u32 +} + +fn dev_minor(device: u64) -> u32 { + (((device >> 12) & 0xffffff00) | (device & 0x000000ff)) as u32 +} + +#[cfg(test)] +mod tests { + use super::{dev_major, dev_minor}; + + fn make_dev(major: u64, minor: u64) -> u64 { + ((major & 0xfffff000) << 32) + | ((major & 0x00000fff) << 8) + | ((minor & 0xffffff00) << 12) + | (minor & 0x000000ff) + } + + #[test] + fn splits_compound_dev_numbers() { + let device = make_dev(226, 3); + assert_eq!(dev_major(device), 226); + assert_eq!(dev_minor(device), 3); + + let event = make_dev(13, 67); + assert_eq!(dev_major(event), 13); + assert_eq!(dev_minor(event), 67); } } diff --git 
a/local/recipes/system/redbear-sessiond/source/src/main.rs b/local/recipes/system/redbear-sessiond/source/src/main.rs index 6c36dd75..4c5ab617 100644 --- a/local/recipes/system/redbear-sessiond/source/src/main.rs +++ b/local/recipes/system/redbear-sessiond/source/src/main.rs @@ -1,6 +1,8 @@ mod acpi_watcher; +mod control; mod device_map; mod manager; +mod runtime_state; mod seat; mod session; @@ -15,6 +17,7 @@ use device_map::DeviceMap; use manager::LoginManager; use seat::LoginSeat; use session::LoginSession; +use runtime_state::shared_runtime; use tokio::runtime::Builder as RuntimeBuilder; use zbus::{ Address, @@ -26,7 +29,7 @@ const BUS_NAME: &str = "org.freedesktop.login1"; const MANAGER_PATH: &str = "/org/freedesktop/login1"; const SESSION_PATH: &str = "/org/freedesktop/login1/session/c1"; const SEAT_PATH: &str = "/org/freedesktop/login1/seat/seat0"; -const USER_PATH: &str = "/org/freedesktop/login1/user/0"; +const USER_PATH: &str = "/org/freedesktop/login1/user/current"; enum Command { Run, @@ -113,10 +116,11 @@ async fn run_daemon() -> Result<(), Box> { let session_path = parse_object_path(SESSION_PATH)?; let seat_path = parse_object_path(SEAT_PATH)?; let user_path = parse_object_path(USER_PATH)?; + let runtime = shared_runtime(); - let session = LoginSession::new(seat_path.clone(), user_path, DeviceMap::new()); - let seat = LoginSeat::new(session_path.clone()); - let manager = LoginManager::new(session_path, seat_path); + let session = LoginSession::new(seat_path.clone(), user_path, DeviceMap::new(), runtime.clone()); + let seat = LoginSeat::new(session_path.clone(), runtime.clone()); + let manager = LoginManager::new(session_path, seat_path, runtime.clone()); match system_connection_builder()? .name(BUS_NAME)? 
@@ -128,6 +132,7 @@ async fn run_daemon() -> Result<(), Box> { { Ok(connection) => { eprintln!("redbear-sessiond: registered {BUS_NAME} on the system bus"); + control::start_control_socket(runtime.clone()); tokio::spawn(acpi_watcher::watch_and_emit(connection.clone())); wait_for_shutdown().await?; drop(connection); diff --git a/local/recipes/system/redbear-sessiond/source/src/manager.rs b/local/recipes/system/redbear-sessiond/source/src/manager.rs index a00c1a7a..28ceed66 100644 --- a/local/recipes/system/redbear-sessiond/source/src/manager.rs +++ b/local/recipes/system/redbear-sessiond/source/src/manager.rs @@ -5,20 +5,20 @@ use zbus::{ zvariant::OwnedObjectPath, }; +use crate::runtime_state::SharedRuntime; + #[derive(Clone, Debug)] pub struct LoginManager { - session_id: String, + runtime: SharedRuntime, session_path: OwnedObjectPath, - seat_id: String, seat_path: OwnedObjectPath, } impl LoginManager { - pub fn new(session_path: OwnedObjectPath, seat_path: OwnedObjectPath) -> Self { + pub fn new(session_path: OwnedObjectPath, seat_path: OwnedObjectPath, runtime: SharedRuntime) -> Self { Self { - session_id: String::from("c1"), + runtime, session_path, - seat_id: String::from("seat0"), seat_path, } } @@ -27,7 +27,11 @@ impl LoginManager { #[interface(name = "org.freedesktop.login1.Manager")] impl LoginManager { fn get_session(&self, id: &str) -> fdo::Result { - if id == self.session_id { + let runtime = self + .runtime + .read() + .map_err(|_| fdo::Error::Failed(String::from("login1 runtime state is poisoned")))?; + if id == runtime.session_id { return Ok(self.session_path.clone()); } @@ -35,17 +39,25 @@ impl LoginManager { } fn list_sessions(&self) -> fdo::Result> { + let runtime = self + .runtime + .read() + .map_err(|_| fdo::Error::Failed(String::from("login1 runtime state is poisoned")))?; Ok(vec![( - self.session_id.clone(), - 0, - String::from("root"), - self.seat_id.clone(), + runtime.session_id.clone(), + runtime.uid, + runtime.username.clone(), + 
runtime.seat_id.clone(), self.session_path.clone(), )]) } fn get_seat(&self, id: &str) -> fdo::Result { - if id == self.seat_id { + let runtime = self + .runtime + .read() + .map_err(|_| fdo::Error::Failed(String::from("login1 runtime state is poisoned")))?; + if id == runtime.seat_id { return Ok(self.seat_path.clone()); } diff --git a/local/recipes/system/redbear-sessiond/source/src/runtime_state.rs b/local/recipes/system/redbear-sessiond/source/src/runtime_state.rs new file mode 100644 index 00000000..3d49b09c --- /dev/null +++ b/local/recipes/system/redbear-sessiond/source/src/runtime_state.rs @@ -0,0 +1,34 @@ +use std::sync::{Arc, RwLock}; + +#[derive(Clone, Debug)] +pub struct SessionRuntime { + pub session_id: String, + pub seat_id: String, + pub username: String, + pub uid: u32, + pub vt: u32, + pub leader: u32, + pub state: String, + pub active: bool, +} + +impl Default for SessionRuntime { + fn default() -> Self { + Self { + session_id: String::from("c1"), + seat_id: String::from("seat0"), + username: String::from("root"), + uid: 0, + vt: 3, + leader: std::process::id(), + state: String::from("online"), + active: true, + } + } +} + +pub type SharedRuntime = Arc>; + +pub fn shared_runtime() -> SharedRuntime { + Arc::new(RwLock::new(SessionRuntime::default())) +} diff --git a/local/recipes/system/redbear-sessiond/source/src/seat.rs b/local/recipes/system/redbear-sessiond/source/src/seat.rs index b9e4e68a..8f9b67a9 100644 --- a/local/recipes/system/redbear-sessiond/source/src/seat.rs +++ b/local/recipes/system/redbear-sessiond/source/src/seat.rs @@ -2,20 +2,22 @@ use std::sync::Mutex; use zbus::{fdo, interface, zvariant::OwnedObjectPath}; +use crate::runtime_state::SharedRuntime; + #[derive(Debug)] pub struct LoginSeat { id: String, - session_id: String, session_path: OwnedObjectPath, + runtime: SharedRuntime, last_requested_vt: Mutex, } impl LoginSeat { - pub fn new(session_path: OwnedObjectPath) -> Self { + pub fn new(session_path: OwnedObjectPath, runtime: 
SharedRuntime) -> Self { Self { id: String::from("seat0"), - session_id: String::from("c1"), session_path, + runtime, last_requested_vt: Mutex::new(1), } } @@ -46,12 +48,24 @@ impl LoginSeat { #[zbus(property(emits_changed_signal = "const"), name = "ActiveSession")] fn active_session(&self) -> (String, OwnedObjectPath) { - (self.session_id.clone(), self.session_path.clone()) + ( + self.runtime + .read() + .map(|runtime| runtime.session_id.clone()) + .unwrap_or_else(|_| String::from("c1")), + self.session_path.clone(), + ) } #[zbus(property(emits_changed_signal = "const"), name = "Sessions")] fn sessions(&self) -> Vec<(String, OwnedObjectPath)> { - vec![(self.session_id.clone(), self.session_path.clone())] + vec![( + self.runtime + .read() + .map(|runtime| runtime.session_id.clone()) + .unwrap_or_else(|_| String::from("c1")), + self.session_path.clone(), + )] } #[zbus(property(emits_changed_signal = "const"), name = "CanGraphical")] diff --git a/local/recipes/system/redbear-sessiond/source/src/session.rs b/local/recipes/system/redbear-sessiond/source/src/session.rs index a1598d1d..a56a1e6a 100644 --- a/local/recipes/system/redbear-sessiond/source/src/session.rs +++ b/local/recipes/system/redbear-sessiond/source/src/session.rs @@ -13,16 +13,14 @@ use zbus::{ }; use crate::device_map::DeviceMap; +use crate::runtime_state::SharedRuntime; #[derive(Debug)] pub struct LoginSession { - id: String, - seat_id: String, seat_path: OwnedObjectPath, - user_uid: u32, user_path: OwnedObjectPath, - leader: u32, device_map: DeviceMap, + runtime: SharedRuntime, controlled: Mutex, taken_devices: Mutex>, } @@ -32,15 +30,13 @@ impl LoginSession { seat_path: OwnedObjectPath, user_path: OwnedObjectPath, device_map: DeviceMap, + runtime: SharedRuntime, ) -> Self { Self { - id: String::from("c1"), - seat_id: String::from("seat0"), seat_path, - user_uid: 0, user_path, - leader: process::id(), device_map, + runtime, controlled: Mutex::new(false), taken_devices: Mutex::new(HashSet::new()), } 
@@ -57,21 +53,35 @@ impl LoginSession { .lock() .map_err(|_| fdo::Error::Failed(String::from("login1 device state is poisoned"))) } + + fn runtime(&self) -> fdo::Result { + self.runtime + .read() + .map(|runtime| runtime.clone()) + .map_err(|_| fdo::Error::Failed(String::from("login1 runtime state is poisoned"))) + } } #[interface(name = "org.freedesktop.login1.Session")] impl LoginSession { fn activate(&self) -> fdo::Result<()> { - eprintln!("redbear-sessiond: Activate requested for session {}", self.id); + eprintln!("redbear-sessiond: Activate requested for session {}", self.runtime()?.session_id); Ok(()) } fn take_control(&self, force: bool) -> fdo::Result<()> { let mut controlled = self.control_state()?; + let runtime = self.runtime()?; + if *controlled && !force { + return Err(fdo::Error::Failed(format!( + "session {} is already under control", + runtime.session_id + ))); + } *controlled = true; eprintln!( "redbear-sessiond: TakeControl requested for session {} (force={force})", - self.id + runtime.session_id ); Ok(()) } @@ -79,34 +89,56 @@ impl LoginSession { fn release_control(&self) -> fdo::Result<()> { let mut controlled = self.control_state()?; *controlled = false; - eprintln!("redbear-sessiond: ReleaseControl requested for session {}", self.id); + self.taken_devices()?.clear(); + eprintln!("redbear-sessiond: ReleaseControl requested for session {}", self.runtime()?.session_id); Ok(()) } fn take_device(&self, major: u32, minor: u32) -> fdo::Result { - let file = self + let runtime = self.runtime()?; + if !*self.control_state()? 
{ + return Err(fdo::Error::AccessDenied(format!( + "session {} must TakeControl before TakeDevice", + runtime.session_id + ))); + } + + let mut taken_devices = self.taken_devices()?; + if taken_devices.contains(&(major, minor)) { + return Err(fdo::Error::Failed(format!( + "device ({major}, {minor}) is already taken for session {}", + runtime.session_id + ))); + } + + let (path, file) = self .device_map .open_device(major, minor) .map_err(|err| fdo::Error::Failed(format!("TakeDevice({major}, {minor}) failed: {err}")))?; - let mut taken_devices = self.taken_devices()?; taken_devices.insert((major, minor)); let owned_fd: StdOwnedFd = file.into(); eprintln!( - "redbear-sessiond: TakeDevice granted for session {} -> ({major}, {minor})", - self.id + "redbear-sessiond: TakeDevice granted for session {} -> ({major}, {minor}) at {}", + runtime.session_id, path ); Ok(OwnedFd::from(owned_fd)) } fn release_device(&self, major: u32, minor: u32) -> fdo::Result<()> { + let runtime = self.runtime()?; let mut taken_devices = self.taken_devices()?; - taken_devices.remove(&(major, minor)); + if !taken_devices.remove(&(major, minor)) { + return Err(fdo::Error::Failed(format!( + "device ({major}, {minor}) was not taken for session {}", + runtime.session_id + ))); + } eprintln!( "redbear-sessiond: ReleaseDevice requested for session {} -> ({major}, {minor})", - self.id + runtime.session_id ); Ok(()) } @@ -114,14 +146,14 @@ impl LoginSession { fn pause_device_complete(&self, major: u32, minor: u32) -> fdo::Result<()> { eprintln!( "redbear-sessiond: PauseDeviceComplete received for session {} -> ({major}, {minor})", - self.id + self.runtime()?.session_id ); Ok(()) } #[zbus(property(emits_changed_signal = "const"), name = "Active")] fn active(&self) -> bool { - true + self.runtime().map(|runtime| runtime.active).unwrap_or(true) } #[zbus(property(emits_changed_signal = "const"), name = "Remote")] @@ -156,32 +188,40 @@ impl LoginSession { #[zbus(property(emits_changed_signal = "const"), name 
= "Id")] fn id(&self) -> String { - self.id.clone() + self.runtime().map(|runtime| runtime.session_id).unwrap_or_else(|_| String::from("c1")) } #[zbus(property(emits_changed_signal = "const"), name = "State")] fn state(&self) -> String { - String::from("online") + self.runtime().map(|runtime| runtime.state).unwrap_or_else(|_| String::from("online")) } #[zbus(property(emits_changed_signal = "const"), name = "Seat")] fn seat(&self) -> (String, OwnedObjectPath) { - (self.seat_id.clone(), self.seat_path.clone()) + ( + self.runtime() + .map(|runtime| runtime.seat_id) + .unwrap_or_else(|_| String::from("seat0")), + self.seat_path.clone(), + ) } #[zbus(property(emits_changed_signal = "const"), name = "User")] fn user(&self) -> (u32, OwnedObjectPath) { - (self.user_uid, self.user_path.clone()) + ( + self.runtime().map(|runtime| runtime.uid).unwrap_or(0), + self.user_path.clone(), + ) } #[zbus(property(emits_changed_signal = "const"), name = "VTNr")] fn vt_nr(&self) -> u32 { - 1 + self.runtime().map(|runtime| runtime.vt).unwrap_or(3) } #[zbus(property(emits_changed_signal = "const"), name = "Leader")] fn leader(&self) -> u32 { - self.leader + self.runtime().map(|runtime| runtime.leader).unwrap_or(process::id()) } #[zbus(property(emits_changed_signal = "const"), name = "Audit")] @@ -191,7 +231,7 @@ impl LoginSession { #[zbus(property(emits_changed_signal = "const"), name = "TTY")] fn tty(&self) -> String { - String::new() + format!("tty{}", self.runtime().map(|runtime| runtime.vt).unwrap_or(3)) } #[zbus(property(emits_changed_signal = "const"), name = "RemoteUser")] diff --git a/local/recipes/system/redbear-wifictl/source/Cargo.toml b/local/recipes/system/redbear-wifictl/source/Cargo.toml index e89aa52a..9821154a 100644 --- a/local/recipes/system/redbear-wifictl/source/Cargo.toml +++ b/local/recipes/system/redbear-wifictl/source/Cargo.toml @@ -13,6 +13,7 @@ libredox = { version = "0.1", features = ["call", "std"] } log = { version = "0.4", features = ["std"] } redox-scheme = 
"0.11" syscall = { package = "redox_syscall", version = "0.7", features = ["std"] } +redox-driver-sys = { path = "../../../drivers/redox-driver-sys/source" } [target.'cfg(target_os = "redox")'.dependencies] redox-driver-sys = { path = "../../../drivers/redox-driver-sys/source", features = ["redox"] } diff --git a/local/recipes/system/redbear-wifictl/source/src/backend.rs b/local/recipes/system/redbear-wifictl/source/src/backend.rs index 3929bba7..b6868f06 100644 --- a/local/recipes/system/redbear-wifictl/source/src/backend.rs +++ b/local/recipes/system/redbear-wifictl/source/src/backend.rs @@ -10,10 +10,9 @@ use std::process::Command; pub(crate) static TEST_ENV_LOCK: std::sync::LazyLock> = std::sync::LazyLock::new(|| std::sync::Mutex::new(())); +use redox_driver_sys::pci::{parse_device_info_from_config_space, PciLocation}; #[cfg(target_os = "redox")] use redox_driver_sys::pci::PciDevice; -#[cfg(target_os = "redox")] -use redox_driver_sys::pci::PciLocation; #[derive(Clone, Debug)] struct ParsedPciLocation { @@ -862,7 +861,7 @@ fn detect_intel_wifi_interfaces( device_id, subsystem_id, firmware_family, - transport_status: transport_status_from_config(&config), + transport_status: transport_status_from_config(&location, &config), ucode_candidates, selected_ucode, pnvm_candidate, @@ -945,7 +944,7 @@ fn intel_firmware_candidates( ) } -fn transport_status_from_config(config: &[u8]) -> String { +fn transport_status_from_config(location: &ParsedPciLocation, config: &[u8]) -> String { let command = u16::from_le_bytes([config[0x04], config[0x05]]); let bar0 = u32::from_le_bytes([config[0x10], config[0x11], config[0x12], config[0x13]]); let irq_pin = config[0x3D]; @@ -954,13 +953,38 @@ fn transport_status_from_config(config: &[u8]) -> String { let bus_master = (command & 0x4) != 0; let bar_present = bar0 != 0; let irq_present = irq_pin != 0; + let interrupt_support = parse_device_info_from_config_space( + PciLocation { + segment: location.segment, + bus: location.bus, + 
device: location.device, + function: location.function, + }, + config, + ) + .map(|info| { + let support = info.interrupt_support(); + if support.as_str() == "none" && irq_present { + "legacy".to_string() + } else { + support.as_str().to_string() + } + }) + .unwrap_or_else(|| { + if irq_present { + "legacy".to_string() + } else { + "none".to_string() + } + }); format!( - "transport=pci memory_enabled={} bus_master={} bar0_present={} irq_pin_present={}", + "transport=pci memory_enabled={} bus_master={} bar0_present={} irq_pin_present={} interrupt_support={}", if memory_enabled { "yes" } else { "no" }, if bus_master { "yes" } else { "no" }, if bar_present { "yes" } else { "no" }, - if irq_present { "yes" } else { "no" } + if irq_present { "yes" } else { "no" }, + interrupt_support ) } @@ -1174,7 +1198,8 @@ fn read_transport_status(config_path: &PathBuf) -> Result { config_path.display() )); } - Ok(transport_status_from_config(&config)) + let location = parse_location_from_config_path(config_path)?; + Ok(transport_status_from_config(&location, &config)) } fn program_transport_bits(config_path: &PathBuf) -> Result<(), String> { @@ -1286,6 +1311,38 @@ mod tests { .transport_status("wlan0") .contains("memory_enabled=yes")); assert!(backend.transport_status("wlan0").contains("bus_master=yes")); + assert!(backend + .transport_status("wlan0") + .contains("interrupt_support=legacy")); + } + + #[test] + fn transport_status_reports_interrupt_support_from_shared_pci_parser() { + let location = ParsedPciLocation { + segment: 0, + bus: 0, + device: 0x14, + function: 3, + }; + let mut cfg = vec![0u8; 256]; + cfg[0x00] = 0x86; + cfg[0x01] = 0x80; + cfg[0x02] = 0x25; + cfg[0x03] = 0x27; + cfg[0x04] = 0x06; + cfg[0x06] = 0x10; + cfg[0x0A] = 0x80; + cfg[0x0B] = 0x02; + cfg[0x0E] = 0x00; + cfg[0x34] = 0x50; + cfg[0x3C] = 11; + cfg[0x50] = 0x05; + cfg[0x51] = 0x00; + + let status = transport_status_from_config(&location, &cfg); + assert!(status.contains("memory_enabled=yes")); + 
assert!(status.contains("bus_master=yes")); + assert!(status.contains("interrupt_support=msi")); } #[test] diff --git a/local/recipes/wayland/qt6-wayland-smoke/source/plugincheck.cpp b/local/recipes/wayland/qt6-wayland-smoke/source/plugincheck.cpp index 37c5cb9e..33a670d3 100644 --- a/local/recipes/wayland/qt6-wayland-smoke/source/plugincheck.cpp +++ b/local/recipes/wayland/qt6-wayland-smoke/source/plugincheck.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -26,6 +27,20 @@ int main(int argc, char **argv) { ? QString::fromLocal8Bit(argv[1]) : QStringLiteral("/usr/plugins/platforms/libqminimal.so"); + QFile rawFile(plugin); + if (rawFile.open(QIODevice::ReadOnly)) { + const QByteArray header = rawFile.read(64); + qInfo() << "qt6-plugin-check raw-header" << header.toHex(' '); + if (header.size() >= 56) { + const quint8 low = static_cast(header[54]); + const quint8 high = static_cast(header[55]); + const quint16 phentsize = quint16(low) | (quint16(high) << 8); + qInfo() << "qt6-plugin-check raw-e_phentsize" << phentsize; + } + } else { + qWarning() << "qt6-plugin-check failed to open raw file" << rawFile.errorString(); + } + QPluginLoader loader(plugin); mark("before-metadata"); std::fprintf(stderr, "qt6-plugin-check before metadata\n"); diff --git a/local/scripts/apply-patches.sh b/local/scripts/apply-patches.sh index ac7a35f3..c125bbcc 100755 --- a/local/scripts/apply-patches.sh +++ b/local/scripts/apply-patches.sh @@ -119,6 +119,9 @@ symlink "../../local/recipes/system/redbear-nmap" "recipes/system/redbear-nm symlink "../../local/recipes/system/redbear-meta" "recipes/system/redbear-meta" symlink "../../local/recipes/system/udev-shim" "recipes/system/udev-shim" symlink "../../local/recipes/system/redbear-sessiond" "recipes/system/redbear-sessiond" +symlink "../../local/recipes/system/redbear-authd" "recipes/system/redbear-authd" +symlink "../../local/recipes/system/redbear-session-launch" "recipes/system/redbear-session-launch" +symlink 
"../../local/recipes/system/redbear-greeter" "recipes/system/redbear-greeter" symlink "../../local/recipes/system/redbear-dbus-services" "recipes/system/redbear-dbus-services" symlink "../../local/recipes/system/redbear-notifications" "recipes/system/redbear-notifications" symlink "../../local/recipes/system/redbear-upower" "recipes/system/redbear-upower" diff --git a/local/scripts/test-dbus-qemu.sh b/local/scripts/test-dbus-qemu.sh index 1cc4c27d..9044a094 100755 --- a/local/scripts/test-dbus-qemu.sh +++ b/local/scripts/test-dbus-qemu.sh @@ -6,7 +6,7 @@ # # Options: # --check Run non-interactively, exit 0 on pass, 1 on fail -# --config CONFIG Build config to test (default: redbear-kde) +# --config CONFIG Build config to test (default: redbear-full) # # --check mode boots the image, waits for the login prompt, then sends D-Bus # validation commands via the serial console. Output is captured and parsed. @@ -28,7 +28,7 @@ set -euo pipefail CHECK_MODE=0 -CONFIG_NAME="redbear-kde" +CONFIG_NAME="redbear-full" while [[ $# -gt 0 ]]; do case "$1" in diff --git a/local/scripts/test-greeter-qemu.sh b/local/scripts/test-greeter-qemu.sh new file mode 100755 index 00000000..b2b94929 --- /dev/null +++ b/local/scripts/test-greeter-qemu.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +# test-greeter-qemu.sh — bounded QEMU proof for the Red Bear greeter/auth surface. + +set -euo pipefail + +usage() { + cat <<'USAGE' +Usage: test-greeter-qemu.sh [--check] + +Boot redbear-full in QEMU, log in on the fallback console, and verify the greeter daemon/socket +surface, invalid-login handling, and a bounded successful-login return-to-greeter proof. 
+USAGE +} + +check_mode=0 +for arg in "$@"; do + case "$arg" in + --help|-h|help) + usage + exit 0 + ;; + --check) + check_mode=1 + ;; + *) + echo "ERROR: unsupported argument $arg" >&2 + usage >&2 + exit 1 + ;; + esac +done + +firmware="" +for candidate in \ + /usr/share/ovmf/x64/OVMF.4m.fd \ + /usr/share/OVMF/x64/OVMF.4m.fd \ + /usr/share/ovmf/OVMF.fd \ + /usr/share/OVMF/OVMF_CODE.fd \ + /usr/share/qemu/edk2-x86_64-code.fd +do + if [[ -f "$candidate" ]]; then + firmware="$candidate" + break + fi +done + +if [[ -z "$firmware" ]]; then + echo "ERROR: no usable x86_64 UEFI firmware found" >&2 + exit 1 +fi + +arch="${ARCH:-$(uname -m)}" +image="build/$arch/redbear-full/harddrive.img" + +if [[ ! -f "$image" ]]; then + echo "ERROR: missing image $image" >&2 + echo "Build it first with: ./local/scripts/build-redbear.sh redbear-full" >&2 + exit 1 +fi + +if [[ "$check_mode" -eq 0 ]]; then + exec qemu-system-x86_64 \ + -name "Red Bear Greeter Validation" \ + -device qemu-xhci \ + -smp 4 \ + -m 2048 \ + -bios "$firmware" \ + -chardev stdio,id=debug,signal=off,mux=on \ + -serial chardev:debug \ + -mon chardev=debug \ + -machine q35 \ + -device ich9-intel-hda -device hda-output \ + -device virtio-net,netdev=net0 \ + -netdev user,id=net0 \ + -vga none \ + -device virtio-gpu \ + -drive file="$image",format=raw,if=none,id=drv0 \ + -device nvme,drive=drv0,serial=NVME_SERIAL \ + -enable-kvm -cpu host +fi + +expect </dev/null || true echo "IOMMU first-use validation path completed via guest runtime check" exit 0 fi @@ -133,9 +134,9 @@ exec qemu-system-x86_64 \ -netdev user,id=net0 \ -object filter-dump,id=f1,netdev=net0,file="build/$arch/$config/network.pcap" \ -nographic -vga none \ - -drive file="$image",format=raw,if=none,id=drv0 \ + -drive file="$image",format=raw,if=none,id=drv0,snapshot=on \ -device nvme,drive=drv0,serial=NVME_SERIAL \ - -drive file="$extra",format=raw,if=none,id=drv1 \ + -drive file="$extra",format=raw,if=none,id=drv1,snapshot=on \ -device 
nvme,drive=drv1,serial=NVME_EXTRA \ -enable-kvm -cpu host \ $extra_qemu_args diff --git a/local/scripts/test-phase1-desktop-substrate.sh b/local/scripts/test-phase1-desktop-substrate.sh index 2b086ed9..7fb19ec0 100755 --- a/local/scripts/test-phase1-desktop-substrate.sh +++ b/local/scripts/test-phase1-desktop-substrate.sh @@ -47,7 +47,7 @@ run_guest_checks() { require_command() { local cmd="$1" local message="$2" - if command -v "$cmd" >/dev/null 2>&1; then + if which "$cmd" >/dev/null 2>&1; then echo " PASS $message" else echo " FAIL $message" @@ -96,7 +96,7 @@ run_guest_checks() { echo "--- DRM/KMS ---" local drm_found=false - if [ -e /usr/bin/redox-drm ] || command -v redox-drm >/dev/null 2>&1; then + if [ -e /usr/bin/redox-drm ] || which redox-drm >/dev/null 2>&1; then drm_found=true fi if $drm_found; then @@ -111,13 +111,13 @@ run_guest_checks() { echo " FAIL /scheme/drm does not exist" failures=$((failures + 1)) fi - if command -v redbear-drm-display-check >/dev/null 2>&1; then + if which redbear-drm-display-check >/dev/null 2>&1; then echo " NOTE redbear-drm-display-check available (run manually for bounded display validation)" fi echo echo "--- health check summary ---" - if command -v redbear-info >/dev/null 2>&1; then + if which redbear-info >/dev/null 2>&1; then local report report="$(redbear-info --json 2>/dev/null || true)" if [ -n "$report" ]; then @@ -224,7 +224,7 @@ expect { "__WAYLAND_LIB_OK__" { } "__WAYLAND_LIB_FAIL__" { puts "FAIL: libwayland-client missing"; exit 1 } } -send "command -v evdevd && echo __EVDVD_OK__ || echo __EVDVD_FAIL__\r" + send "which evdevd >/scheme/null && echo __EVDVD_OK__ || echo __EVDVD_FAIL__\r" expect { "__EVDVD_OK__" { } "__EVDVD_FAIL__" { puts "FAIL: evdevd missing"; exit 1 } @@ -234,7 +234,7 @@ expect { "__EVDEV_SCH_OK__" { } "__EVDEV_SCH_FAIL__" { puts "FAIL: /scheme/evdev missing"; exit 1 } } -send "command -v udev-shim && echo __UDEV_OK__ || echo __UDEV_FAIL__\r" + send "which udev-shim >/scheme/null && echo 
__UDEV_OK__ || echo __UDEV_FAIL__\r" expect { "__UDEV_OK__" { } "__UDEV_FAIL__" { puts "FAIL: udev-shim missing"; exit 1 } @@ -254,7 +254,7 @@ expect { "__FW_DIR_OK__" { } "__FW_DIR_FAIL__" { puts "FAIL: /lib/firmware missing"; exit 1 } } -send "command -v redox-drm && echo __DRM_OK__ || echo __DRM_FAIL__\r" + send "test -e /usr/bin/redox-drm && echo __DRM_OK__ || echo __DRM_FAIL__\r" expect { "__DRM_OK__" { } "__DRM_FAIL__" { puts "FAIL: redox-drm missing"; exit 1 } @@ -279,7 +279,7 @@ usage() { cat <<'USAGE' Usage: ./local/scripts/test-phase1-desktop-substrate.sh --guest - ./local/scripts/test-phase1-desktop-substrate.sh --qemu [redbear-wayland] + ./local/scripts/test-phase1-desktop-substrate.sh --qemu [redbear-full] USAGE } @@ -288,7 +288,7 @@ case "${1:-}" in run_guest_checks ;; --qemu) - run_qemu_checks "${2:-redbear-wayland}" + run_qemu_checks "${2:-redbear-full}" ;; *) usage diff --git a/local/scripts/test-phase4-wayland-qemu.sh b/local/scripts/test-phase4-wayland-qemu.sh index 53373441..335ee0d7 100644 --- a/local/scripts/test-phase4-wayland-qemu.sh +++ b/local/scripts/test-phase4-wayland-qemu.sh @@ -30,7 +30,7 @@ usage() { cat <<'USAGE' Usage: test-phase4-wayland-qemu.sh [--check] [extra qemu args...] -Boot the repo's Wayland profile in QEMU with a VirtIO NIC using UEFI firmware. +Boot the repo's full desktop target in QEMU with a VirtIO NIC using UEFI firmware. Examples: ./local/scripts/test-phase4-wayland-qemu.sh @@ -41,7 +41,7 @@ Expected validation path: display session -> validation launcher -> compositor -> wayland-session Important: - the current harness uses '-vga std' and today still surfaces llvmpipe in-guest. + the current harness uses QEMU virtio-gpu for the bounded software-path desktop slice. Treat this as a Phase 4 software-path/runtime smoke check and regression harness. Hardware-accelerated desktop proof is a separate bare-metal/runtime-driver milestone. 
USAGE @@ -78,12 +78,12 @@ else fi arch="${ARCH:-$(uname -m)}" -image="build/$arch/redbear-wayland/harddrive.img" -extra="build/$arch/redbear-wayland/extra.img" +image="build/$arch/redbear-full/harddrive.img" +extra="build/$arch/redbear-full/extra.img" if [[ ! -f "$image" ]]; then echo "ERROR: missing image $image" >&2 - echo "Build it first with: ./local/scripts/build-redbear.sh redbear-wayland" >&2 + echo "Build it first with: ./local/scripts/build-redbear.sh redbear-full" >&2 exit 1 fi @@ -92,7 +92,7 @@ if [[ ! -f "$extra" ]]; then fi echo "=== Red Bear OS Phase 4 Wayland QEMU Launch ===" -echo "Config: redbear-wayland" +echo "Config: redbear-full" echo "Image: $image" echo "UEFI: $firmware" echo @@ -102,14 +102,14 @@ echo " netctl status" echo " redbear-phase4-wayland-check" echo " the validation compositor should own the bounded runtime path" echo " qt6-wayland-smoke should leave a success marker via wayland-session" -echo " production desktop direction is redbear-kde -> kwin_wayland" +echo " active desktop direction is redbear-full -> kwin_wayland" echo if [[ "$check_mode" -eq 1 ]]; then expect <&2 - echo "Build it first with: ./local/scripts/build-redbear.sh redbear-kde" >&2 + echo "Build it first with: ./local/scripts/build-redbear.sh redbear-full" >&2 exit 1 fi @@ -104,7 +104,7 @@ if [[ "$check_mode" -eq 1 ]]; then expect </dev/null || true echo "PS/2 serio runtime validation completed via guest runtime check" exit 0 fi @@ -112,9 +113,9 @@ exec qemu-system-x86_64 \ -netdev user,id=net0 \ -object filter-dump,id=f1,netdev=net0,file="build/$arch/$config/network.pcap" \ -nographic -vga none \ - -drive file="$image",format=raw,if=none,id=drv0 \ + -drive file="$image",format=raw,if=none,id=drv0,snapshot=on \ -device nvme,drive=drv0,serial=NVME_SERIAL \ - -drive file="$extra",format=raw,if=none,id=drv1 \ + -drive file="$extra",format=raw,if=none,id=drv1,snapshot=on \ -device nvme,drive=drv1,serial=NVME_EXTRA \ -enable-kvm -cpu host \ $extra_qemu_args diff --git 
a/local/scripts/test-usb-storage-qemu.sh b/local/scripts/test-usb-storage-qemu.sh index 6077e79e..1342dd8b 100644 --- a/local/scripts/test-usb-storage-qemu.sh +++ b/local/scripts/test-usb-storage-qemu.sh @@ -91,41 +91,23 @@ pkill -f "qemu-system-x86_64.*$image" 2>/dev/null || true sleep 1 rm -f "$log_file" -set +e -timeout 120s qemu-system-x86_64 \ - -name "Red Bear OS x86_64" \ - -device qemu-xhci,id=xhci \ - -smp 4 \ - -m 2048 \ - -bios "$firmware" \ - -chardev stdio,id=debug,signal=off,mux=on \ - -serial chardev:debug \ - -mon chardev=debug \ - -machine q35 \ - -device ich9-intel-hda -device hda-output \ - -device virtio-net,netdev=net0 \ - -netdev user,id=net0 \ - -object filter-dump,id=f1,netdev=net0,file="build/$arch/$config/network.pcap" \ - -nographic -vga none \ - -drive file="$image",format=raw,if=none,id=drv0 \ - -device nvme,drive=drv0,serial=NVME_SERIAL \ - -drive file="$extra",format=raw,if=none,id=drv1 \ - -device nvme,drive=drv1,serial=NVME_EXTRA \ - -drive file="$usb_img",format=raw,if=none,id=usbdisk \ - -device usb-storage,bus=xhci.0,drive=usbdisk \ - -enable-kvm -cpu host \ - > "$log_file" 2>&1 -set -e +expect <&2 - exit 1 -fi - -if ! grep -Fq "DISK CONTENT: $expected_sector_b64" "$log_file"; then - echo "ERROR: USB storage sector 0 readback did not match the seeded pattern; see $log_file" >&2 - exit 1 -fi +pkill -f "qemu-system-x86_64.*$image" 2>/dev/null || true if grep -q "panic\|usbscsid: .*IO ERROR\|usbscsid: startup failed\|usbscsid: event queue error\|usbscsid: scheme tick failed\|bulk .* endpoint stalled" "$log_file"; then echo "ERROR: USB storage path hit a crash/error; see $log_file" >&2 diff --git a/mk/config.mk b/mk/config.mk index d7ff168f..cd881f08 100644 --- a/mk/config.mk +++ b/mk/config.mk @@ -17,9 +17,9 @@ PREFIX_USE_UPSTREAM_RUST_COMPILER?=0 ## Enable to use binary packages (much faster) REPO_BINARY?=0 ## Name of the configuration to include in the image name e.g. desktop or server. 
-## Red Bear's tracked default desktop target is redbear-kde. Runtime claims remain evidence-qualified +## Red Bear's tracked default desktop target is redbear-full. Runtime claims remain evidence-qualified ## until compositor/session proof is strong enough for broader support language. -CONFIG_NAME?=redbear-kde +CONFIG_NAME?=redbear-full ## Build appstream data for repo REPO_APPSTREAM?=0 ## Ignore errors when building the repo, attempt to build every package diff --git a/recipes/core/relibc/recipe.toml b/recipes/core/relibc/recipe.toml index 5b587252..8c945ffa 100644 --- a/recipes/core/relibc/recipe.toml +++ b/recipes/core/relibc/recipe.toml @@ -7,6 +7,7 @@ patches = [ "../../../local/patches/relibc/P3-signalfd.patch", "../../../local/patches/relibc/P3-signalfd-header.patch", "../../../local/patches/relibc/P3-timerfd.patch", + "../../../local/patches/relibc/P3-elf64-types.patch", ] [build] diff --git a/recipes/system/redbear-authd b/recipes/system/redbear-authd new file mode 120000 index 00000000..9d86a0de --- /dev/null +++ b/recipes/system/redbear-authd @@ -0,0 +1 @@ +../../local/recipes/system/redbear-authd \ No newline at end of file diff --git a/recipes/system/redbear-greeter b/recipes/system/redbear-greeter new file mode 120000 index 00000000..7ae00b45 --- /dev/null +++ b/recipes/system/redbear-greeter @@ -0,0 +1 @@ +../../local/recipes/system/redbear-greeter \ No newline at end of file diff --git a/recipes/system/redbear-session-launch b/recipes/system/redbear-session-launch new file mode 120000 index 00000000..31af07d5 --- /dev/null +++ b/recipes/system/redbear-session-launch @@ -0,0 +1 @@ +../../local/recipes/system/redbear-session-launch \ No newline at end of file diff --git a/recipes/wip/AGENTS.md b/recipes/wip/AGENTS.md index 4e835a59..766ac924 100644 --- a/recipes/wip/AGENTS.md +++ b/recipes/wip/AGENTS.md @@ -48,8 +48,9 @@ recipes/wip/ │ ├── wlroots/ # wlroots (not compiled/tested) │ ├── sway/ # sway (not compiled/tested) │ ├── hyprland/ # hyprland 
(not compiled/tested) -│ ├── xwayland/ # XWayland (partially patched) -│ └── seatd/ # Seat daemon (recipe exists, untested) +│ └── xwayland/ # XWayland (partially patched) +├── services/ +│ └── seatd/ # Seat daemon recipe (service category, runtime trust still open) ├── kde/ # 9 KDE app recipes │ ├── kde-dolphin/ # File manager (needs kio) │ ├── kdenlive/ # Video editor (needs MLT) @@ -67,21 +68,21 @@ recipes/wip/ | Task | Location | |------|----------| -| Fix Wayland build | `wayland/libwayland/redox.patch` — still carries POSIX compatibility workarounds | +| Fix Wayland build | `wayland/libwayland/redox.patch` plus recipe-time source rewrites — residual Redox compatibility and scanner/build handling remain | | Add Wayland compositor | `wayland//recipe.toml` — use `dependencies = ["libwayland"]` | -| Fix cosmic-comp | `wayland/cosmic-comp/` — missing libinput causes no keyboard | -| Work on smallvil | `wayland/smallvil/` — Smithay-based, already running | +| Inspect cosmic-comp status | `wayland/cosmic-comp/` — historical partial bring-up; not the active forward path | +| Inspect smallvil history | `wayland/smallvil/` — historical bounded validation compositor reference only | | Port a KDE app | Copy existing recipe pattern, add `#TODO` header | | Add Qt port | Prefer the newer `local/recipes/qt/` / `local/recipes/kde/` work over this older note | ## WAYLAND STATUS -- **libwayland**: Builds with `redox.patch`; several POSIX-dependent code paths are still commented out there -- **cosmic-comp**: Partially working, no keyboard input (missing libinput) -- **smallvil**: Basic compositor running, poor performance +- **libwayland**: Builds with a smaller Redox patch plus recipe-time source rewriting; runtime trust is still incomplete +- **cosmic-comp**: Historical partial bring-up note only; current runtime/session status is not trusted enough for support claims +- **smallvil**: Historical bounded validation compositor reference only; no longer part of the active forward 
desktop workflow - **wlroots/sway/hyprland**: Not compiled or tested - **xwayland**: Partially patched -- **Blockers**: downstream Wayland patch reduction, libinput/runtime input validation, DRM/KMS hardware/runtime validation +- **Blockers**: runtime substrate trust, complete compositor session proof, libinput/seatd runtime validation, and DRM/KMS hardware/runtime validation ## KDE STATUS diff --git a/recipes/wip/qt/qtbase/recipe.toml b/recipes/wip/qt/qtbase/recipe.toml index 182360ea..0af3b77b 100644 --- a/recipes/wip/qt/qtbase/recipe.toml +++ b/recipes/wip/qt/qtbase/recipe.toml @@ -19,8 +19,39 @@ dependencies = [ script = """ DYNAMIC_INIT -RELIBC_STAGE_INCLUDE="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage/usr/include" -RELIBC_STAGE_LIB="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage/usr/lib" +RELIBC_STAGE_INCLUDE_STAGE="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage/usr/include" +RELIBC_STAGE_INCLUDE_TMP="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage.tmp/usr/include" +RELIBC_STAGE_LIB_STAGE="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage/usr/lib" +RELIBC_STAGE_LIB_TMP="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage.tmp/usr/lib" +RELIBC_BUILD_LIB="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/build/target/${TARGET}/release" + +RELIBC_STAGE_INCLUDE="$RELIBC_STAGE_INCLUDE_STAGE" +if [ ! 
-d "$RELIBC_STAGE_INCLUDE" ] && [ -d "$RELIBC_STAGE_INCLUDE_TMP" ]; then + RELIBC_STAGE_INCLUDE="$RELIBC_STAGE_INCLUDE_TMP" +fi + +choose_relibc_lib_stage() { + local candidate="$1" + if [ -f "$candidate/libc.so" ] && readelf -Ws "$candidate/libc.so" | grep -q '_Z7strtoldPKcPPc'; then + printf '%s\n' "$candidate" + return 0 + fi + return 1 +} + +if RELIBC_STAGE_LIB="$(choose_relibc_lib_stage "$RELIBC_STAGE_LIB_STAGE")"; then + : +elif RELIBC_STAGE_LIB="$(choose_relibc_lib_stage "$RELIBC_STAGE_LIB_TMP")"; then + : +elif RELIBC_STAGE_LIB="$(choose_relibc_lib_stage "$RELIBC_BUILD_LIB")"; then + : +elif [ -d "$RELIBC_STAGE_LIB_STAGE" ]; then + RELIBC_STAGE_LIB="$RELIBC_STAGE_LIB_STAGE" +elif [ -d "$RELIBC_BUILD_LIB" ]; then + RELIBC_STAGE_LIB="$RELIBC_BUILD_LIB" +else + RELIBC_STAGE_LIB="$RELIBC_STAGE_LIB_TMP" +fi if [ -d "${RELIBC_STAGE_INCLUDE}" ]; then mkdir -p "${COOKBOOK_SYSROOT}/include" cp -a "${RELIBC_STAGE_INCLUDE}/." "${COOKBOOK_SYSROOT}/include/" @@ -34,6 +65,24 @@ if [ -d "${RELIBC_STAGE_LIB}" ]; then export LDFLAGS="-L${RELIBC_STAGE_LIB} -Wl,-rpath-link,${RELIBC_STAGE_LIB} ${LDFLAGS}" fi +cat > strtold_cpp_compat.c <<'EOF' +long double strtold(const char *nptr, char **endptr); + +long double relibc_compat_cpp_strtold(const char *nptr, char **endptr) + __asm__("_Z7strtoldPKcPPc"); + +long double relibc_compat_cpp_strtold(const char *nptr, char **endptr) { + return strtold(nptr, endptr); +} +EOF +"${GNU_TARGET}-gcc" \ + --sysroot="${COOKBOOK_SYSROOT}" \ + -shared -fPIC strtold_cpp_compat.c \ + -o "${COOKBOOK_SYSROOT}/lib/libredbear-qt-strtold-compat.so" +mkdir -p "${COOKBOOK_STAGE}/usr/lib" +cp -f "${COOKBOOK_SYSROOT}/lib/libredbear-qt-strtold-compat.so" "${COOKBOOK_STAGE}/usr/lib/" +export LDFLAGS="${LDFLAGS} -Wl,--no-as-needed -L${COOKBOOK_SYSROOT}/lib -lredbear-qt-strtold-compat" + export CFLAGS="${CFLAGS} -fcf-protection=none" export CXXFLAGS="${CXXFLAGS} -fcf-protection=none" diff --git a/recipes/wip/qt/qtbase/redox.patch 
b/recipes/wip/qt/qtbase/redox.patch index a3bc334a..f31e6cd2 100644 --- a/recipes/wip/qt/qtbase/redox.patch +++ b/recipes/wip/qt/qtbase/redox.patch @@ -320,17 +320,76 @@ diff -ruwN source-old/src/corelib/plugin/qelfparser_p.cpp source/src/corelib/plu return header.e_version == EV_CURRENT; } +@@ -742,7 +752,9 @@ + + qEDebug << ElfHeaderDebug{ reinterpret_cast(data.data()) }; + +- auto header = reinterpret_cast(data.data()); ++ typename T::Ehdr headerStorage; ++ memcpy(&headerStorage, data.data(), sizeof(headerStorage)); ++ const auto header = &headerStorage; + if (!ElfHeaderCheck<>::checkHeader(*header)) + return error(ElfHeaderCheck<>::explainCheckFailure(*header)); + diff -ruwN source-old/src/corelib/plugin/qlibrary.cpp source/src/corelib/plugin/qlibrary.cpp --- source-old/src/corelib/plugin/qlibrary.cpp 2024-12-02 05:39:06.000000000 +0000 -+++ source/src/corelib/plugin/qlibrary.cpp 2026-04-16 00:00:00.000000000 +0000 -@@ -232,24 +232,37 @@ ++++ source/src/corelib/plugin/qlibrary.cpp 2026-04-19 00:00:00.000000000 +0000 +@@ -17,6 +17,10 @@ + #include + #include + ++#ifdef Q_OS_REDOX ++# include ++#endif ++ + #ifdef Q_OS_DARWIN + # include + #endif +@@ -176,7 +180,14 @@ + to it being used in the plugin in the first place. + */ + #if defined(Q_OF_ELF) ++# if defined(Q_OS_REDOX) ++ // Redox currently reaches the plugin file successfully but the ELF-specific ++ // metadata parser misreads shared plugins inside the guest runtime. Fall ++ // back to the generic QTMETADATA search path until the Redox ELF loader / ++ // parser path is trustworthy again. ++# else + return QElfParser::parse({s, s_len}, errMsg); ++# endif + #elif defined(Q_OF_MACH_O) + return QMachOParser::parse(s, s_len, errMsg); + #elif defined(Q_OS_WIN) || defined(Q_OS_CYGWIN) + @@ -230,7 +234,41 @@ + Q_INT64_C(1) << (sizeof(qsizetype) > 4 ? 
40 : 29); + qsizetype fdlen = qMin(file.size(), MaxMemoryMapSize); - const char *filedata = reinterpret_cast(file.map(0, fdlen)); + const char *filedata = nullptr; + QByteArray data; + +#ifdef Q_OS_REDOX -+ data = file.read(qMin(fdlen, 64 * 1024 * 1024)); ++ const qsizetype readLimit = qMin(fdlen, 64 * 1024 * 1024); ++ data.resize(readLimit); ++ qsizetype total = 0; ++ const int fd = file.handle(); ++ if (fd < 0) { ++ qCWarning(qt_lcDebugPlugins, "%ls: failed to acquire file descriptor: %ls", ++ qUtf16Printable(library), qUtf16Printable(file.errorString())); ++ return {}; ++ } ++ while (total < readLimit) { ++ const qint64 chunk = ::pread(fd, data.data() + total, size_t(readLimit - total), off_t(total)); ++ if (chunk < 0) { ++ qCWarning(qt_lcDebugPlugins, "%ls: failed to pread for metadata scan: %ls", ++ qUtf16Printable(library), qUtf16Printable(qt_error_string(errno))); ++ return {}; ++ } ++ if (chunk == 0) ++ break; ++ total += chunk; ++ } ++ data.truncate(total); + filedata = data.constData(); + fdlen = data.size(); + if (filedata == nullptr || fdlen == 0) { @@ -344,10 +403,7 @@ diff -ruwN source-old/src/corelib/plugin/qlibrary.cpp source/src/corelib/plugin/ #ifdef Q_OS_UNIX if (filedata == nullptr) { - // If we can't mmap(), then the dynamic loader won't be able to either. - // This can't be used as a plugin. - qCWarning(qt_lcDebugPlugins, "%ls: failed to map to memory: %ls", - qUtf16Printable(library), qUtf16Printable(file.errorString())); +@@ -241,7 +279,6 @@ return {}; } #else @@ -355,14 +411,6 @@ diff -ruwN source-old/src/corelib/plugin/qlibrary.cpp source/src/corelib/plugin/ if (filedata == nullptr) { // It's unknown at this point whether Windows supports LoadLibrary() on // files that fail to CreateFileMapping / MapViewOfFile, so we err on - // the side of doing a regular read into memory (up to 64 MB). 
- data = file.read(64 * 1024 * 1024); - filedata = data.constData(); - fdlen = data.size(); - } - #endif - - QString errMsg = library; diff -ruwN source-old/src/corelib/global/qsimd.cpp source/src/corelib/global/qsimd.cpp --- source-old/src/corelib/global/qsimd.cpp 2024-12-02 05:39:06.000000000 +0000 diff --git a/recipes/wip/qt/qtdeclarative/recipe.toml b/recipes/wip/qt/qtdeclarative/recipe.toml index df1cc357..01890314 100644 --- a/recipes/wip/qt/qtdeclarative/recipe.toml +++ b/recipes/wip/qt/qtdeclarative/recipe.toml @@ -226,4 +226,12 @@ if [ -d "${COOKBOOK_STAGE}/usr/qml" ]; then mkdir -p "${SYSROOT}/qml" cp -a "${COOKBOOK_STAGE}/usr/qml/"* "${SYSROOT}/qml/" 2>/dev/null || true fi + +for stdlib in \ + "${COOKBOOK_STAGE}/usr/include/stdlib.h" \ + "${SYSROOT}/include/stdlib.h" \ + "${SYSROOT}/usr/include/stdlib.h"; do + [ -f "$stdlib" ] || continue + sed -i '/strtold[[:space:]]*(/d' "$stdlib" 2>/dev/null || true +done """ diff --git a/recipes/wip/qt/qtshadertools/recipe.toml b/recipes/wip/qt/qtshadertools/recipe.toml index f2758880..82d3277b 100644 --- a/recipes/wip/qt/qtshadertools/recipe.toml +++ b/recipes/wip/qt/qtshadertools/recipe.toml @@ -28,9 +28,9 @@ if [ -d "${QTBASE_BUILD_INCLUDE}/6.11.0/QtGui/rhi" ]; then mkdir -p "${COOKBOOK_SYSROOT}/include" cp -a "${QTBASE_BUILD_INCLUDE}/6.11.0/QtGui/rhi" "${COOKBOOK_SYSROOT}/include/" fi -export CPPFLAGS="${CPPFLAGS} -I${COOKBOOK_SYSROOT}/include -I${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" -export CFLAGS="${CFLAGS} -I${COOKBOOK_SYSROOT}/include -I${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" -export CXXFLAGS="${CXXFLAGS} -I${COOKBOOK_SYSROOT}/include -I${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" +export CPPFLAGS="${CPPFLAGS} -I${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" +export CFLAGS="${CFLAGS} -I${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" +export CXXFLAGS="${CXXFLAGS} -I${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" python - <<'PY' import os @@ -185,10 +185,10 @@ cmake "${COOKBOOK_SOURCE}" \ 
-DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_PREFIX_PATH="${COOKBOOK_SYSROOT}" \ - -DCMAKE_C_STANDARD_INCLUDE_DIRECTORIES="${COOKBOOK_SYSROOT}/include;${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" \ - -DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES="${COOKBOOK_SYSROOT}/include;${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" \ + -DCMAKE_C_STANDARD_INCLUDE_DIRECTORIES="${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" \ + -DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES="${COOKBOOK_SYSROOT}/include/QtGui/6.11.0/QtGui" \ -DCMAKE_C_FLAGS="${CFLAGS}" \ - -DCMAKE_CXX_FLAGS="${CXXFLAGS} -I${COOKBOOK_SYSROOT}/include" \ + -DCMAKE_CXX_FLAGS="${CXXFLAGS}" \ -DQT_BUILD_EXAMPLES=OFF \ -DQT_BUILD_TESTS=OFF \ -DQT_GENERATE_SBOM=OFF \ diff --git a/recipes/wip/wayland/libwayland/recipe.toml b/recipes/wip/wayland/libwayland/recipe.toml index c77142dc..5f6498a3 100644 --- a/recipes/wip/wayland/libwayland/recipe.toml +++ b/recipes/wip/wayland/libwayland/recipe.toml @@ -15,10 +15,24 @@ dependencies = [ script = """ DYNAMIC_INIT -if [ -d "${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage/usr/include" ]; then +RELIBC_INCLUDE_STAGE="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage/usr/include" +if [ ! -f "${RELIBC_INCLUDE_STAGE}/sys/signalfd.h" ]; then + RELIBC_INCLUDE_STAGE="${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage.tmp/usr/include" +fi + +if [ -d "${RELIBC_INCLUDE_STAGE}" ]; then mkdir -p "${COOKBOOK_SYSROOT}/include" "${COOKBOOK_SYSROOT}/usr/include" - cp -a "${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage/usr/include/." "${COOKBOOK_SYSROOT}/include/" - cp -a "${COOKBOOK_ROOT}/recipes/core/relibc/target/${TARGET}/stage/usr/include/." "${COOKBOOK_SYSROOT}/usr/include/" + cp -a "${RELIBC_INCLUDE_STAGE}/." "${COOKBOOK_SYSROOT}/include/" + cp -a "${RELIBC_INCLUDE_STAGE}/." 
"${COOKBOOK_SYSROOT}/usr/include/" + mkdir -p "${COOKBOOK_SYSROOT}/include/sys" "${COOKBOOK_SYSROOT}/usr/include/sys" + for header in signalfd.h timerfd.h eventfd.h; do + if [ -f "${RELIBC_INCLUDE_STAGE}/sys/${header}" ]; then + cp -f "${RELIBC_INCLUDE_STAGE}/sys/${header}" \ + "${COOKBOOK_SYSROOT}/include/sys/${header}" + cp -f "${RELIBC_INCLUDE_STAGE}/sys/${header}" \ + "${COOKBOOK_SYSROOT}/usr/include/sys/${header}" + fi + done fi python - <<'PY' @@ -45,44 +59,159 @@ event_text = event_loop.read_text() event_text = event_text.replace( '''#include #ifdef __redox__ -#include -#ifndef SFD_CLOEXEC -#define SFD_CLOEXEC O_CLOEXEC -#endif -#ifndef SFD_NONBLOCK -#define SFD_NONBLOCK O_NONBLOCK -#endif -struct signalfd_siginfo { - uint8_t pad[128]; -}; -int signalfd(int fd, const sigset_t *mask, int flags); -#ifndef TFD_CLOEXEC -#define TFD_CLOEXEC O_CLOEXEC -#endif -#ifndef TFD_NONBLOCK -#define TFD_NONBLOCK O_NONBLOCK -#endif -#ifndef TFD_TIMER_ABSTIME -#define TFD_TIMER_ABSTIME TIMER_ABSTIME -#endif -int timerfd_create(int clockid, int flags); -int timerfd_settime(int fd, int flags, const struct itimerspec *new_value, struct itimerspec *old_value); +#include +#include #else #include #include #endif''', '''#include -#include -#include ''', -) -event_text = event_text.replace( - '''#include -#include -#include ''', - '''#include #ifdef __redox__ -#include -#include +#include +#include + +#ifndef SFD_CLOEXEC +#define SFD_CLOEXEC O_CLOEXEC +#endif + +#ifndef SFD_NONBLOCK +#define SFD_NONBLOCK O_NONBLOCK +#endif + +struct signalfd_siginfo { + uint8_t pad[128]; +}; + +#ifndef TFD_CLOEXEC +#define TFD_CLOEXEC O_CLOEXEC +#endif + +#ifndef TFD_NONBLOCK +#define TFD_NONBLOCK O_NONBLOCK +#endif + +#ifndef TFD_TIMER_ABSTIME +#define TFD_TIMER_ABSTIME 0x1 +#endif + +int signalfd4(int fd, const sigset_t *mask, uintptr_t masksize, int flags); +int signalfd(int fd, const sigset_t *mask, uintptr_t masksize); +int timerfd_create(int clockid, int flags); +int timerfd_gettime(int fd, 
struct itimerspec *curr); +int timerfd_settime(int fd, int flags, const struct itimerspec *new_value, struct itimerspec *old_value); + +int signalfd4(int fd, const sigset_t *mask, uintptr_t masksize, int flags) +{ + const int supported = SFD_CLOEXEC | SFD_NONBLOCK; + int new_fd = fd; + + if ((flags & ~supported) != 0) { + errno = EINVAL; + return -1; + } + if (mask == NULL || masksize != sizeof(*mask)) { + errno = EINVAL; + return -1; + } + + if (fd == -1) { + int oflag = O_RDWR; + if (flags & SFD_CLOEXEC) + oflag |= O_CLOEXEC; + if (flags & SFD_NONBLOCK) + oflag |= O_NONBLOCK; + new_fd = open("/scheme/event", oflag); + if (new_fd < 0) + return -1; + } else { + if ((flags & SFD_CLOEXEC) && fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) + return -1; + if (flags & SFD_NONBLOCK) { + int current = fcntl(fd, F_GETFL, 0); + if (current < 0) + return -1; + if (fcntl(fd, F_SETFL, current | O_NONBLOCK) < 0) + return -1; + } + } + + if (sigprocmask(SIG_BLOCK, mask, NULL) < 0) { + if (fd == -1) + close(new_fd); + return -1; + } + + return new_fd; +} + +int signalfd(int fd, const sigset_t *mask, uintptr_t masksize) +{ + return signalfd4(fd, mask, masksize, 0); +} + +int timerfd_create(int clockid, int flags) +{ + const int supported = TFD_CLOEXEC | TFD_NONBLOCK; + int oflag = O_RDWR; + char path[64]; + + if ((flags & ~supported) != 0) { + errno = EINVAL; + return -1; + } + if (flags & TFD_CLOEXEC) + oflag |= O_CLOEXEC; + if (flags & TFD_NONBLOCK) + oflag |= O_NONBLOCK; + + snprintf(path, sizeof(path), "/scheme/time/%d", clockid); + return open(path, oflag); +} + +int timerfd_gettime(int fd, struct itimerspec *curr) +{ + ssize_t bytes_read; + + if (curr == NULL) { + errno = EFAULT; + return -1; + } + + curr->it_interval = (struct timespec){0}; + bytes_read = read(fd, &curr->it_value, sizeof(struct timespec)); + if (bytes_read != (ssize_t)sizeof(struct timespec)) { + if (bytes_read >= 0) + errno = EIO; + return -1; + } + + return 0; +} + +int timerfd_settime(int fd, int flags, const struct 
itimerspec *new_value, struct itimerspec *old_value) +{ + ssize_t bytes_written; + + if (flags & ~TFD_TIMER_ABSTIME) { + errno = EINVAL; + return -1; + } + if (new_value == NULL) { + errno = EFAULT; + return -1; + } + if (old_value != NULL && timerfd_gettime(fd, old_value) < 0) + return -1; + + bytes_written = write(fd, &new_value->it_value, sizeof(struct timespec)); + if (bytes_written != (ssize_t)sizeof(struct timespec)) { + if (bytes_written >= 0) + errno = EIO; + return -1; + } + + return 0; +} #else #include #include @@ -97,6 +226,45 @@ server_text = server_text.replace( #include ''', '''#include #ifdef __redox__ + +#ifndef EFD_CLOEXEC +#define EFD_CLOEXEC O_CLOEXEC +#endif + +#ifndef EFD_NONBLOCK +#define EFD_NONBLOCK O_NONBLOCK +#endif + +#ifndef EFD_SEMAPHORE +#define EFD_SEMAPHORE 0x1 +#endif + +int eventfd(unsigned int initval, int flags) +{ + const int supported = EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE; + int oflag = O_RDWR; + char path[64]; + + if ((flags & ~supported) != 0) { + errno = EINVAL; + return -1; + } + if (flags & EFD_CLOEXEC) + oflag |= O_CLOEXEC; + if (flags & EFD_NONBLOCK) + oflag |= O_NONBLOCK; + + snprintf(path, sizeof(path), "/scheme/event/eventfd/%u/%d", + initval, (flags & EFD_SEMAPHORE) ? 
1 : 0); + return open(path, oflag); +} +#else +#include +#endif''', +) +server_text = server_text.replace( + '''#include +#ifdef __redox__ #ifndef EFD_CLOEXEC #define EFD_CLOEXEC O_CLOEXEC #endif @@ -106,6 +274,32 @@ server_text = server_text.replace( int eventfd(unsigned int initval, int flags); #else #include +#endif''', + '''#include +#ifdef __redox__ +#include + +int eventfd(unsigned int initval, int flags) +{ + const int supported = EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE; + int oflag = O_RDWR; + char path[64]; + + if ((flags & ~supported) != 0) { + errno = EINVAL; + return -1; + } + if (flags & EFD_CLOEXEC) + oflag |= O_CLOEXEC; + if (flags & EFD_NONBLOCK) + oflag |= O_NONBLOCK; + + snprintf(path, sizeof(path), "/scheme/event/eventfd/%u/%d", + initval, (flags & EFD_SEMAPHORE) ? 1 : 0); + return open(path, oflag); +} +#else +#include #endif''', ) server.write_text(server_text) @@ -127,6 +321,42 @@ FILE *open_memstream(char **bufp, size_t *sizep); #include "wayland-util.h"''', ) +connection_text = connection_text.replace( + '''void +wl_closure_print(struct wl_closure *closure, struct wl_object *target, + int send, int discarded, uint32_t (*n_parse)(union wl_argument *arg), + const char *queue_name) +{ + int i;''', + '''void +wl_closure_print(struct wl_closure *closure, struct wl_object *target, + int send, int discarded, uint32_t (*n_parse)(union wl_argument *arg), + const char *queue_name) +{ +#ifdef __redox__ + (void)closure; + (void)target; + (void)send; + (void)discarded; + (void)n_parse; + (void)queue_name; + return; +#else + int i;''', +) +connection_text = connection_text.replace( + ''' if (fclose(f) == 0) { + fprintf(stderr, "%s", buffer); + free(buffer); + } +}''', + ''' if (fclose(f) == 0) { + fprintf(stderr, "%s", buffer); + free(buffer); + } +#endif +}''', +) connection.write_text(connection_text) PY diff --git a/scripts/fetch-all-sources.sh b/scripts/fetch-all-sources.sh index 3514b355..7e49260d 100755 --- a/scripts/fetch-all-sources.sh +++ 
b/scripts/fetch-all-sources.sh @@ -32,7 +32,7 @@ REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" cd "$REPO_ROOT" REPO_BIN="./target/release/repo" -CONFIG_NAME="${1:-redbear-kde}" +CONFIG_NAME="${1:-redbear-full}" ACTION="fetch" # ── Colors (disabled when not a terminal) ─────────────────────────── @@ -58,8 +58,8 @@ usage() { echo " --force Force re-download even if checksums match" echo " --help Show this help" echo "" - echo "Configs: redbear-kde, redbear-live, redbear-full, redbear-minimal, redbear-wayland" - echo "Default config: redbear-kde" + echo "Configs: redbear-mini, redbear-live-mini, redbear-full, redbear-live-full" + echo "Default config: redbear-full" } ALL_CONFIGS=0