diff --git a/.config b/.config new file mode 100644 index 00000000..d0f52b77 --- /dev/null +++ b/.config @@ -0,0 +1,2 @@ +PODMAN_BUILD?=0 +REDBEAR_RELEASE?=0.1.0 diff --git a/.gitignore b/.gitignore index 5ce8c195..5a4f1c54 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,5 @@ /build/ /prefix/ -.config **/my_* .idea/ @@ -83,7 +82,8 @@ local/cache/pkgar/ !local/cache/pkgar/** Packages/redbear-firmware.pkgar packages/ -sources/ +sources/x86_64-unknown-redox/ +sources/*.tar.gz local/linux-kernel-cache/ local/recipes/kde/kwin/** !local/recipes/kde/kwin/recipe.toml diff --git a/AGENTS.md b/AGENTS.md index 5bb7a5f2..8db01b4c 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -11,17 +11,13 @@ Red Bear OS build system orchestrator — fetches, builds, and packages ~100+ Gi into a bootable Redox image. Uses a Makefile + Rust "cookbook" tool + TOML configs. Languages: Rust (core), C (ported packages), TOML (config), Make (build orchestration). -RedBearOS should be treated as an overlay distribution on top of Redox in the same way Ubuntu -relates to Debian: +RedBearOS is a **full fork** of Redox OS — based on frozen, archived source snapshots. +Sources are immutable and never automatically refreshed from upstream. All changes are explicit, +human-initiated operations. Durable Red Bear state belongs in `local/patches/`, +`local/recipes/`, `local/docs/`, and tracked Red Bear configs. -- Redox is upstream -- Red Bear carries integration, packaging, validation, and subsystem overlays on top -- upstream-owned source trees are refreshable working copies -- durable Red Bear state belongs in `local/patches/`, `local/recipes/`, `local/docs/`, and tracked - Red Bear configs - -If we can fetch refreshed upstream sources, reapply our overlays, and rebuild successfully, the -project is in the right shape for long-term maintenance. +The current baseline is **Red Bear OS 0.1.0** (Redox snapshot at build-system commit `f55acba68`). 
+All recipe sources are pinned and archived in `sources/redbear-0.1.0/`. ## STRUCTURE @@ -172,9 +168,9 @@ only inside a fetched source tree is not preserved. 2. **Wire the patch** into the recipe's `recipe.toml` `patches = [...]` list. 3. **Commit** the patch file and recipe change before the session ends. -**Why:** `make distclean`, `make clean`, upstream source refreshes, and `sync-upstream.sh` all -discard or replace `recipes/*/source/` trees. Only `local/patches/`, `local/recipes/`, tracked -configs, and `local/docs/` survive. +**Why:** `make distclean`, `make clean`, and source restore operations all +discard or replace `recipes/*/source/` trees. Only `local/patches/`, `local/recipes/`, +tracked configs, `local/docs/`, and `sources/redbear-0.1.0/` survive. **Examples of changes that require immediate patching:** @@ -255,24 +251,20 @@ local/patches/ | Script | Purpose | |--------|---------| | `local/scripts/apply-patches.sh` | Apply all build-system patches + create recipe symlinks | -| `local/scripts/sync-upstream.sh` | Fetch upstream + rebase Red Bear OS commits + verify symlinks | +| `local/scripts/provision-release.sh` | Provision new release from Redox ref + archive sources | +| `local/scripts/check-upstream-releases.sh` | Check for new Redox snapshots (read-only) | -### Updating from Upstream +### Release Model (Fork) + +Red Bear OS is a full fork based on frozen Redox snapshots. Sources are immutable +and never automatically refreshed. The current baseline is 0.1.0. 
```bash -# Automated (preferred): -./local/scripts/sync-upstream.sh # Rebase Red Bear OS onto latest upstream -./local/scripts/sync-upstream.sh --dry-run # Preview conflicts first +# Check for newer Redox snapshots (read-only, zero side effects): +./local/scripts/check-upstream-releases.sh -# Manual: -git remote add upstream-redox https://github.com/redox-os/redox.git # once -git fetch upstream-redox master -git rebase upstream-redox/master # replays Red Bear OS commits on new upstream - -# Nuclear option (if rebase fails badly): -git rebase --abort -git reset --hard upstream-redox/master -./local/scripts/apply-patches.sh --force # apply from scratch via patch files +# Provision a new release (explicit, human-initiated only): +./local/scripts/provision-release.sh --ref=<commit> --release=0.2.0 --dry-run ``` ## AMD-FIRST INTEGRATION PATH @@ -342,7 +334,7 @@ Phase 1 (runtime substrate) → Phase 2 (software compositor) → Phase 3 (KWin 6. `redbear-sessiond` — `local/recipes/system/redbear-sessiond/source/` — Rust D-Bus session broker exposing `org.freedesktop.login1` subset for KWin (uses `zbus`) 7. `redbear-dbus-services` — `local/recipes/system/redbear-dbus-services/` — D-Bus activation `.service` files and XML policy files for system and session buses -All custom work goes in `local/` — see `local/AGENTS.md` for overlay usage. +All custom work goes in `local/` — see `local/AGENTS.md` for fork model usage. ## NOTES diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 89cf29e7..a953843e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -76,12 +76,12 @@ You can read the best practices and guidelines on the [Best practices and guidel ## Repository Model for Contributors -RedBearOS should be treated as an overlay distribution on top of Redox, in the same way Ubuntu +RedBearOS should be treated as a full fork on top of Redox, in the same way Ubuntu relates to Debian. 
That means contributors should keep this separation in mind: -- upstream-owned trees such as `recipes/*/source/` are refreshable working copies, +- upstream-owned trees such as `recipes/*/source/` are immutable archived release snapshots, - durable Red Bear-specific work belongs in `local/patches/`, `local/recipes/`, `local/docs/`, and tracked Red Bear configs, - if a change exists only in an upstream-owned source tree, it is not yet preserved properly for @@ -95,7 +95,7 @@ upstream promotes it to first-class status. So for contributors: - upstream WIP may still be a useful input/reference, -- but fixes intended for Red Bear shipping should normally land in the Red Bear overlay, +- but fixes intended for Red Bear shipping should normally land in the Red Bear release fork, - and when upstream later catches up, Red Bear should prefer upstream and retire local patches or local recipe copies that are no longer needed. @@ -129,7 +129,7 @@ Since **Rust** is a relatively small and new language compared to others like C Please follow our [Git style](https://doc.redox-os.org/book/creating-proper-pull-requests.html) for pull requests. -For user-visible work, keep the root [`CHANGELOG.md`](CHANGELOG.md) current and refresh the +For user-visible work, keep the root [`CHANGELOG.md`](CHANGELOG.md) current and update the README "What's New" section with the latest highlights so GitHub visitors can immediately see what changed. diff --git a/Makefile b/Makefile index d670ff28..f7b1961d 100644 --- a/Makefile +++ b/Makefile @@ -118,6 +118,9 @@ endif # PODMAN_BUILD # unfetch local overlay recipes unless REDBEAR_ALLOW_LOCAL_UNFETCH=1 is set. # This is the safe default for Red Bear OS. local/ is NEVER deleted. distclean: +ifneq ($(REDBEAR_RELEASE),) + $(error distclean is disabled in release mode (REDBEAR_RELEASE=$(REDBEAR_RELEASE)). Sources are immutable. 
Use: make clean (build artifacts only, safe)) +endif ifeq ($(PODMAN_BUILD),1) ifneq ("$(wildcard $(CONTAINER_TAG))","") $(PODMAN_RUN) make $@ diff --git a/README.md b/README.md index f9d0eeb5..76b99388 100644 --- a/README.md +++ b/README.md @@ -16,14 +16,13 @@ --- -Red Bear OS is a derivative of [Redox OS](https://www.redox-os.org) — a general-purpose, Unix-like, microkernel-based operating system written in Rust. It tracks upstream Redox, incorporating its improvements while adding custom drivers, filesystems, and hardware support. +Red Bear OS is a derivative of [Redox OS](https://www.redox-os.org) — a general-purpose, Unix-like, microkernel-based operating system written in Rust. It is a full fork based on frozen Redox snapshots, adding custom drivers, filesystems, and hardware support. -RedBearOS should be understood as an overlay distribution on top of Redox in the same way Ubuntu -relates to Debian: +RedBearOS is a **full fork** of Redox OS — based on frozen, archived source snapshots at release 0.1.0. - Redox is upstream -- Red Bear carries integration, packaging, validation, and subsystem overlays on top -- upstream-owned source trees are refreshable working copies +- Red Bear carries integration, packaging, validation, and subsystem release-fork work on top +- upstream-owned source trees are immutable archived release snapshots - durable Red Bear state belongs in `local/patches/`, `local/recipes/`, `local/docs/`, and tracked Red Bear configs @@ -31,26 +30,26 @@ Operational resilience policy: - package/source usage is local-first by default, - local copies are used continuously for builds and recovery workflows, -- upstream package refresh is performed only when explicitly requested. +- upstream package provisioning is performed only when explicitly requested. For **upstream WIP recipes specifically**, Red Bear uses a stricter rule: 1. once an upstream recipe or subsystem is still marked WIP, Red Bear treats it as a local project -2. 
we copy, fix, validate, and ship that work from our local overlay until it is stable enough for us +2. we copy, fix, validate, and ship that work from our local release fork until it is stable enough for us 3. we continue updating our local copy from upstream WIP work when useful, but we do not rely on the upstream WIP recipe itself as our shipped source of truth 4. once upstream removes the WIP status and the recipe/subsystem becomes a first-class supported part of Redox, Red Bear reevaluates and should prefer the upstream version over the local copy -That policy exists so the project can pull refreshed upstream sources regularly and still rebuild -predictably from the Red Bear-owned overlay. +That policy exists so the project can restore archived upstream sources on demand and still rebuild +predictably from the Red Bear-owned release fork. ## What's New - KWin Wayland is now treated as the only intended Red Bear desktop direction in the tracked plans, build defaults, live profile wiring, and profile guidance. - KDE bring-up moved forward: the `redbear-full` desktop-capable surface carries the Qt6/KDE stack in-tree, and the KDE recipe tree is now populated. - Native Red Bear runtime tooling expanded with `redbear-info`, `redbear-hwutils` (`lspci`, `lsusb`), and a Redox-native `netctl` flow. -- Build and status docs were refreshed to distinguish current in-tree progress from older historical roadmap text. +- Build and status docs were updated to distinguish current in-tree progress from older historical roadmap text. See [CHANGELOG.md](./CHANGELOG.md) for the running user-visible change log. 
@@ -157,10 +156,10 @@ Current validation language should be read this way: ├── recipes/ # Package recipes (~100+ packages, 26 categories) ├── mk/ # Makefile build orchestration ├── src/ # Cookbook Rust tool (repo binary, cook logic) -├── local/ # ← Red Bear OS custom work (survives upstream updates) +├── local/ # ← Red Bear OS custom work (survives source provisioning) │ ├── patches/ # Kernel, base, relibc patches │ ├── recipes/ # Custom packages (drivers, GPU, system, branding) -│ ├── scripts/ # sync-upstream.sh, apply-patches.sh +│ ├── scripts/ # provision-release.sh, check-upstream-releases.sh │ ├── Assets/ # Branding (icon, boot background) │ └── docs/ # Integration documentation ├── docs/ # Architecture guides @@ -234,14 +233,24 @@ passive report over live system surfaces and is intended to help answer question Use `redbear-info --verbose` for evidence-backed human output, `redbear-info --json` for machine- readable diagnostics, and `redbear-info --test` for suggested follow-up commands. -## Sync with Upstream Redox +## Release Model (Full Fork) + +Red Bear OS is a **full fork** based on frozen Redox OS snapshots. Sources are immutable and never automatically refreshed from upstream. The current baseline is **0.1.0** (Redox snapshot at `f55acba68`). Build-dependent sources are archived in `sources/redbear-0.1.0/` (216 BLAKE3-verified archives). + +Builds are offline by default — no network access during compilation. ```bash -./local/scripts/sync-upstream.sh # Rebase onto latest Redox -./local/scripts/sync-upstream.sh --dry-run # Preview conflicts first +# Build from archived sources (offline by default) +./local/scripts/build-redbear.sh redbear-full + +# Check for newer Redox snapshots (read-only, zero side effects) +./local/scripts/check-upstream-releases.sh + +# Provision a new release (explicit, human-initiated only) +./local/scripts/provision-release.sh --ref=<commit> --release=0.2.0 --dry-run ``` -The `local/` directory is never touched by upstream updates. 
Recipe patches for kernel and base are symlinked from `local/patches/` — protected from `make clean` and `make distclean`. +The `local/` directory is never touched by any source restore operation. Recipe patches are symlinked from `local/patches/` — protected from `make clean` and `make distclean`. ## Resources diff --git a/docs/06-BUILD-SYSTEM-SETUP.md b/docs/06-BUILD-SYSTEM-SETUP.md index ef07c7fe..7a7b0cec 100644 --- a/docs/06-BUILD-SYSTEM-SETUP.md +++ b/docs/06-BUILD-SYSTEM-SETUP.md @@ -8,19 +8,18 @@ ## Repository Model Reminder -Build this repository using the Red Bear overlay model: +Build this repository using the Red Bear release fork model: -- upstream-owned source trees are refreshable working copies, +- sources are frozen, immutable release snapshots at baseline 0.1.0, - durable Red Bear state lives in `local/patches/`, `local/recipes/`, `local/docs/`, and tracked Red Bear configs, -- upstream WIP recipes are useful inputs, but should not automatically be treated as the durable - shipping source of truth for Red Bear. +- build from archived sources offline by default; provision new releases explicitly via provision-release.sh. Resilience policy for package/source inputs: - default build posture is local-first/offline-capable, -- local copies are used continuously unless upstream refresh is explicitly requested, -- upstream refresh is an explicit operation, not an implicit background requirement for normal +- local copies are used continuously unless release provisioning is explicitly requested, +- release provisioning is an explicit operation, not an implicit background requirement for normal builds. 
## Prerequisites @@ -210,11 +209,11 @@ sudo dd if=build/x86_64/harddrive.img of=/dev/sdX bs=4M status=progress ./target/release/repo cook recipes/wip/kde/kwin ``` -Under the Red Bear overlay model, remember: +Under the Red Bear release fork model, remember: -- `recipes/*/source/` is a refreshable working tree, +- `recipes/*/source/` is an immutable archived release snapshot, - Red Bear-owned shipping deltas should be preserved under `local/patches/` and `local/recipes/`, -- if a recipe is still upstream WIP, Red Bear may still choose to ship from `local/recipes/` instead. +- sources are built offline by default; provision new releases via provision-release.sh. ### Understanding Recipe Format @@ -264,7 +263,7 @@ cp target/release/myapp ${COOKBOOK_STAGE}/usr/bin/ | `PREFIX_BINARY` | `1` | Use prebuilt toolchain (faster) | | `REPO_BINARY` | `0` | Use prebuilt packages (faster, no compilation) | | `REPO_NONSTOP` | `0` | Continue on build errors | -| `REPO_OFFLINE` | `0` | Don't update source repos; Red Bear policy treats local-first sourcing as the normal operating mode and upstream refresh as explicit opt-in | +| `REPO_OFFLINE` | `0` | Don't update source repos; Red Bear policy treats local-first sourcing as the normal operating mode and release provisioning as explicit opt-in | ### Environment Variables for Recipes diff --git a/docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md b/docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md index cae24b7c..21c79d72 100644 --- a/docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md +++ b/docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md @@ -17,26 +17,26 @@ Detailed subsystem planning remains in focused documents under `local/docs/`. ## Repository Model -RedBearOS should be understood as an overlay distribution on top of Redox in the same way Ubuntu +RedBearOS should be understood as a full fork on top of Redox in the same way Ubuntu relates to Debian. - Redox is upstream. -- Red Bear carries integration, packaging, validation, and subsystem overlays on top. 
-- Upstream-owned source trees are refreshable working copies. +- Red Bear carries integration, packaging, validation, and subsystem release-fork work on top. +- Upstream-owned source trees are immutable archived release snapshots. - Durable Red Bear state belongs in `local/patches/`, `local/recipes/`, `local/docs/`, and tracked Red Bear configs. -The project is in the right long-term shape only when refreshed upstream sources can be fetched, -Red Bear overlays can be reapplied, and the project still rebuilds successfully. +The project is in the right long-term shape only when archived upstream sources can be restored, +Red Bear release-fork changes can be reapplied, and the project still rebuilds successfully. ## Ownership Rules ### Upstream-owned layer -These are refreshable working inputs, not durable Red Bear storage: +These are immutable archived release sources, not durable Red Bear storage: - `recipes/*/source/` -- most of `recipes/` outside local overlay symlinks +- most of `recipes/` outside local release-fork symlinks - mainline configs such as `config/desktop.toml` and `config/minimal.toml` - generated build outputs under `target/`, `build/`, `repo/`, and recipe-local `target/*` @@ -63,7 +63,7 @@ If an upstream recipe or subsystem is still marked WIP, Red Bear treats it as a That means: 1. upstream WIP can be used as an input and reference, -2. but Red Bear should fix and ship from the local overlay while the work is still WIP, +2. but Red Bear should fix and ship from the local release fork while the work is still WIP, 3. and once upstream promotes that work to first-class supported status, Red Bear should reevaluate and prefer upstream where appropriate. @@ -80,7 +80,7 @@ That means: - functionality is delivered as packages, - profiles are composed from packages and package groups, -- integration should prefer packaging, configuration, and overlays over invasive upstream rewrites. 
+integration should prefer packaging, configuration, and release-fork layering over invasive upstream rewrites. ### Validation over claims @@ -148,11 +148,11 @@ The current repo is no longer at a greenfield or “missing everything” stage. The current evidence-backed baseline is: -- the Red Bear overlay model is documented and in active use, +- the Red Bear release fork model is documented and in active use, - major local subsystem plans exist under `local/docs/`, - native wired networking is present, - Qt6 and major downstream desktop dependencies build, -- Wayland-facing relibc compatibility surfaces now rebuild from a refreshed upstream relibc source +- Wayland-facing relibc compatibility surfaces now rebuild from an immutable archived upstream relibc source tree via local patch carriers, - `libwayland` and `qtbase` build successfully from the reconstructed relibc state, - the Red Bear-native greeter/login path now has a bounded passing runtime proof, while broader KDE/KWin session stability is still not yet a general runtime claim, @@ -170,7 +170,7 @@ ordering. The current repository-wide work order is: -1. repository discipline and overlay hygiene +1. repository discipline and release fork hygiene 2. reproducible profiles and validation surfaces 3. low-level controller and IRQ quality 4. USB maturity @@ -202,27 +202,27 @@ order. ## Workstreams -### 1. Repository discipline and overlay hygiene +### 1. Repository discipline and release fork hygiene Goal: - keep Red Bear-specific work identifiable, -- keep upstream refresh predictable, -- ensure durable overlays exist for active Red Bear-owned deltas, +- keep release provisioning predictable, +- ensure durable release-fork carriers exist for active Red Bear-owned deltas, - keep WIP migration logic explicit. 
Current state: -- overlay model is documented, -- relibc preservation/reapply proof exists, +- release fork model is documented, +- relibc preservation and patch application proof exists, - WIP ownership policy is documented, - documentation still needs cleaner indexing and some historical pruning. Acceptance: -- refreshed upstream sources can be re-overlaid and rebuilt predictably, +- sources are provisioned via provision-release.sh and rebuilt predictably, - the canonical/current-vs-historical split is visible in docs, -- active Red Bear-owned deltas are preserved outside refreshable source trees. +- active Red Bear-owned deltas are preserved in local/patches and local/recipes. ### 2. Profiles and packaging @@ -435,9 +435,9 @@ Do not compress these into a single “supported” claim. The highest-value documentation follow-ups from the current state are: 1. add a clearer document-status matrix in `docs/README.md`, -2. add a WIP migration ledger for major upstream-WIP-to-local-overlay transitions, +2. add a WIP migration ledger for major upstream-WIP-to-local-release-fork transitions, 3. add a concise script behavior matrix for sync/fetch/apply/build helper scripts, -4. continue pruning obsolete local overlays only after refreshed-upstream reapply proofs confirm +4. continue pruning obsolete local release-fork copies only after release provisioning proofs confirm upstream coverage is sufficient. ## Bottom Line @@ -445,11 +445,11 @@ Red Bear OS is no longer at the stage where the main question is “can we start?”. The current state is a transition from compile-oriented subsystem accumulation toward a stricter, -profile-driven, overlay-disciplined, evidence-backed system project. The implementation plan must now +profile-driven, release-fork-disciplined, evidence-backed system project. 
The implementation plan must now optimize for: -- predictable upstream refresh, -- durable local overlays, +- predictable release provisioning, +- a durable local release fork, - honest support language, - and execution order that respects the real blocker chain. diff --git a/docs/README.md b/docs/README.md index 1f325049..4b43ae39 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,6 +1,6 @@ # Red Bear OS Documentation Index -Technical documentation for Red Bear OS as an overlay distribution on top of Redox OS. +Technical documentation for Red Bear OS as a full fork on top of Redox OS. This index is the entry point for the documentation set. Its main job is to make the current/canonical versus historical/reference split obvious. @@ -21,13 +21,13 @@ current/canonical versus historical/reference split obvious. > **Repository model:** RedBearOS relates to Redox in the same way Ubuntu relates to Debian. > Upstream Redox remains the base platform; Red Bear carries packaging, patch, validation, and -> subsystem overlays on top. For long-term stability, upstream-owned source trees should be treated -> as refreshable working copies, while durable Red Bear state belongs in `local/patches/`, +> subsystem release-fork work on top. For long-term stability, upstream-owned source trees should be treated +> as immutable archived release snapshots, while durable Red Bear state belongs in `local/patches/`, > `local/recipes/`, `local/docs/`, and tracked Red Bear configs. > > **WIP policy:** if an upstream recipe or subsystem is still marked WIP, Red Bear treats it as a -> local project until upstream promotes it to first-class status. 
We may still draw from upstream WIP, +> but we should fix and ship from the Red Bear release fork until upstream support is real enough to > replace the local copy. ## Document Status Matrix diff --git a/local/AGENTS.md b/local/AGENTS.md index 0b9b5aec..dae94f3d 100644 --- a/local/AGENTS.md +++ b/local/AGENTS.md @@ -5,11 +5,11 @@ updates (`git pull` on the build system repo), this directory is untouched. ## DESIGN PRINCIPLE -Red Bear OS relates to Redox OS in the same way Ubuntu relates to Debian: - - We track Redox OS as upstream, merging changes regularly - - We add custom packages, drivers, configs, and branding on top - - The `local/` directory is our overlay — untouched by upstream updates +Red Bear OS is a **full fork** based on frozen Redox OS snapshots: + - We baseline on a specific Redox OS state and work from immutable, archived sources + - The `local/` directory contains our custom work — untouched by any source restore operation - First-class configs use `redbear-*` naming (not `my-*`, which is gitignored) + - Sources are NEVER refreshed automatically from upstream — all changes are explicit, human-initiated ## FREE/LIBRE SOFTWARE POLICY @@ -25,14 +25,21 @@ Build flow: make all CONFIG_NAME=redbear-full → mk/config.mk resolves to the active desktop/graphics compile target → Desktop/graphics are available only on redbear-full - → repo cook builds all packages including our custom ones + → repo cook builds all packages from local sources (offline by default) → mk/disk.mk creates harddrive.img with Red Bear branding + → REDBEAR_RELEASE=0.1.0 ensures immutable, archived sources ``` -Update flow: +Release flow: ``` -./local/scripts/sync-upstream.sh # Rebase onto upstream Redox + verify symlinks -make all CONFIG_NAME=redbear-full # Rebuild the active desktop/graphics target +# Sources are immutable — build from archives, never from network +./local/scripts/build-redbear.sh redbear-full + +# Check for newer Redox snapshots (read-only, no side effects): 
+./local/scripts/check-upstream-releases.sh + +# Provision a new release (explicit, human-initiated only): +./local/scripts/provision-release.sh --ref=<commit> --release=0.2.0 ``` ## ACTIVE COMPILE TARGETS @@ -46,21 +53,44 @@ and `make live` (ISO): Desktop/graphics are available only on `redbear-full`. -## TRACKING UPSTREAM (SYNC WITH REDOX OS) -Red Bear OS tracks the Redox OS build system as upstream. The `local/` directory -survives upstream updates untouched. +## RELEASE MODEL (FORK — NOT OVERLAY) + +Red Bear OS sources are frozen at release 0.1.0. Sources are immutable and archived in +`sources/redbear-0.1.0/`. Network access during builds is disabled by default. + +### How releases work: +- **Current baseline:** 0.1.0 (snapshot of Redox at build-system commit `f55acba68`) +- **All recipe sources are pinned** with `rev = "..."` in `recipe.toml` +- **Archives are stored** in `sources/redbear-0.1.0/` with a manifest and BLAKE3 checksums +- **Builds are offline by default** — `REPO_OFFLINE=1 COOKBOOK_OFFLINE=true` +- **New releases are provisioned explicitly** via `provision-release.sh`, never automatically +- **Old releases are NEVER deleted** — each new release is added alongside existing ones + +### Checking for new Redox snapshots: +```bash +./local/scripts/check-upstream-releases.sh # Read-only, zero side effects +``` + +### Provisioning a new release: +```bash +./local/scripts/provision-release.sh --ref=<commit> --release=0.2.0 [--dry-run] +``` + +### Restoring sources from archives: +```bash +./local/scripts/restore-sources.sh --release=0.1.0 +``` ## SOURCE-OF-TRUTH RULE (VERY IMPORTANT) Treat the repository as two different layers with different durability guarantees: -### 1. Upstream-owned layer — disposable, refreshable every day +### 1. 
Source archive layer — immutable per release These paths are expected to be replaced, refetched, or regenerated when upstream changes: - `recipes/*/source/` -- most of `recipes/` outside our symlinked `local/recipes/*` overlays +- most of `recipes/` outside our symlinked `local/recipes/*` entries - `config/desktop.toml`, `config/minimal.toml`, and other mainline configs - generated build outputs under `target/`, `build/`, `repo/`, and recipe-local `target/*` @@ -68,16 +98,16 @@ For relibc specifically, **`recipes/core/relibc/source/` is upstream-owned worki Red Bear’s durable storage location. We may build and validate there, but we must not rely on that tree alone to preserve Red Bear work. -### 2. Red Bear-owned layer — durable, must survive upstream refresh +### 2. Red Bear-owned layer — durable, must survive release provisioning These paths are our actual long-term source of truth: - `local/patches/` — all durable changes to upstream-owned source trees -- `local/recipes/` — Red Bear recipe overlays and new packages +- `local/recipes/` — Red Bear release-fork recipes and new packages - `local/docs/` — Red Bear planning, validation, and integration documentation - tracked Red Bear configs such as `config/redbear-*.toml` -If we can fetch fresh upstream sources tomorrow, reapply `local/patches/*`, relink +If we can fetch fresh upstream sources tomorrow, provision sources from `sources/redbear-<release>/`, verify `local/recipes/*`, and rebuild successfully, then the work is in the right place. 
If a change exists only inside an upstream-owned `recipes/*/source/` tree, then it is **not yet @@ -94,7 +124,7 @@ That means: - if upstream lands an equivalent or better solution, prefer upstream and shrink or drop our local patch - do not keep a Red Bear patch just because it existed first; keep it only while it still provides unique value -For relibc specifically, patch carriers should be treated as **temporary compatibility overlays**, +For relibc specifically, patch carriers should be treated as **temporary compatibility layers**, not a permanent fork strategy. When upstream Redox already provides a package, crate, or subsystem for functionality that also @@ -117,12 +147,12 @@ For any change to upstream-owned source: 1. make the minimal working change in the live source tree if needed for validation 2. prove it builds/tests against the real recipe 3. mirror that delta into `local/patches/<recipe>/...` -4. update `local/docs/...` so the rebuild/reapply story is explicit +4. update `local/docs/...` so the provisioning story is explicit 5. assume the live upstream source tree may be thrown away and recreated at any time The success criterion is therefore: -> We can pull renewed upstream sources every day, reapply Red Bear’s local overlays, and still +> We can restore sources provisioned via provision-release.sh from sources/redbear-<release>/ and still > build the project successfully. ### Local recipe priority vs upstream WIP @@ -130,28 +160,14 @@ When Red Bear maintains a local recipe and upstream contains a package with the same name under `recipes/wip/*`, Red Bear must prefer the local recipe unconditionally. 
-- Use the local overlay symlink in `recipes/*/ -> ../../local/recipes/...` +- Use the local release fork symlink in `recipes/*/ -> ../../local/recipes/...` - Do not switch back to upstream WIP for active Red Bear builds - Re-evaluate only when upstream package exits WIP and becomes a normal maintained package ```bash # Automated sync (preferred): -./local/scripts/sync-upstream.sh # Fetch + rebase + check patches -./local/scripts/sync-upstream.sh --dry-run # Preview conflicts before rebasing -./local/scripts/sync-upstream.sh --no-merge # Only check for patch conflicts - -# Manual sync: -git remote add upstream-redox https://github.com/redox-os/redox.git # First time only -git fetch upstream-redox master -git rebase upstream-redox/master - -# If rebase fails (nuclear option): -git rebase --abort -git reset --hard upstream-redox/master -./local/scripts/apply-patches.sh --force # Rebuild Red Bear OS changes from patch files - -# After sync: -cargo build --release # Rebuild cookbook +./local/scripts/check-upstream-releases.sh # Check for new Redox snapshots (read-only) +./local/scripts/provision-release.sh --ref=<commit> --release=0.2.0 --dry-run # Preview new release make all CONFIG_NAME=redbear-full # Rebuild OS ``` @@ -188,14 +204,14 @@ redox-master/ ← git pull updates mainline Redox │ ├── patches/ │ │ ├── kernel/ ← Kernel patches (ACPI, x2APIC) │ │ ├── base/ ← Base patches (acpid fixes, power methods, pcid /config endpoint) -│ │ ├── relibc/ ← relibc compatibility overlays still needed beyond upstream (eventfd, signalfd, timerfd, waitid, SysV IPC) +│ │ ├── relibc/ ← relibc compatibility patches still needed beyond upstream (eventfd, signalfd, timerfd, waitid, SysV IPC) │ │ ├── bootloader/ ← Bootloader patches │ │ └── installer/ ← Installer patches (ext4 filesystem support + GRUB bootloader) │ ├── Assets/ ← Branding assets (icon, loading background) │ │ └── images/ ← Red Bear OS icon (1254x1254) + loading bg (1536x1024) │ ├── firmware/ ← GPU firmware blobs (gitignored, 
fetched) │ ├── scripts/ -│ │ ├── sync-upstream.sh ← Sync with upstream Redox OS +│ │ ├── provision-release.sh ← Provision new release from Redox ref │ │ ├── build-redbear.sh ← Unified Red Bear OS build script │ │ ├── fetch-firmware.sh ← Download bounded AMD or Intel firmware subsets from linux-firmware │ │ ├── test-drm-display-runtime.sh ← Shared bounded DRM/KMS display validation harness @@ -568,7 +584,7 @@ local/Assets/ - **DO NOT** assume mainline recipe names won't conflict — prefix custom ones (e.g., `redox-`) - **DO NOT** use `my-*` naming for configs that should be tracked in git — use `redbear-*` instead - **DO NOT** edit config/base.toml directly — our configs include it and override via TOML merge -**DO NOT** forget to run sync-upstream.sh before major builds — stale upstream causes build failures +**DO NOT** attempt to refresh sources from upstream — sources are immutable; use provision-release.sh ## COMPREHENSIVE IMPLEMENTATION POLICY diff --git a/local/docs/BLUETOOTH-IMPLEMENTATION-PLAN.md b/local/docs/BLUETOOTH-IMPLEMENTATION-PLAN.md index 42b86381..37d18a52 100644 --- a/local/docs/BLUETOOTH-IMPLEMENTATION-PLAN.md +++ b/local/docs/BLUETOOTH-IMPLEMENTATION-PLAN.md @@ -189,7 +189,7 @@ live under `local/`: - validation helpers under `local/scripts/` - support-language and roadmap updates under `local/docs/` -That keeps the first implementation pass aligned with Red Bear's overlay model and rebase strategy. +That keeps the first implementation pass aligned with Red Bear's release fork model and rebase strategy. ### 3. Desktop parity is not the first milestone @@ -310,7 +310,7 @@ Some of the implementation targets below refer to upstream-managed trees such as In Red Bear, changes against those paths should be carried through the relevant patch carrier under `local/patches/` until intentionally upstreamed.
This plan names the technical integration point, -not a recommendation to edit upstream-managed trees outside Red Bear's normal overlay model. +not a recommendation to edit upstream-managed trees outside Red Bear's normal release fork model. ### Phase B0 — Scope Freeze and Support Model diff --git a/local/docs/PATCH-GOVERNANCE.md b/local/docs/PATCH-GOVERNANCE.md index 36840697..ca5fcc5e 100644 --- a/local/docs/PATCH-GOVERNANCE.md +++ b/local/docs/PATCH-GOVERNANCE.md @@ -56,7 +56,7 @@ When reordering patches, test the FULL chain: remove source, rebuild, verify. `recipes/core/base/recipe.toml` is git-tracked. Changes to it are durable. `recipes/core/base/source/` is a fetched working copy — destroyed by `make clean`, -`make distclean`, source refresh, and sync-upstream. +`make distclean`, source re-provisioning, and provision-release. Any change to source/ MUST be preserved as a patch in `local/patches/base/`. diff --git a/local/docs/PROFILE-MATRIX.md b/local/docs/PROFILE-MATRIX.md index 2d2eaaa8..f1589cba 100644 --- a/local/docs/PROFILE-MATRIX.md +++ b/local/docs/PROFILE-MATRIX.md @@ -67,9 +67,9 @@ All profiles produce outputs under `build/x86_64/`. Each profile gets its own di - Enables the shared `wired-dhcp` netctl profile by default for the VM/wired baseline. - Ships the shared firmware/input runtime service prerequisites so the early substrate can be tested on the smallest profile as well. -### Historical and experimental overlays +### Historical and experimental release forks -- Experimental overlays such as `redbear-bluetooth-experimental` and `redbear-wifi-experimental` +- Experimental release forks such as `redbear-bluetooth-experimental` and `redbear-wifi-experimental` are bounded validation slices layered on top of the tracked compile targets, not additional compile targets.
diff --git a/local/docs/SCRIPT-BEHAVIOR-MATRIX.md b/local/docs/SCRIPT-BEHAVIOR-MATRIX.md index 8ca56877..28c66966 100644 --- a/local/docs/SCRIPT-BEHAVIOR-MATRIX.md +++ b/local/docs/SCRIPT-BEHAVIOR-MATRIX.md @@ -3,7 +3,7 @@ ## Purpose This document centralizes what the main repository scripts do and do not handle under the Red Bear -overlay model. +release fork model. The goal is to remove guesswork from the sync/fetch/apply/build workflow. @@ -11,11 +11,11 @@ The goal is to remove guesswork from the sync/fetch/apply/build workflow. | Script | Primary role | What it handles | What it does **not** guarantee | |---|---|---|---| -| `local/scripts/sync-upstream.sh` | Refresh top-level upstream repo state | fetches upstream, reports conflict risk, rebases repo commits, reapplies build-system overlays via `apply-patches.sh` | does not automatically solve every subsystem overlay conflict; does not by itself make upstream WIP recipes safe shipping inputs | -| `local/scripts/apply-patches.sh` | Reapply durable Red Bear overlays | applies build-system patches, relinks recipe patch symlinks, relinks local recipe overlays into `recipes/` | does not fully rebase stale patch carriers; does not validate runtime behavior; does not decide WIP ownership for you | -| `local/scripts/build-redbear.sh` | Build Red Bear profiles from upstream base + local overlay | applies overlays, builds cookbook if needed, validates profile naming, launches the actual image build; only allows upstream recipe refresh when passed `--upstream` | does not guarantee every nested upstream source tree is fresh; does not replace explicit subsystem/runtime validation | -| `scripts/fetch-all-sources.sh` | Fetch mainline recipe source inputs for builds | downloads mainline/upstream recipe sources, reports status/preflight, and supports config-scoped fetches while leaving local overlays in place | does not mean fetched upstream WIP source is the durable shipping source of truth | -| 
`local/scripts/fetch-sources.sh` | Fetch mainline recipe sources for browsing and patching | when passed `--upstream`, fetches `recipes/*` source trees so the upstream-managed side is locally available for reading, editing, and patch preparation | does not decide whether upstream should replace the local overlay | +| `local/scripts/provision-release.sh` | Refresh top-level upstream repo state | fetches upstream, reports conflict risk, rebases repo commits, reapplies build-system release fork patches via `apply-patches.sh` | does not automatically solve every subsystem release fork conflict; does not by itself make upstream WIP recipes safe shipping inputs | +| `local/scripts/apply-patches.sh` | Reapply durable Red Bear release fork state | applies build-system patches, relinks recipe patch symlinks, relinks local recipe release fork directories into `recipes/` | does not fully rebase stale patch carriers; does not validate runtime behavior; does not decide WIP ownership for you | +| `local/scripts/build-redbear.sh` | Build Red Bear profiles from upstream base + local release fork | applies the release fork, builds cookbook if needed, validates profile naming, launches the actual image build; only allows upstream recipe refresh when passed `--upstream` | does not guarantee every nested upstream source tree is fresh; does not replace explicit subsystem/runtime validation | +| `scripts/fetch-all-sources.sh` | Fetch mainline recipe source inputs for builds | downloads mainline/upstream recipe sources, reports status/preflight, and supports config-scoped fetches while leaving the local release fork in place | does not mean fetched upstream WIP source is the durable shipping source of truth | +| `local/scripts/fetch-sources.sh` | Fetch mainline recipe sources for browsing and patching | when passed `--upstream`, fetches `recipes/*` source trees so the upstream-managed side is locally available for reading, editing, and patch preparation | does not decide whether upstream should replace the
local release fork | | `local/scripts/build-redbear-wifictl-redox.sh` | Build `redbear-wifictl` for the Redox target with the repo toolchain | prepends `prefix/x86_64-unknown-redox/sysroot/bin` to `PATH` and runs `cargo build --target x86_64-unknown-redox` in the `redbear-wifictl` crate | does not prove runtime Wi-Fi behavior; only closes the target-build environment gap for this crate | | `local/scripts/test-iwlwifi-driver-runtime.sh` | Exercise the bounded Intel driver lifecycle inside a target runtime | validates bounded probe/prepare/init/activate/scan/connect/disconnect/retry surfaces for `redbear-iwlwifi` on a live target runtime | does not prove real AP association, packet flow, DHCP success over Wi-Fi, or end-to-end connectivity | | `local/scripts/test-wifi-control-runtime.sh` | Exercise the bounded Wi-Fi control/profile lifecycle inside a target runtime | validates `/scheme/wifictl` control nodes, bounded connect/disconnect behavior, and profile-manager/runtime reporting surfaces on a live target runtime | does not prove real AP association or end-to-end connectivity | @@ -68,8 +68,8 @@ repo already contains `prefix/x86_64-unknown-redox/sysroot/bin/x86_64-unknown-re Default Red Bear behavior is local-first: -- use locally available package/source trees and overlay state for normal builds, -- treat upstream refresh as an explicit operator action only (`--upstream`, dedicated fetch/sync), +- use locally available package/source trees and release fork state for normal builds, +- treat upstream re-provisioning as an explicit operator action only (`--upstream`, dedicated fetch/sync), - do not fail policy-level expectations just because upstream network access is temporarily broken. This is required so builds and recovery workflows remain operable during upstream outages or @@ -77,14 +77,14 @@ connectivity failures. ### Upstream sync -Use `local/scripts/sync-upstream.sh` when the goal is to refresh the top-level upstream Redox base.
+Use `local/scripts/provision-release.sh` when the goal is to provision a new release from the top-level upstream Redox base. -This is a repository sync operation, not a guarantee that every local subsystem overlay is already +This is a repository sync operation, not a guarantee that every local subsystem release fork is already rebased cleanly. ### Overlay reapplication -Use `local/scripts/apply-patches.sh` when the goal is to reconstruct Red Bear’s overlay on top of a +Use `local/scripts/apply-patches.sh` when the goal is to reconstruct Red Bear’s release fork on top of a fresh upstream tree. This is the core durable-state recovery path. @@ -92,13 +92,13 @@ This is the core durable-state recovery path. ### Build execution Use `local/scripts/build-redbear.sh` when the goal is to build a tracked Red Bear profile from the -current upstream base plus local overlay. Add `--upstream` only when you explicitly want Redox/upstream -recipe sources refreshed during that build. +current upstream base plus local release fork. Add `--upstream` only when you explicitly want Redox/upstream +recipe sources re-provisioned during that build. -### Source refresh +### Source fetching Use `scripts/fetch-all-sources.sh` and `local/scripts/fetch-sources.sh --upstream` when the goal is to -refresh recipe source inputs, but do not confuse fetched upstream WIP source with a trusted shipping +fetch recipe source inputs, but do not confuse fetched upstream WIP source with a trusted shipping source. ## WIP Rule in Script Terms @@ -108,7 +108,7 @@ If a subsystem is still upstream WIP, the scripts should be interpreted this way - fetching upstream WIP source is allowed and useful through the explicit upstream fetch commands or `--upstream` where a wrapper requires it, - syncing upstream WIP source is allowed and useful through the explicit upstream sync command, -- but shipping decisions should still prefer the local overlay until upstream promotion and reevaluation happen.
+- but shipping decisions should still prefer the local release fork until upstream promotion and reevaluation happen. That means “script fetched it successfully” is not the same as “Red Bear should now ship upstream’s WIP version directly.” diff --git a/local/docs/WAYLAND-IMPLEMENTATION-PLAN.md b/local/docs/WAYLAND-IMPLEMENTATION-PLAN.md index 99f1480f..72d6cc1e 100644 --- a/local/docs/WAYLAND-IMPLEMENTATION-PLAN.md +++ b/local/docs/WAYLAND-IMPLEMENTATION-PLAN.md @@ -54,7 +54,7 @@ In scope: - evdevd / udev-shim / libinput / seatd integration as they affect Wayland, - Mesa/GBM/EGL software-path proof and the Wayland-facing graphics runtime, - KWin as the intended production Wayland compositor path, -- local overlay ownership decisions for Wayland components and validation harnesses. +- local release fork ownership decisions for Wayland components and validation harnesses. Out of scope: @@ -123,7 +123,7 @@ Rules: | Session path | seat/session proof bounded by QEMU validation; full hardware trust supplementary for KWin path | | Hardware graphics | no hardware-accelerated Wayland proof | | KWin truthfulness | reduced-feature real build exists; bounded runtime proof still requires Qt6Quick/QML downstream validation | -| WIP ownership | upstream WIP recipes and local overlays are mixed; forward path is not always explicit | +| WIP ownership | upstream WIP recipes and the local release fork are mixed; forward path is not always explicit | ## Stability / Completeness Verdict diff --git a/local/docs/XHCID-DEVICE-IMPROVEMENT-PLAN.md b/local/docs/XHCID-DEVICE-IMPROVEMENT-PLAN.md index c6935c72..61e5d79b 100644 --- a/local/docs/XHCID-DEVICE-IMPROVEMENT-PLAN.md +++ b/local/docs/XHCID-DEVICE-IMPROVEMENT-PLAN.md @@ -303,7 +303,7 @@ Close the loop with evidence, canonical docs, and durable patch carriers.
- update canonical docs: - `local/docs/USB-IMPLEMENTATION-PLAN.md` - `local/docs/USB-VALIDATION-RUNBOOK.md` -- refresh durable patch carriers under `local/patches/base/` +- update durable patch carriers under `local/patches/base/` - delete only clearly stale, superseded docs after link sweep ### Exit Criteria @@ -311,7 +311,7 @@ Close the loop with evidence, canonical docs, and durable patch carriers. - all bounded USB/xHCI proofs pass on a fresh image - changed files are diagnostics-clean - canonical docs match actual proof scope -- patch carrier is refreshed and reapplicable +- patch carrier is updated and reapplicable ## Validation Matrix @@ -356,5 +356,5 @@ This work is complete only when: - `xhcid` builds/tests cleanly - bounded QEMU proof matrix passes on a rebuilt image - canonical docs are synchronized -- durable patch carrier is refreshed +- durable patch carrier is updated - remaining gaps, if any, are explicitly documented as future or hardware-only work diff --git a/local/docs/repo-governance.md b/local/docs/repo-governance.md index 443a5c3d..f9554130 100644 --- a/local/docs/repo-governance.md +++ b/local/docs/repo-governance.md @@ -53,9 +53,9 @@ why it is intentionally excluded. - Red Bear builds must remain resilient when access to upstream Redox infrastructure is degraded or unavailable. - Local package/source copies are the default operational source of truth for builds. -- Upstream fetch/refresh is opt-in and must be explicitly requested by the operator (for example via +- Upstream fetch/provisioning is opt-in and must be explicitly requested by the operator (for example via an explicit `--upstream` workflow). -- After an explicit upstream refresh, local durable overlays (`local/patches`, `local/recipes`) stay +- After an explicit upstream provisioning run, local durable release fork state (`local/patches`, `local/recipes`) stays authoritative until a conscious reevaluation/promotion decision is made.
## Profile Intent @@ -94,6 +94,6 @@ For any substantial Red Bear change, record: ## Upstream Sync Discipline -- Rebase/sync through `local/scripts/sync-upstream.sh`. +- Rebase/sync through `local/scripts/provision-release.sh`. - Keep Red Bear-specific diffs easy to audit. - Update profile docs when config inheritance or package composition changes. diff --git a/local/patches/base/P0-daemon-init-notify-graceful.patch b/local/patches/base/P0-daemon-init-notify-graceful.patch.bak similarity index 100% rename from local/patches/base/P0-daemon-init-notify-graceful.patch rename to local/patches/base/P0-daemon-init-notify-graceful.patch.bak diff --git a/local/patches/base/P1-pcid-uevent-surface.patch b/local/patches/base/P1-pcid-uevent-surface.patch.bak similarity index 100% rename from local/patches/base/P1-pcid-uevent-surface.patch rename to local/patches/base/P1-pcid-uevent-surface.patch.bak diff --git a/local/patches/base/P1-xhcid-uevent-logging.patch b/local/patches/base/P1-xhcid-uevent-logging.patch.bak similarity index 100% rename from local/patches/base/P1-xhcid-uevent-logging.patch rename to local/patches/base/P1-xhcid-uevent-logging.patch.bak diff --git a/local/patches/base/P2-ac97d-ihdad-main.patch b/local/patches/base/P2-ac97d-ihdad-main.patch.bak similarity index 100% rename from local/patches/base/P2-ac97d-ihdad-main.patch rename to local/patches/base/P2-ac97d-ihdad-main.patch.bak diff --git a/local/patches/base/P2-acpid-core-refactor.patch b/local/patches/base/P2-acpid-core-refactor.patch.bak similarity index 100% rename from local/patches/base/P2-acpid-core-refactor.patch rename to local/patches/base/P2-acpid-core-refactor.patch.bak diff --git a/local/patches/base/P2-boot-runtime-fixes.patch b/local/patches/base/P2-boot-runtime-fixes.patch.bak similarity index 100% rename from local/patches/base/P2-boot-runtime-fixes.patch rename to local/patches/base/P2-boot-runtime-fixes.patch.bak diff --git a/local/patches/base/P2-boot-runtime-noise-and-net-race.patch 
b/local/patches/base/P2-boot-runtime-noise-and-net-race.patch.bak similarity index 100% rename from local/patches/base/P2-boot-runtime-noise-and-net-race.patch rename to local/patches/base/P2-boot-runtime-noise-and-net-race.patch.bak diff --git a/local/patches/base/P2-hwd-misc.patch b/local/patches/base/P2-hwd-misc.patch.bak similarity index 100% rename from local/patches/base/P2-hwd-misc.patch rename to local/patches/base/P2-hwd-misc.patch.bak diff --git a/local/patches/base/P2-init-acpid-wiring.patch b/local/patches/base/P2-init-acpid-wiring.patch.bak similarity index 100% rename from local/patches/base/P2-init-acpid-wiring.patch rename to local/patches/base/P2-init-acpid-wiring.patch.bak diff --git a/local/patches/base/P2-network-driver-mains.patch b/local/patches/base/P2-network-driver-mains.patch.bak similarity index 100% rename from local/patches/base/P2-network-driver-mains.patch rename to local/patches/base/P2-network-driver-mains.patch.bak diff --git a/local/patches/base/P2-network-error-handling.patch b/local/patches/base/P2-network-error-handling.patch.bak similarity index 100% rename from local/patches/base/P2-network-error-handling.patch rename to local/patches/base/P2-network-error-handling.patch.bak diff --git a/local/patches/base/P2-storage-error-handling.patch b/local/patches/base/P2-storage-error-handling.patch.bak similarity index 100% rename from local/patches/base/P2-storage-error-handling.patch rename to local/patches/base/P2-storage-error-handling.patch.bak diff --git a/local/patches/base/P2-usb-pm-and-drivers.patch b/local/patches/base/P2-usb-pm-and-drivers.patch.bak similarity index 100% rename from local/patches/base/P2-usb-pm-and-drivers.patch rename to local/patches/base/P2-usb-pm-and-drivers.patch.bak diff --git a/local/patches/base/P3-pcid-aer-scheme.patch b/local/patches/base/P3-pcid-aer-scheme.patch.bak similarity index 100% rename from local/patches/base/P3-pcid-aer-scheme.patch rename to 
local/patches/base/P3-pcid-aer-scheme.patch.bak diff --git a/local/patches/base/P0-acpid-dmar-fix.patch b/local/patches/base/absorbed/P0-acpid-dmar-fix.patch similarity index 100% rename from local/patches/base/P0-acpid-dmar-fix.patch rename to local/patches/base/absorbed/P0-acpid-dmar-fix.patch diff --git a/local/patches/base/P0-acpid-fadt-shutdown.patch b/local/patches/base/absorbed/P0-acpid-fadt-shutdown.patch similarity index 100% rename from local/patches/base/P0-acpid-fadt-shutdown.patch rename to local/patches/base/absorbed/P0-acpid-fadt-shutdown.patch diff --git a/local/patches/base/P0-acpid-mcfg-ivrs.patch b/local/patches/base/absorbed/P0-acpid-mcfg-ivrs.patch similarity index 100% rename from local/patches/base/P0-acpid-mcfg-ivrs.patch rename to local/patches/base/absorbed/P0-acpid-mcfg-ivrs.patch diff --git a/local/patches/base/P0-acpid-power-methods.patch b/local/patches/base/absorbed/P0-acpid-power-methods.patch similarity index 100% rename from local/patches/base/P0-acpid-power-methods.patch rename to local/patches/base/absorbed/P0-acpid-power-methods.patch diff --git a/local/patches/base/P0-bootstrap-workspace-fix.patch b/local/patches/base/absorbed/P0-bootstrap-workspace-fix.patch similarity index 100% rename from local/patches/base/P0-bootstrap-workspace-fix.patch rename to local/patches/base/absorbed/P0-bootstrap-workspace-fix.patch diff --git a/local/patches/base/P0-cumulative-daemon-driver-fixes.patch b/local/patches/base/absorbed/P0-cumulative-daemon-driver-fixes.patch similarity index 100% rename from local/patches/base/P0-cumulative-daemon-driver-fixes.patch rename to local/patches/base/absorbed/P0-cumulative-daemon-driver-fixes.patch diff --git a/local/patches/base/P0-daemon-fix-init-notify-unwrap.patch b/local/patches/base/absorbed/P0-daemon-fix-init-notify-unwrap.patch similarity index 100% rename from local/patches/base/P0-daemon-fix-init-notify-unwrap.patch rename to local/patches/base/absorbed/P0-daemon-fix-init-notify-unwrap.patch diff 
--git a/local/patches/base/absorbed/P0-daemon-init-notify-graceful.patch b/local/patches/base/absorbed/P0-daemon-init-notify-graceful.patch new file mode 100644 index 00000000..ce1292f0 --- /dev/null +++ b/local/patches/base/absorbed/P0-daemon-init-notify-graceful.patch @@ -0,0 +1,55 @@ +From: Red Bear OS +Date: 2026-04-28 +Subject: daemon: handle missing INIT_NOTIFY gracefully instead of panicking + +The Daemon::new() and Daemon::ready() functions in the daemon library +called unwrap() on the INIT_NOTIFY environment variable and the ready +pipe write, causing a hard panic when a daemon is started outside the +init system's notification pipe mechanism. + +Replace unwrap() with graceful error handling: +- get_fd() returns -1 if the env var is missing or invalid, logging + a warning via eprintln +- ready() logs a warning on write failure instead of panicking + +diff --git a/daemon/src/lib.rs b/daemon/src/lib.rs +index 9f507221..a0ba9d88 100644 +--- a/daemon/src/lib.rs ++++ b/daemon/src/lib.rs +@@ -11,12 +11,23 @@ use redox_scheme::Socket; + use redox_scheme::scheme::{SchemeAsync, SchemeSync}; + + unsafe fn get_fd(var: &str) -> RawFd { +- let fd: RawFd = std::env::var(var).unwrap().parse().unwrap(); ++ let fd: RawFd = match std::env::var(var) ++ .map_err(|e| eprintln!("daemon: env var {var} not set: {e}")) ++ .ok() ++ .and_then(|val| { ++ val.parse() ++ .map_err(|e| eprintln!("daemon: failed to parse {var} as fd: {e}")) ++ .ok() ++ }) { ++ Some(fd) => fd, ++ None => return -1, ++ }; + if unsafe { libc::fcntl(fd, libc::F_SETFD, libc::FD_CLOEXEC) } == -1 { +- panic!( ++ eprintln!( + "daemon: failed to set CLOEXEC flag for {var} fd: {}", + io::Error::last_os_error() + ); ++ return -1; + } + fd + } +@@ -50,7 +61,9 @@ impl Daemon { + + /// Notify the process that the daemon is ready to accept requests. 
+ pub fn ready(mut self) { +- self.write_pipe.write_all(&[0]).unwrap(); ++ if let Err(err) = self.write_pipe.write_all(&[0]) { ++ eprintln!("daemon::ready write failed: {err}"); ++ } + } + + /// Executes `Command` as a child process. diff --git a/local/patches/base/P0-dhcpd-auto-iface.patch b/local/patches/base/absorbed/P0-dhcpd-auto-iface.patch similarity index 100% rename from local/patches/base/P0-dhcpd-auto-iface.patch rename to local/patches/base/absorbed/P0-dhcpd-auto-iface.patch diff --git a/local/patches/base/P0-driver-api-migration-fixes.patch b/local/patches/base/absorbed/P0-driver-api-migration-fixes.patch similarity index 100% rename from local/patches/base/P0-driver-api-migration-fixes.patch rename to local/patches/base/absorbed/P0-driver-api-migration-fixes.patch diff --git a/local/patches/base/P0-ihdgd-intel-gpu-ids.patch b/local/patches/base/absorbed/P0-ihdgd-intel-gpu-ids.patch similarity index 100% rename from local/patches/base/P0-ihdgd-intel-gpu-ids.patch rename to local/patches/base/absorbed/P0-ihdgd-intel-gpu-ids.patch diff --git a/local/patches/base/P0-inputd-named-producers.patch b/local/patches/base/absorbed/P0-inputd-named-producers.patch similarity index 100% rename from local/patches/base/P0-inputd-named-producers.patch rename to local/patches/base/absorbed/P0-inputd-named-producers.patch diff --git a/local/patches/base/P0-inputd-per-device-consumers.patch b/local/patches/base/absorbed/P0-inputd-per-device-consumers.patch similarity index 100% rename from local/patches/base/P0-inputd-per-device-consumers.patch rename to local/patches/base/absorbed/P0-inputd-per-device-consumers.patch diff --git a/local/patches/base/P0-pcid-config-endpoint.patch b/local/patches/base/absorbed/P0-pcid-config-endpoint.patch similarity index 100% rename from local/patches/base/P0-pcid-config-endpoint.patch rename to local/patches/base/absorbed/P0-pcid-config-endpoint.patch diff --git a/local/patches/base/P0-workspace-add-bootstrap.patch 
b/local/patches/base/absorbed/P0-workspace-add-bootstrap.patch similarity index 100% rename from local/patches/base/P0-workspace-add-bootstrap.patch rename to local/patches/base/absorbed/P0-workspace-add-bootstrap.patch diff --git a/local/patches/base/P1-acpid-acpi-core.patch b/local/patches/base/absorbed/P1-acpid-acpi-core.patch similarity index 100% rename from local/patches/base/P1-acpid-acpi-core.patch rename to local/patches/base/absorbed/P1-acpid-acpi-core.patch diff --git a/local/patches/base/P1-acpid-ec-runtime.patch b/local/patches/base/absorbed/P1-acpid-ec-runtime.patch similarity index 100% rename from local/patches/base/P1-acpid-ec-runtime.patch rename to local/patches/base/absorbed/P1-acpid-ec-runtime.patch diff --git a/local/patches/base/P1-acpid-power-enumeration.patch b/local/patches/base/absorbed/P1-acpid-power-enumeration.patch similarity index 100% rename from local/patches/base/P1-acpid-power-enumeration.patch rename to local/patches/base/absorbed/P1-acpid-power-enumeration.patch diff --git a/local/patches/base/P1-acpid-runtime-hardening.patch b/local/patches/base/absorbed/P1-acpid-runtime-hardening.patch similarity index 100% rename from local/patches/base/P1-acpid-runtime-hardening.patch rename to local/patches/base/absorbed/P1-acpid-runtime-hardening.patch diff --git a/local/patches/base/P1-acpid-scheme-surface.patch b/local/patches/base/absorbed/P1-acpid-scheme-surface.patch similarity index 100% rename from local/patches/base/P1-acpid-scheme-surface.patch rename to local/patches/base/absorbed/P1-acpid-scheme-surface.patch diff --git a/local/patches/base/P1-pci-irq-wave1-3.patch b/local/patches/base/absorbed/P1-pci-irq-wave1-3.patch similarity index 100% rename from local/patches/base/P1-pci-irq-wave1-3.patch rename to local/patches/base/absorbed/P1-pci-irq-wave1-3.patch diff --git a/local/patches/base/P1-pci-irq-wave1-5.patch b/local/patches/base/absorbed/P1-pci-irq-wave1-5.patch similarity index 100% rename from 
local/patches/base/P1-pci-irq-wave1-5.patch rename to local/patches/base/absorbed/P1-pci-irq-wave1-5.patch diff --git a/local/patches/base/absorbed/P1-pcid-uevent-surface.patch b/local/patches/base/absorbed/P1-pcid-uevent-surface.patch new file mode 100644 index 00000000..a019e725 --- /dev/null +++ b/local/patches/base/absorbed/P1-pcid-uevent-surface.patch @@ -0,0 +1,61 @@ +diff --git a/drivers/pcid/src/scheme.rs b/drivers/pcid/src/scheme.rs +index ce55b33f..c06bdec4 100644 +--- a/drivers/pcid/src/scheme.rs ++++ b/drivers/pcid/src/scheme.rs +@@ -21,6 +21,10 @@ enum Handle { + Access, + Device, + Channel { addr: PciAddress, st: ChannelState }, ++ // Uevent surface for hotplug consumers. Opening uevent returns an object ++ // from which device add/remove events can be read. Since pcid currently ++ // only scans at startup, this surface is ready for hotplug polling consumers. ++ Uevent, + SchemeRoot, + /// Represents an open handle to a device's bind endpoint + Bind { addr: PciAddress }, +@@ -34,6 +38,6 @@ struct HandleWrapper { + } + fn is_file(&self) -> bool { +- matches!(self, Self::Access | Self::Channel { .. } | Self::Bind { .. }) ++ matches!(self, Self::Access | Self::Channel { .. } | Self::Bind { .. } | Self::Uevent) + } + fn is_dir(&self) -> bool { + !self.is_file() +@@ -96,6 +100,8 @@ impl SchemeSync for PciScheme { + } + } else if path == "access" { + Handle::Access ++ } else if path == "uevent" { ++ Handle::Uevent + } else { + let idx = path.find('/').unwrap_or(path.len()); + let (addr_str, after) = path.split_at(idx); +@@ -140,5 +146,6 @@ impl SchemeSync for PciScheme { + Handle::Device => (DEVICE_CONTENTS.len(), MODE_DIR | 0o755), + Handle::Access | Handle::Channel { .. } | Handle::Bind { .. 
} => (0, MODE_CHR | 0o600), ++ Handle::Uevent => (0, MODE_CHR | 0o644), + Handle::SchemeRoot => return Err(Error::new(EBADF)), + }; + stat.st_size = len as u64; +@@ -164,7 +171,13 @@ impl SchemeSync for PciScheme { + Handle::Channel { + addr: _, + ref mut st, + } => Self::read_channel(st, buf), ++ Handle::Uevent => { ++ // Uevent surface is ready for hotplug polling consumers. ++ // pcid currently only scans at startup, so return empty (EAGAIN would indicate no data available). ++ // Consumers can poll and re-read to check for new events. ++ Ok(0) ++ } + Handle::SchemeRoot | Handle::Bind { .. } => Err(Error::new(EBADF)), + _ => Err(Error::new(EBADF)), + } +@@ -199,6 +212,6 @@ impl SchemeSync for PciScheme { + } + Handle::Device => DEVICE_CONTENTS, +- Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } => return Err(Error::new(ENOTDIR)), ++ Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } | Handle::Uevent => return Err(Error::new(ENOTDIR)), + Handle::SchemeRoot => return Err(Error::new(EBADF)), + }; + for (i, dent_name) in entries.iter().enumerate().skip(offset) { diff --git a/local/patches/base/P1-xhcid-device-lifecycle.patch b/local/patches/base/absorbed/P1-xhcid-device-lifecycle.patch similarity index 100% rename from local/patches/base/P1-xhcid-device-lifecycle.patch rename to local/patches/base/absorbed/P1-xhcid-device-lifecycle.patch diff --git a/local/patches/base/P1-xhcid-port-pm-read-fix.patch b/local/patches/base/absorbed/P1-xhcid-port-pm-read-fix.patch similarity index 100% rename from local/patches/base/P1-xhcid-port-pm-read-fix.patch rename to local/patches/base/absorbed/P1-xhcid-port-pm-read-fix.patch diff --git a/local/patches/base/absorbed/P1-xhcid-uevent-logging.patch b/local/patches/base/absorbed/P1-xhcid-uevent-logging.patch new file mode 100644 index 00000000..0e568ef2 --- /dev/null +++ b/local/patches/base/absorbed/P1-xhcid-uevent-logging.patch @@ -0,0 +1,20 @@ +diff --git a/drivers/usb/xhcid/src/xhci/mod.rs 
b/drivers/usb/xhcid/src/xhci/mod.rs +index f1c6d08e..a3f2e15c 100644 +--- a/drivers/usb/xhcid/src/xhci/mod.rs ++++ b/drivers/usb/xhcid/src/xhci/mod.rs +@@ -904,6 +904,7 @@ impl Xhci { + match self.spawn_drivers(port_id) { + Ok(()) => { + info!("xhcid: uevent add device usb/{}", port_id.root_hub_port_num()); ++ // NOTE: driver-manager hotplug loop detects new USB devices via this log + } + Err(err) => { + error!("Failed to spawn driver for port {}: `{}`", port_id, err) +@@ -974,6 +975,7 @@ impl Xhci { + info!("xhcid: uevent remove device usb/{}", port_id.root_hub_port_num()); + result + } else { ++ // NOTE: driver-manager hotplug loop detects USB device removal via this log + debug!( + "Attempted to detach from port {}, which wasn't previously attached.", + port_id diff --git a/local/patches/base/absorbed/P2-ac97d-ihdad-main.patch b/local/patches/base/absorbed/P2-ac97d-ihdad-main.patch new file mode 100644 index 00000000..4a23a31b --- /dev/null +++ b/local/patches/base/absorbed/P2-ac97d-ihdad-main.patch @@ -0,0 +1,287 @@ +# P2-ac97d-ihdad-main.patch +# +# Audio daemon main entry points: AC97 and Intel HDA driver initialization, +# error handling, and BAR access improvements. +# +# Covers: +# - ac97d/src/main.rs: BAR access, error handling, codec initialization +# - ihdad/src/main.rs: error handling, device initialization +# +diff --git a/drivers/audio/ac97d/src/main.rs b/drivers/audio/ac97d/src/main.rs +index ffa8a94b..e4dbf930 100644 +--- a/drivers/audio/ac97d/src/main.rs ++++ b/drivers/audio/ac97d/src/main.rs +@@ -3,6 +3,7 @@ use std::os::unix::io::AsRawFd; + use std::usize; + + use event::{user_data, EventQueue}; ++use log::error; + use pcid_interface::PciFunctionHandle; + use redox_scheme::scheme::register_sync_scheme; + use redox_scheme::Socket; +@@ -22,13 +23,28 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! 
{ + let mut name = pci_config.func.name(); + name.push_str("_ac97"); + +- let bar0 = pci_config.func.bars[0].expect_port(); +- let bar1 = pci_config.func.bars[1].expect_port(); ++ let bar0 = match pci_config.func.bars[0].try_port() { ++ Ok(port) => port, ++ Err(err) => { ++ error!("ac97d: invalid BAR0: {err}"); ++ std::process::exit(1); ++ } ++ }; ++ let bar1 = match pci_config.func.bars[1].try_port() { ++ Ok(port) => port, ++ Err(err) => { ++ error!("ac97d: invalid BAR1: {err}"); ++ std::process::exit(1); ++ } ++ }; + + let irq = pci_config + .func + .legacy_interrupt_line +- .expect("ac97d: no legacy interrupts supported"); ++ .unwrap_or_else(|| { ++ error!("ac97d: no legacy interrupts supported"); ++ std::process::exit(1); ++ }); + + println!(" + ac97 {}", pci_config.func.display()); + +@@ -40,13 +56,35 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { + common::file_level(), + ); + +- common::acquire_port_io_rights().expect("ac97d: failed to set I/O privilege level to Ring 3"); ++ if let Err(err) = common::acquire_port_io_rights() { ++ error!("ac97d: failed to set I/O privilege level to Ring 3: {err}"); ++ std::process::exit(1); ++ } + +- let mut irq_file = irq.irq_handle("ac97d"); ++ let mut irq_file = match irq.try_irq_handle("ac97d") { ++ Ok(file) => file, ++ Err(err) => { ++ error!("ac97d: failed to open IRQ handle: {err}"); ++ std::process::exit(1); ++ } ++ }; + +- let socket = Socket::nonblock().expect("ac97d: failed to create socket"); +- let mut device = +- unsafe { device::Ac97::new(bar0, bar1).expect("ac97d: failed to allocate device") }; ++ let socket = match Socket::nonblock() { ++ Ok(socket) => socket, ++ Err(err) => { ++ error!("ac97d: failed to create socket: {err}"); ++ std::process::exit(1); ++ } ++ }; ++ let mut device = unsafe { ++ match device::Ac97::new(bar0, bar1) { ++ Ok(device) => device, ++ Err(err) => { ++ error!("ac97d: failed to allocate device: {err}"); ++ std::process::exit(1); ++ } ++ } ++ }; + let mut 
readiness_based = ReadinessBased::new(&socket, 16); + + user_data! { +@@ -56,49 +94,81 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { + } + } + +- let event_queue = EventQueue::::new().expect("ac97d: Could not create event queue."); ++ let event_queue = match EventQueue::::new() { ++ Ok(queue) => queue, ++ Err(err) => { ++ error!("ac97d: could not create event queue: {err}"); ++ std::process::exit(1); ++ } ++ }; + event_queue + .subscribe( + irq_file.as_raw_fd() as usize, + Source::Irq, + event::EventFlags::READ, + ) +- .unwrap(); ++ .unwrap_or_else(|err| { ++ error!("ac97d: failed to subscribe IRQ fd: {err}"); ++ std::process::exit(1); ++ }); + event_queue + .subscribe( + socket.inner().raw(), + Source::Scheme, + event::EventFlags::READ, + ) +- .unwrap(); +- +- register_sync_scheme(&socket, "audiohw", &mut device) +- .expect("ac97d: failed to register audiohw scheme to namespace"); ++ .unwrap_or_else(|err| { ++ error!("ac97d: failed to subscribe scheme fd: {err}"); ++ std::process::exit(1); ++ }); ++ ++ register_sync_scheme(&socket, "audiohw", &mut device).unwrap_or_else(|err| { ++ error!("ac97d: failed to register audiohw scheme to namespace: {err}"); ++ std::process::exit(1); ++ }); + daemon.ready(); + +- libredox::call::setrens(0, 0).expect("ac97d: failed to enter null namespace"); ++ if let Err(err) = libredox::call::setrens(0, 0) { ++ error!("ac97d: failed to enter null namespace: {err}"); ++ std::process::exit(1); ++ } + + let all = [Source::Irq, Source::Scheme]; +- for event in all +- .into_iter() +- .chain(event_queue.map(|e| e.expect("ac97d: failed to get next event").user_data)) +- { ++ for event in all.into_iter().chain(event_queue.map(|e| match e { ++ Ok(event) => event.user_data, ++ Err(err) => { ++ error!("ac97d: failed to get next event: {err}"); ++ std::process::exit(1); ++ } ++ })) { + match event { + Source::Irq => { + let mut irq = [0; 8]; +- irq_file.read(&mut irq).unwrap(); ++ if let Err(err) = irq_file.read(&mut 
irq) { ++ error!("ac97d: failed to read IRQ file: {err}"); ++ std::process::exit(1); ++ } + + if !device.irq() { + continue; + } +- irq_file.write(&mut irq).unwrap(); ++ if let Err(err) = irq_file.write(&mut irq) { ++ error!("ac97d: failed to acknowledge IRQ: {err}"); ++ std::process::exit(1); ++ } + + readiness_based + .poll_all_requests(&mut device) +- .expect("ac97d: failed to poll requests"); ++ .unwrap_or_else(|err| { ++ error!("ac97d: failed to poll requests: {err}"); ++ std::process::exit(1); ++ }); + readiness_based + .write_responses() +- .expect("ac97d: failed to write to socket"); ++ .unwrap_or_else(|err| { ++ error!("ac97d: failed to write to socket: {err}"); ++ std::process::exit(1); ++ }); + + /* + let next_read = device_irq.next_read(); +@@ -110,10 +180,16 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { + Source::Scheme => { + readiness_based + .read_and_process_requests(&mut device) +- .expect("ac97d: failed to read from socket"); ++ .unwrap_or_else(|err| { ++ error!("ac97d: failed to read from socket: {err}"); ++ std::process::exit(1); ++ }); + readiness_based + .write_responses() +- .expect("ac97d: failed to write to socket"); ++ .unwrap_or_else(|err| { ++ error!("ac97d: failed to write to socket: {err}"); ++ std::process::exit(1); ++ }); + + /* + let next_read = device.borrow().next_read(); +@@ -125,8 +201,8 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! 
{ + } + } + +- std::process::exit(0); ++ std::process::exit(1); + } + + #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] + +diff --git a/drivers/audio/ihdad/src/main.rs b/drivers/audio/ihdad/src/main.rs +index 31a2add7..11d80133 100755 +--- a/drivers/audio/ihdad/src/main.rs ++++ b/drivers/audio/ihdad/src/main.rs +@@ -6,7 +6,7 @@ use std::os::unix::io::AsRawFd; + use std::usize; + + use event::{user_data, EventQueue}; +-use pcid_interface::irq_helpers::pci_allocate_interrupt_vector; ++use pcid_interface::irq_helpers::try_pci_allocate_interrupt_vector; + use pcid_interface::PciFunctionHandle; + + pub mod hda; +@@ -38,9 +38,19 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + + log::info!("IHDA {}", pci_config.func.display()); + ++ if let Err(err) = pci_config.func.bars[0].try_mem() { ++ log::error!("ihdad: invalid BAR0: {err}"); ++ std::process::exit(1); ++ } + let address = unsafe { pcid_handle.map_bar(0) }.ptr.as_ptr() as usize; + +- let irq_file = pci_allocate_interrupt_vector(&mut pcid_handle, "ihdad"); ++ let irq_file = match try_pci_allocate_interrupt_vector(&mut pcid_handle, "ihdad") { ++ Ok(irq) => irq, ++ Err(err) => { ++ log::error!("ihdad: failed to allocate interrupt vector: {err}"); ++ std::process::exit(1); ++ } ++ }; + + { + let vend_prod: u32 = ((pci_config.func.full_device_id.vendor_id as u32) << 16) +@@ -53,11 +63,28 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ + } + } + +- let event_queue = +- EventQueue::::new().expect("ihdad: Could not create event queue."); +- let socket = Socket::nonblock().expect("ihdad: failed to create socket"); ++ let event_queue = match EventQueue::::new() { ++ Ok(queue) => queue, ++ Err(err) => { ++ log::error!("ihdad: could not create event queue: {err}"); ++ std::process::exit(1); ++ } ++ }; ++ let socket = match Socket::nonblock() { ++ Ok(socket) => socket, ++ Err(err) => { ++ log::error!("ihdad: failed to create socket: {err}"); ++ std::process::exit(1); ++ } ++ }; + let mut device = unsafe { +- hda::IntelHDA::new(address, vend_prod).expect("ihdad: failed to allocate device") ++ match hda::IntelHDA::new(address, vend_prod) { ++ Ok(device) => device, ++ Err(err) => { ++ log::error!("ihdad: failed to allocate device: {err}"); ++ std::process::exit(1); ++ } ++ } + }; + let mut readiness_based = ReadinessBased::new(&socket, 16); + diff --git a/local/patches/base/P2-acpi-defer-aml.patch b/local/patches/base/absorbed/P2-acpi-defer-aml.patch similarity index 100% rename from local/patches/base/P2-acpi-defer-aml.patch rename to local/patches/base/absorbed/P2-acpi-defer-aml.patch diff --git a/local/patches/base/P2-acpi-i2c-resources.patch b/local/patches/base/absorbed/P2-acpi-i2c-resources.patch similarity index 100% rename from local/patches/base/P2-acpi-i2c-resources.patch rename to local/patches/base/absorbed/P2-acpi-i2c-resources.patch diff --git a/local/patches/base/absorbed/P2-acpid-core-refactor.patch b/local/patches/base/absorbed/P2-acpid-core-refactor.patch new file mode 100644 index 00000000..2b2e7333 --- /dev/null +++ b/local/patches/base/absorbed/P2-acpid-core-refactor.patch @@ -0,0 +1,3150 @@ +# P2-acpid-core-refactor.patch +# +# Core acpid refactoring: DMI/SMBIOS discovery, ACPI power snapshot, sleep/S5 +# handling, FADT power blocks, GenericAddress I/O, AML mutex implementation, +# EC multi-byte region handler, DMAR validation, and scheme resources/power/DMI. 
+# +# Covers: +# - acpid/src/acpi.rs: DmiInfo, AcpiPowerSnapshot, sleep/S5, Fadt, GenericAddress, EC, quirks +# - acpid/src/acpi/dmar/mod.rs: DMAR structure length validation +# - acpid/src/aml_physmem.rs: AmlMutex implementation with Condvar +# - acpid/src/ec.rs: EC error type, multi-byte read/write, checked offsets +# - acpid/src/scheme.rs: resources, power, DMI directory entries (full section) +# +diff --git a/drivers/acpid/src/acpi.rs b/drivers/acpid/src/acpi.rs +index 94a1eb17..a7cde5d6 100644 +--- a/drivers/acpid/src/acpi.rs ++++ b/drivers/acpid/src/acpi.rs +@@ -1,13 +1,15 @@ + use acpi::aml::object::{Object, WrappedObject}; +-use acpi::aml::op_region::{RegionHandler, RegionSpace}; + use rustc_hash::FxHashMap; ++use std::any::Any; + use std::convert::{TryFrom, TryInto}; + use std::error::Error; + use std::ops::Deref; ++use std::panic::{catch_unwind, AssertUnwindSafe}; + use std::str::FromStr; + use std::sync::{Arc, Mutex}; + use std::{fmt, mem}; + use syscall::PAGE_SIZE; ++use toml::Value; + + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + use common::io::{Io, Pio}; +@@ -16,16 +18,17 @@ use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; + use thiserror::Error; + + use acpi::{ +- aml::{namespace::AmlName, AmlError, Interpreter}, ++ aml::{namespace::AmlName, op_region::RegionSpace, AmlError, Interpreter}, + platform::AcpiPlatform, + AcpiTables, + }; + use amlserde::aml_serde_name::aml_to_symbol; + use amlserde::{AmlSerde, AmlSerdeValue}; + +-#[cfg(target_arch = "x86_64")] +-pub mod dmar; + use crate::aml_physmem::{AmlPageCache, AmlPhysMemHandler}; ++#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ++use crate::ec::Ec; ++use crate::sleep::SleepTarget; + + /// The raw SDT header struct, as defined by the ACPI specification. 
+ #[derive(Copy, Clone, Debug)] +@@ -206,6 +209,615 @@ impl Sdt { + } + } + ++#[derive(Clone, Debug, Default)] ++pub struct DmiInfo { ++ pub sys_vendor: Option, ++ pub board_vendor: Option, ++ pub board_name: Option, ++ pub board_version: Option, ++ pub product_name: Option, ++ pub product_version: Option, ++ pub bios_version: Option, ++} ++ ++impl DmiInfo { ++ pub fn to_match_lines(&self) -> String { ++ let mut lines = Vec::new(); ++ if let Some(value) = &self.sys_vendor { ++ lines.push(format!("sys_vendor={value}")); ++ } ++ if let Some(value) = &self.board_vendor { ++ lines.push(format!("board_vendor={value}")); ++ } ++ if let Some(value) = &self.board_name { ++ lines.push(format!("board_name={value}")); ++ } ++ if let Some(value) = &self.board_version { ++ lines.push(format!("board_version={value}")); ++ } ++ if let Some(value) = &self.product_name { ++ lines.push(format!("product_name={value}")); ++ } ++ if let Some(value) = &self.product_version { ++ lines.push(format!("product_version={value}")); ++ } ++ if let Some(value) = &self.bios_version { ++ lines.push(format!("bios_version={value}")); ++ } ++ lines.join("\n") ++ } ++} ++ ++#[repr(C, packed)] ++struct Smbios2EntryPoint { ++ anchor: [u8; 4], ++ checksum: u8, ++ length: u8, ++ major: u8, ++ minor: u8, ++ max_structure_size: u16, ++ entry_point_revision: u8, ++ formatted_area: [u8; 5], ++ intermediate_anchor: [u8; 5], ++ intermediate_checksum: u8, ++ table_length: u16, ++ table_address: u32, ++ structure_count: u16, ++ bcd_revision: u8, ++} ++unsafe impl plain::Plain for Smbios2EntryPoint {} ++ ++#[repr(C, packed)] ++struct Smbios3EntryPoint { ++ anchor: [u8; 5], ++ checksum: u8, ++ length: u8, ++ major: u8, ++ minor: u8, ++ docrev: u8, ++ entry_point_revision: u8, ++ reserved: u8, ++ table_max_size: u32, ++ table_address: u64, ++} ++unsafe impl plain::Plain for Smbios3EntryPoint {} ++ ++#[repr(C, packed)] ++#[derive(Clone, Copy)] ++struct SmbiosStructHeader { ++ kind: u8, ++ length: u8, ++ handle: u16, 
++} ++unsafe impl plain::Plain for SmbiosStructHeader {} ++ ++fn checksum_ok(bytes: &[u8]) -> bool { ++ bytes ++ .iter() ++ .copied() ++ .fold(0u8, |acc, byte| acc.wrapping_add(byte)) ++ == 0 ++} ++ ++fn scan_smbios2() -> Option<(usize, usize)> { ++ const START: usize = 0xF0000; ++ const END: usize = 0x100000; ++ let mapped = PhysmapGuard::map(START, (END - START).div_ceil(PAGE_SIZE)).ok()?; ++ let bytes = &mapped[..END - START]; ++ let header_size = mem::size_of::(); ++ ++ let mut offset = 0; ++ while offset + header_size <= bytes.len() { ++ if &bytes[offset..offset + 4] == b"_SM_" { ++ let entry = ++ plain::from_bytes::(&bytes[offset..offset + header_size]) ++ .ok()?; ++ let length = entry.length as usize; ++ if offset + length <= bytes.len() ++ && length >= header_size ++ && checksum_ok(&bytes[offset..offset + length]) ++ && &entry.intermediate_anchor == b"_DMI_" ++ { ++ return Some((entry.table_address as usize, entry.table_length as usize)); ++ } ++ } ++ offset += 16; ++ } ++ None ++} ++ ++fn scan_smbios3() -> Option<(usize, usize)> { ++ const START: usize = 0xF0000; ++ const END: usize = 0x100000; ++ let mapped = PhysmapGuard::map(START, (END - START).div_ceil(PAGE_SIZE)).ok()?; ++ let bytes = &mapped[..END - START]; ++ let header_size = mem::size_of::(); ++ ++ let mut offset = 0; ++ while offset + header_size <= bytes.len() { ++ if &bytes[offset..offset + 5] == b"_SM3_" { ++ let entry = ++ plain::from_bytes::(&bytes[offset..offset + header_size]) ++ .ok()?; ++ let length = entry.length as usize; ++ if offset + length <= bytes.len() ++ && length >= header_size ++ && checksum_ok(&bytes[offset..offset + length]) ++ { ++ return Some((entry.table_address as usize, entry.table_max_size as usize)); ++ } ++ } ++ offset += 16; ++ } ++ None ++} ++ ++fn smbios_string(strings: &[u8], index: u8) -> Option { ++ if index == 0 { ++ return None; ++ } ++ let mut current = 1u8; ++ for part in strings.split(|b| *b == 0) { ++ if part.is_empty() { ++ break; ++ } ++ if current == 
index { ++ return Some(String::from_utf8_lossy(part).trim().to_string()) ++ .filter(|s| !s.is_empty()); ++ } ++ current = current.saturating_add(1); ++ } ++ None ++} ++ ++fn parse_smbios_table(table_addr: usize, table_len: usize) -> Option { ++ if table_len == 0 { ++ return None; ++ } ++ let mapped = PhysmapGuard::map( ++ table_addr / PAGE_SIZE * PAGE_SIZE, ++ (table_addr % PAGE_SIZE + table_len).div_ceil(PAGE_SIZE), ++ ) ++ .ok()?; ++ let start = table_addr % PAGE_SIZE; ++ let bytes = &mapped[start..start + table_len]; ++ let mut offset = 0usize; ++ let mut info = DmiInfo::default(); ++ ++ while offset + mem::size_of::() <= bytes.len() { ++ let header = plain::from_bytes::( ++ &bytes[offset..offset + mem::size_of::()], ++ ) ++ .ok()?; ++ let formatted_len = header.length as usize; ++ if formatted_len < mem::size_of::() ++ || offset + formatted_len > bytes.len() ++ { ++ break; ++ } ++ let struct_bytes = &bytes[offset..offset + formatted_len]; ++ let mut string_end = offset + formatted_len; ++ while string_end + 1 < bytes.len() { ++ if bytes[string_end] == 0 && bytes[string_end + 1] == 0 { ++ string_end += 2; ++ break; ++ } ++ string_end += 1; ++ } ++ let strings = &bytes[offset + formatted_len..string_end.saturating_sub(1).min(bytes.len())]; ++ ++ match header.kind { ++ 0 if formatted_len >= 0x09 => { ++ info.bios_version = smbios_string(strings, struct_bytes[0x05]); ++ } ++ 1 if formatted_len >= 0x08 => { ++ info.sys_vendor = smbios_string(strings, struct_bytes[0x04]); ++ info.product_name = smbios_string(strings, struct_bytes[0x05]); ++ info.product_version = smbios_string(strings, struct_bytes[0x06]); ++ } ++ 2 if formatted_len >= 0x08 => { ++ info.board_vendor = smbios_string(strings, struct_bytes[0x04]); ++ info.board_name = smbios_string(strings, struct_bytes[0x05]); ++ info.board_version = smbios_string(strings, struct_bytes[0x06]); ++ } ++ 127 => break, ++ _ => {} ++ } ++ ++ if string_end <= offset { ++ break; ++ } ++ offset = string_end; ++ } ++ ++ if 
info.to_match_lines().is_empty() { ++ None ++ } else { ++ Some(info) ++ } ++} ++ ++pub fn load_dmi_info() -> Option { ++ let (addr, len) = scan_smbios3().or_else(scan_smbios2)?; ++ parse_smbios_table(addr, len) ++} ++ ++#[derive(Clone, Debug, Default)] ++struct AcpiTableMatchRule { ++ sys_vendor: Option, ++ board_vendor: Option, ++ board_name: Option, ++ board_version: Option, ++ product_name: Option, ++ product_version: Option, ++ bios_version: Option, ++} ++ ++impl AcpiTableMatchRule { ++ fn is_empty(&self) -> bool { ++ self.sys_vendor.is_none() ++ && self.board_vendor.is_none() ++ && self.board_name.is_none() ++ && self.board_version.is_none() ++ && self.product_name.is_none() ++ && self.product_version.is_none() ++ && self.bios_version.is_none() ++ } ++ ++ fn matches(&self, info: &DmiInfo) -> bool { ++ fn field_matches(expected: &Option, actual: &Option) -> bool { ++ match expected { ++ Some(expected) => actual.as_ref() == Some(expected), ++ None => true, ++ } ++ } ++ ++ field_matches(&self.sys_vendor, &info.sys_vendor) ++ && field_matches(&self.board_vendor, &info.board_vendor) ++ && field_matches(&self.board_name, &info.board_name) ++ && field_matches(&self.board_version, &info.board_version) ++ && field_matches(&self.product_name, &info.product_name) ++ && field_matches(&self.product_version, &info.product_version) ++ && field_matches(&self.bios_version, &info.bios_version) ++ } ++} ++ ++#[derive(Clone, Debug)] ++struct AcpiTableQuirkRule { ++ signature: [u8; 4], ++ dmi_match: AcpiTableMatchRule, ++} ++ ++const ACPI_QUIRKS_DIR: &str = "/etc/quirks.d"; ++ ++fn parse_acpi_signature(value: &str) -> Option<[u8; 4]> { ++ let bytes = value.as_bytes(); ++ if bytes.len() != 4 { ++ return None; ++ } ++ Some([bytes[0], bytes[1], bytes[2], bytes[3]]) ++} ++ ++fn parse_match_string(table: &toml::Table, field: &str) -> Option { ++ table.get(field).and_then(Value::as_str).map(str::to_string) ++} ++ ++fn parse_acpi_table_quirks(document: &Value, path: &str) -> Vec { ++ let 
Some(entries) = document.get("acpi_table_quirk").and_then(Value::as_array) else { ++ return Vec::new(); ++ }; ++ ++ let mut rules = Vec::new(); ++ for entry in entries { ++ let Some(table) = entry.as_table() else { ++ log::warn!("acpid: {path}: acpi_table_quirk entry is not a table"); ++ continue; ++ }; ++ let Some(signature) = table.get("signature").and_then(Value::as_str) else { ++ log::warn!("acpid: {path}: acpi_table_quirk missing signature"); ++ continue; ++ }; ++ let Some(signature) = parse_acpi_signature(signature) else { ++ log::warn!("acpid: {path}: invalid acpi table signature {signature:?}"); ++ continue; ++ }; ++ ++ let dmi_match = table ++ .get("match") ++ .and_then(Value::as_table) ++ .map(|m| AcpiTableMatchRule { ++ sys_vendor: parse_match_string(m, "sys_vendor"), ++ board_vendor: parse_match_string(m, "board_vendor"), ++ board_name: parse_match_string(m, "board_name"), ++ board_version: parse_match_string(m, "board_version"), ++ product_name: parse_match_string(m, "product_name"), ++ product_version: parse_match_string(m, "product_version"), ++ bios_version: parse_match_string(m, "bios_version"), ++ }) ++ .unwrap_or_default(); ++ ++ rules.push(AcpiTableQuirkRule { ++ signature, ++ dmi_match, ++ }); ++ } ++ ++ rules ++} ++ ++fn load_acpi_table_quirks() -> Vec { ++ let Ok(entries) = std::fs::read_dir(ACPI_QUIRKS_DIR) else { ++ return Vec::new(); ++ }; ++ ++ let mut paths = entries ++ .filter_map(Result::ok) ++ .map(|entry| entry.path()) ++ .filter(|path| path.extension().and_then(|ext| ext.to_str()) == Some("toml")) ++ .collect::>(); ++ paths.sort(); ++ ++ let mut rules = Vec::new(); ++ for path in paths { ++ let path_str = path.display().to_string(); ++ let Ok(contents) = std::fs::read_to_string(&path) else { ++ log::warn!("acpid: failed to read {path_str}"); ++ continue; ++ }; ++ let Ok(document) = contents.parse::() else { ++ log::warn!("acpid: failed to parse {path_str}"); ++ continue; ++ }; ++ rules.extend(parse_acpi_table_quirks(&document, 
&path_str)); ++ } ++ rules ++} ++ ++fn apply_acpi_table_quirks(mut tables: Vec, dmi_info: Option<&DmiInfo>) -> Vec { ++ let Some(dmi_info) = dmi_info else { ++ return tables; ++ }; ++ ++ let rules = load_acpi_table_quirks(); ++ if rules.is_empty() { ++ return tables; ++ } ++ ++ tables.retain(|table| { ++ let skip = rules.iter().any(|rule| { ++ table.signature == rule.signature ++ && (rule.dmi_match.is_empty() || rule.dmi_match.matches(dmi_info)) ++ }); ++ if skip { ++ log::warn!( ++ "acpid: skipping ACPI table {} due to acpi_table_quirk rule", ++ String::from_utf8_lossy(&table.signature) ++ ); ++ } ++ !skip ++ }); ++ tables ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::{ ++ compute_battery_percentage, fill_bif_fields, fill_bix_fields, parse_acpi_signature, ++ parse_acpi_table_quirks, parse_sleep_package, parse_bst_package, smbios_string, ++ AcpiBattery, AcpiTableMatchRule, AmlSerdeValue, DmiInfo, SleepStateValuesError, ++ }; ++ use crate::sleep::SleepTarget; ++ use std::iter::FromIterator; ++ use toml::Value; ++ ++ #[test] ++ fn dmi_info_formats_key_value_lines() { ++ let info = DmiInfo { ++ sys_vendor: Some("Framework".to_string()), ++ board_name: Some("FRANMECP01".to_string()), ++ product_name: Some("Laptop 16".to_string()), ++ ..DmiInfo::default() ++ }; ++ ++ let rendered = info.to_match_lines(); ++ assert_eq!( ++ rendered, ++ "sys_vendor=Framework\nboard_name=FRANMECP01\nproduct_name=Laptop 16" ++ ); ++ } ++ ++ #[test] ++ fn smbios_string_returns_requested_index() { ++ let strings = b"Vendor\0Product\0Version\0\0"; ++ ++ assert_eq!(smbios_string(strings, 1).as_deref(), Some("Vendor")); ++ assert_eq!(smbios_string(strings, 2).as_deref(), Some("Product")); ++ assert_eq!(smbios_string(strings, 3).as_deref(), Some("Version")); ++ assert_eq!(smbios_string(strings, 4), None); ++ } ++ ++ #[test] ++ fn parse_sleep_package_accepts_two_integers() { ++ let package = AmlSerdeValue::Package { ++ contents: vec![AmlSerdeValue::Integer(3), AmlSerdeValue::Integer(5)], ++ }; 
++ ++ assert_eq!(parse_sleep_package(SleepTarget::S5, package).unwrap(), (3, 5)); ++ } ++ ++ #[test] ++ fn parse_sleep_package_rejects_non_package_values() { ++ let error = parse_sleep_package(SleepTarget::S5, AmlSerdeValue::Integer(5)).unwrap_err(); ++ assert!(matches!(error, SleepStateValuesError::NonPackageValue)); ++ } ++ ++ #[test] ++ fn parse_sleep_package_rejects_non_integer_entries() { ++ let package = AmlSerdeValue::Package { ++ contents: vec![ ++ AmlSerdeValue::Integer(3), ++ AmlSerdeValue::String("bad".to_string()), ++ ], ++ }; ++ ++ let error = parse_sleep_package(SleepTarget::S5, package).unwrap_err(); ++ assert!(matches!(error, SleepStateValuesError::InvalidPackageShape)); ++ } ++ ++ #[test] ++ fn parse_bst_package_populates_runtime_battery_fields() { ++ let mut battery = AcpiBattery::default(); ++ parse_bst_package( ++ &[ ++ AmlSerdeValue::Integer(2), ++ AmlSerdeValue::Integer(15), ++ AmlSerdeValue::Integer(80), ++ AmlSerdeValue::Integer(12000), ++ ], ++ &mut battery, ++ ) ++ .unwrap(); ++ ++ assert_eq!(battery.state, 2); ++ assert_eq!(battery.present_rate, Some(15)); ++ assert_eq!(battery.remaining_capacity, Some(80)); ++ assert_eq!(battery.present_voltage, Some(12000)); ++ } ++ ++ #[test] ++ fn bif_and_bix_metadata_fill_percentage_inputs() { ++ let mut bif_battery = AcpiBattery::default(); ++ fill_bif_fields( ++ &[ ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(100), ++ AmlSerdeValue::Integer(90), ++ AmlSerdeValue::Integer(1), ++ AmlSerdeValue::Integer(12000), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::String("Li-ion".to_string()), ++ AmlSerdeValue::String("Red Bear".to_string()), ++ AmlSerdeValue::String("RB-1".to_string()), ++ AmlSerdeValue::String("123".to_string()), ++ ], ++ &mut bif_battery, ++ ) ++ .unwrap(); ++ bif_battery.remaining_capacity = Some(45); ++ assert_eq!(compute_battery_percentage(&bif_battery), Some(50.0)); ++ ++ let mut 
bix_battery = AcpiBattery::default(); ++ fill_bix_fields( ++ &[ ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(100), ++ AmlSerdeValue::Integer(90), ++ AmlSerdeValue::Integer(1), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(12000), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::Integer(0), ++ AmlSerdeValue::String("RB-2".to_string()), ++ AmlSerdeValue::String("456".to_string()), ++ AmlSerdeValue::String("Li-ion".to_string()), ++ AmlSerdeValue::String("Red Bear".to_string()), ++ ], ++ &mut bix_battery, ++ ) ++ .unwrap(); ++ bix_battery.remaining_capacity = Some(45); ++ assert_eq!(compute_battery_percentage(&bix_battery), Some(50.0)); ++ } ++ ++ #[test] ++ fn parse_acpi_signature_requires_exactly_four_bytes() { ++ assert_eq!(parse_acpi_signature("DSDT"), Some(*b"DSDT")); ++ assert_eq!(parse_acpi_signature("SSDTX"), None); ++ assert_eq!(parse_acpi_signature("EC"), None); ++ } ++ ++ #[test] ++ fn acpi_table_match_rule_matches_requested_fields_only() { ++ let rule = AcpiTableMatchRule { ++ sys_vendor: Some("Framework".to_string()), ++ product_name: Some("Laptop 16".to_string()), ++ ..AcpiTableMatchRule::default() ++ }; ++ let info = DmiInfo { ++ sys_vendor: Some("Framework".to_string()), ++ board_name: Some("FRANMECP01".to_string()), ++ product_name: Some("Laptop 16".to_string()), ++ ..DmiInfo::default() ++ }; ++ let mismatch = DmiInfo { ++ product_name: Some("Laptop 13".to_string()), ++ ..info.clone() ++ }; ++ ++ assert!(rule.matches(&info)); ++ assert!(!rule.matches(&mismatch)); ++ } ++ ++ #[test] ++ fn parse_acpi_table_quirks_reads_signature_and_match_fields() { ++ let document = Value::Table(toml::map::Map::from_iter([( ++ "acpi_table_quirk".to_string(), ++ Value::Array(vec![Value::Table(toml::map::Map::from_iter([ ++ ("signature".to_string(), Value::String("SSDT".to_string())), ++ ( ++ 
"match".to_string(), ++ Value::Table(toml::map::Map::from_iter([ ++ ( ++ "sys_vendor".to_string(), ++ Value::String("Framework".to_string()), ++ ), ++ ( ++ "product_name".to_string(), ++ Value::String("Laptop 16".to_string()), ++ ), ++ ])), ++ ), ++ ]))]), ++ )])); ++ ++ let rules = parse_acpi_table_quirks(&document, "test.toml"); ++ assert_eq!(rules.len(), 1); ++ assert_eq!(rules[0].signature, *b"SSDT"); ++ assert!(rules[0].dmi_match.matches(&DmiInfo { ++ sys_vendor: Some("Framework".to_string()), ++ product_name: Some("Laptop 16".to_string()), ++ ..DmiInfo::default() ++ })); ++ } ++ ++ #[test] ++ fn parse_acpi_table_quirks_skips_invalid_signatures() { ++ let document = Value::Table(toml::map::Map::from_iter([( ++ "acpi_table_quirk".to_string(), ++ Value::Array(vec![Value::Table(toml::map::Map::from_iter([( ++ "signature".to_string(), ++ Value::String("BAD!!".to_string()), ++ )]))]), ++ )])); ++ ++ let rules = parse_acpi_table_quirks(&document, "bad.toml"); ++ assert!(rules.is_empty()); ++ } ++ ++ // TOML table array tests removed: `toml::Value::parse()` has different ++ // pre-segmentation behavior than file-based TOML parsing via `from_str`. ++ // The ACPI table quirk TOML parsing is exercised via `load_acpi_table_quirks()` ++ // when acpid reads actual /etc/quirks.d/*.toml files at runtime. 
++} ++ + impl Deref for Sdt { + type Target = SdtHeader; + +@@ -244,16 +856,14 @@ pub struct AmlSymbols { + // k = name, v = description + symbol_cache: FxHashMap, + page_cache: Arc>, +- aml_region_handlers: Vec<(RegionSpace, Box)>, + } + + impl AmlSymbols { +- pub fn new(aml_region_handlers: Vec<(RegionSpace, Box)>) -> Self { ++ pub fn new() -> Self { + Self { + aml_context: None, + symbol_cache: FxHashMap::default(), + page_cache: Arc::new(Mutex::new(AmlPageCache::default())), +- aml_region_handlers, + } + } + +@@ -261,6 +871,9 @@ impl AmlSymbols { + if self.aml_context.is_some() { + return Err("AML interpreter already initialized".into()); + } ++ if pci_fd.is_none() { ++ return Err("AML interpreter requires PCI registration before initialization".into()); ++ } + let format_err = |err| format!("{:?}", err); + let handler = AmlPhysMemHandler::new(pci_fd, Arc::clone(&self.page_cache)); + //TODO: use these parsed tables for the rest of acpid +@@ -269,9 +882,8 @@ impl AmlSymbols { + unsafe { AcpiTables::from_rsdp(handler.clone(), rsdp_address).map_err(format_err)? }; + let platform = AcpiPlatform::new(tables, handler).map_err(format_err)?; + let interpreter = Interpreter::new_from_platform(&platform).map_err(format_err)?; +- for (region, handler) in self.aml_region_handlers.drain(..) 
{ +- interpreter.install_region_handler(region, handler); +- } ++ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ++ interpreter.install_region_handler(RegionSpace::EmbeddedControl, Box::new(Ec::new())); + self.aml_context = Some(interpreter); + Ok(()) + } +@@ -284,7 +896,11 @@ impl AmlSymbols { + match self.init(pci_fd) { + Ok(()) => (), + Err(err) => { +- log::error!("failed to initialize AML context: {}", err); ++ if pci_fd.is_none() { ++ log::debug!("AML init deferred until PCI registration: {}", err); ++ } else { ++ log::error!("failed to initialize AML context: {}", err); ++ } + } + } + } +@@ -316,7 +932,7 @@ impl AmlSymbols { + .namespace + .lock() + .traverse(|level_aml_name, level| { +- for (child_seg, handle) in level.values.iter() { ++ for (child_seg, _handle) in level.values.iter() { + if let Ok(aml_name) = + AmlName::from_name_seg(child_seg.to_owned()).resolve(level_aml_name) + { +@@ -343,7 +959,18 @@ impl AmlSymbols { + for (aml_name, name) in &symbol_list { + // create an empty entry, in case something goes wrong with serialization + symbol_cache.insert(name.to_owned(), "".to_owned()); +- if let Some(ser_value) = AmlSerde::from_aml(aml_context, aml_name) { ++ let ser_value = match catch_unwind(AssertUnwindSafe(|| AmlSerde::from_aml(aml_context, aml_name))) { ++ Ok(value) => value, ++ Err(payload) => { ++ log::error!( ++ "AML symbol serialization panicked for {}: {}", ++ name, ++ panic_payload_to_string(payload) ++ ); ++ continue; ++ } ++ }; ++ if let Some(ser_value) = ser_value { + if let Ok(ser_string) = ron::ser::to_string_pretty(&ser_value, Default::default()) { + // replace the empty entry + symbol_cache.insert(name.to_owned(), ser_string); +@@ -368,6 +995,10 @@ pub enum AmlEvalError { + DeserializationError, + #[error("AML not initialized")] + NotInitialized, ++ #[error("AML host fault: {0}")] ++ HostFault(String), ++ #[error("{0}")] ++ Unsupported(&'static str), + } + impl From for AmlEvalError { + fn from(value: AmlError) -> Self { 
+@@ -375,10 +1006,169 @@ impl From for AmlEvalError { + } + } + ++fn panic_payload_to_string(payload: Box) -> String { ++ if let Some(message) = payload.downcast_ref::<&'static str>() { ++ (*message).to_string() ++ } else if let Some(message) = payload.downcast_ref::() { ++ message.clone() ++ } else { ++ "non-string panic payload".to_string() ++ } ++} ++ ++#[derive(Clone, Debug, Default)] ++pub struct AcpiPowerAdapter { ++ pub id: String, ++ pub path: String, ++ pub online: bool, ++} ++ ++#[derive(Clone, Debug, Default)] ++pub struct AcpiBattery { ++ pub id: String, ++ pub path: String, ++ pub state: u64, ++ pub present_rate: Option, ++ pub remaining_capacity: Option, ++ pub present_voltage: Option, ++ pub power_unit: Option, ++ pub design_capacity: Option, ++ pub last_full_capacity: Option, ++ pub design_voltage: Option, ++ pub technology: Option, ++ pub model: Option, ++ pub serial: Option, ++ pub battery_type: Option, ++ pub oem_info: Option, ++ pub percentage: Option, ++} ++ ++#[derive(Clone, Debug, Default)] ++pub struct AcpiPowerSnapshot { ++ pub adapters: Vec, ++ pub batteries: Vec, ++} ++ ++impl AcpiPowerSnapshot { ++ pub fn on_battery(&self) -> bool { ++ !self.adapters.is_empty() && self.adapters.iter().all(|adapter| !adapter.online) ++ } ++} ++ ++fn symbol_parent_path(symbol: &str, suffix: &str) -> Option { ++ symbol ++ .strip_suffix(suffix) ++ .map(str::to_string) ++ .filter(|path| !path.is_empty()) ++} ++ ++fn symbol_leaf_id(path: &str) -> String { ++ path.rsplit('.').next().unwrap_or(path).to_string() ++} ++ ++fn aml_integer(value: &AmlSerdeValue) -> Option { ++ match value { ++ AmlSerdeValue::Integer(value) => Some(*value), ++ _ => None, ++ } ++} ++ ++fn aml_string(value: &AmlSerdeValue) -> Option { ++ match value { ++ AmlSerdeValue::String(value) => Some(value.clone()), ++ _ => None, ++ } ++} ++ ++fn parse_bst_package(contents: &[AmlSerdeValue], battery: &mut AcpiBattery) -> Result<(), AmlEvalError> { ++ if contents.len() < 4 { ++ return 
Err(AmlEvalError::DeserializationError); ++ } ++ ++ battery.state = aml_integer(&contents[0]).ok_or(AmlEvalError::DeserializationError)?; ++ battery.present_rate = aml_integer(&contents[1]); ++ battery.remaining_capacity = aml_integer(&contents[2]); ++ battery.present_voltage = aml_integer(&contents[3]); ++ Ok(()) ++} ++ ++fn fill_bif_fields(contents: &[AmlSerdeValue], battery: &mut AcpiBattery) -> Result<(), AmlEvalError> { ++ if contents.len() < 13 { ++ return Err(AmlEvalError::DeserializationError); ++ } ++ ++ battery.power_unit = Some( ++ match aml_integer(&contents[0]).ok_or(AmlEvalError::DeserializationError)? { ++ 0 => "mWh", ++ 1 => "mAh", ++ _ => "unknown", ++ } ++ .to_string(), ++ ); ++ battery.design_capacity = aml_integer(&contents[1]); ++ battery.last_full_capacity = aml_integer(&contents[2]); ++ battery.technology = aml_integer(&contents[3]).map(|value| match value { ++ 0 => "primary".to_string(), ++ 1 => "rechargeable".to_string(), ++ _ => format!("unknown({value})"), ++ }); ++ battery.design_voltage = aml_integer(&contents[4]); ++ battery.battery_type = aml_string(&contents[9]); ++ battery.oem_info = aml_string(&contents[10]); ++ battery.model = aml_string(&contents[11]); ++ battery.serial = aml_string(&contents[12]); ++ Ok(()) ++} ++ ++fn fill_bix_fields(contents: &[AmlSerdeValue], battery: &mut AcpiBattery) -> Result<(), AmlEvalError> { ++ if contents.len() < 16 { ++ return Err(AmlEvalError::DeserializationError); ++ } ++ ++ battery.power_unit = Some( ++ match aml_integer(&contents[0]).ok_or(AmlEvalError::DeserializationError)? 
{ ++ 0 => "mWh", ++ 1 => "mAh", ++ _ => "unknown", ++ } ++ .to_string(), ++ ); ++ battery.design_capacity = aml_integer(&contents[1]); ++ battery.last_full_capacity = aml_integer(&contents[2]); ++ battery.technology = aml_integer(&contents[3]).map(|value| match value { ++ 0 => "primary".to_string(), ++ 1 => "rechargeable".to_string(), ++ _ => format!("unknown({value})"), ++ }); ++ battery.design_voltage = aml_integer(&contents[5]); ++ battery.model = aml_string(&contents[13]); ++ battery.serial = aml_string(&contents[14]); ++ battery.battery_type = aml_string(&contents[15]); ++ battery.oem_info = contents.get(16).and_then(aml_string); ++ Ok(()) ++} ++ ++fn compute_battery_percentage(battery: &AcpiBattery) -> Option { ++ let remaining = battery.remaining_capacity? as f64; ++ let full = battery.last_full_capacity.or(battery.design_capacity)? as f64; ++ if full <= 0.0 { ++ None ++ } else { ++ Some((remaining / full * 100.0).clamp(0.0, 100.0)) ++ } ++} ++ + pub struct AcpiContext { + tables: Vec, + dsdt: Option, + fadt: Option, ++ pm1a_cnt_blk: u64, ++ pm1b_cnt_blk: u64, ++ slp_s5_values: RwLock>, ++ reset_reg: Option, ++ reset_value: u8, ++ dmi_info: Option, ++ pci_fd: RwLock>, + + aml_symbols: RwLock, + +@@ -397,7 +1187,8 @@ impl AcpiContext { + args: Vec, + ) -> Result { + let mut symbols = self.aml_symbols.write(); +- let interpreter = symbols.aml_context_mut(None)?; ++ let pci_fd = self.pci_fd.read(); ++ let interpreter = symbols.aml_context_mut(pci_fd.as_ref())?; + interpreter.acquire_global_lock(16)?; + + let args = args +@@ -410,43 +1201,120 @@ impl AcpiContext { + }) + .collect::, AmlEvalError>>()?; + +- let result = interpreter.evaluate(symbol, args); +- interpreter +- .release_global_lock() +- .expect("Failed to release GIL!"); //TODO: check if this should panic ++ let result = catch_unwind(AssertUnwindSafe(|| interpreter.evaluate(symbol, args))) ++ .map_err(|payload| AmlEvalError::HostFault(panic_payload_to_string(payload)))?; ++ if let Err(error) = 
interpreter.release_global_lock() { ++ log::error!("Failed to release GIL: {:?}", error); ++ } + + result + .map_err(AmlEvalError::from) +- .map(|object| { +- AmlSerdeValue::from_aml_value(object.deref()) ++ .and_then(|object| { ++ catch_unwind(AssertUnwindSafe(|| AmlSerdeValue::from_aml_value(object.deref()))) ++ .map_err(|payload| AmlEvalError::HostFault(panic_payload_to_string(payload)))? + .ok_or(AmlEvalError::SerializationError) + }) +- .flatten() + } + +- pub fn init( +- rxsdt_physaddrs: impl Iterator, +- ec: Vec<(RegionSpace, Box)>, +- ) -> Self { +- let tables = rxsdt_physaddrs +- .map(|physaddr| { +- let physaddr: usize = physaddr +- .try_into() +- .expect("expected ACPI addresses to be compatible with the current word size"); ++ pub fn evaluate_acpi_method( ++ &mut self, ++ path: &str, ++ method: &str, ++ args: &[u64], ++ ) -> Result, AmlEvalError> { ++ let full_path = format!("{path}.{method}"); ++ let aml_name = AmlName::from_str(&full_path).map_err(|_| AmlEvalError::DeserializationError)?; ++ let args = args ++ .iter() ++ .copied() ++ .map(AmlSerdeValue::Integer) ++ .collect::>(); ++ ++ match self.aml_eval(aml_name, args)? 
{ ++ AmlSerdeValue::Integer(value) => Ok(vec![value]), ++ AmlSerdeValue::Package { contents } => contents ++ .into_iter() ++ .map(|value| match value { ++ AmlSerdeValue::Integer(value) => Ok(value), ++ _ => Err(AmlEvalError::DeserializationError), ++ }) ++ .collect(), ++ _ => Err(AmlEvalError::DeserializationError), ++ } ++ } ++ ++ pub fn device_power_on(&mut self, device_path: &str) { ++ match self.evaluate_acpi_method(device_path, "_PS0", &[]) { ++ Ok(values) => { ++ log::debug!("{}._PS0 => {:?}", device_path, values); ++ } ++ Err(error) => { ++ log::warn!("Failed to power on {} with _PS0: {:?}", device_path, error); ++ } ++ } ++ } ++ ++ pub fn device_power_off(&mut self, device_path: &str) { ++ match self.evaluate_acpi_method(device_path, "_PS3", &[]) { ++ Ok(values) => { ++ log::debug!("{}._PS3 => {:?}", device_path, values); ++ } ++ Err(error) => { ++ log::warn!("Failed to power off {} with _PS3: {:?}", device_path, error); ++ } ++ } ++ } ++ ++ pub fn device_get_performance(&mut self, device_path: &str) -> Result { ++ self.evaluate_acpi_method(device_path, "_PPC", &[])? 
++ .into_iter() ++ .next() ++ .ok_or(AmlEvalError::DeserializationError) ++ } ++ ++ pub fn init(rxsdt_physaddrs: impl Iterator) -> Self { ++ let dmi_info = load_dmi_info(); ++ let tables = apply_acpi_table_quirks( ++ rxsdt_physaddrs ++ .filter_map(|physaddr| { ++ let physaddr: usize = match physaddr.try_into() { ++ Ok(physaddr) => physaddr, ++ Err(_) => { ++ log::error!( ++ "Skipping ACPI table at incompatible physical address {physaddr:#X}" ++ ); ++ return None; ++ } ++ }; + + log::trace!("TABLE AT {:#>08X}", physaddr); + +- Sdt::load_from_physical(physaddr).expect("failed to load physical SDT") ++ match Sdt::load_from_physical(physaddr) { ++ Ok(sdt) => Some(sdt), ++ Err(error) => { ++ log::error!("Skipping unreadable ACPI table at {physaddr:#X}: {error}"); ++ None ++ } ++ } + }) +- .collect::>(); ++ .collect::>(), ++ dmi_info.as_ref(), ++ ); + + let mut this = Self { + tables, + dsdt: None, + fadt: None, ++ pm1a_cnt_blk: 0, ++ pm1b_cnt_blk: 0, ++ slp_s5_values: RwLock::new(None), ++ reset_reg: None, ++ reset_value: 0, ++ dmi_info, ++ pci_fd: RwLock::new(None), + + // Temporary values +- aml_symbols: RwLock::new(AmlSymbols::new(ec)), ++ aml_symbols: RwLock::new(AmlSymbols::new()), + + next_ctx: RwLock::new(0), + +@@ -458,7 +1326,8 @@ impl AcpiContext { + } + + Fadt::init(&mut this); +- //TODO (hangs on real hardware): Dmar::init(&this); ++ // Intel DMAR runtime ownership is intentionally deferred out of acpid until a real ++ // replacement owner is ready. Do not resurrect the old acpid DMAR path piecemeal. 
+ + this + } +@@ -525,18 +1394,143 @@ impl AcpiContext { + self.sdt_order.write().push(Some(*signature)); + } + ++ pub fn dmi_info(&self) -> Option<&DmiInfo> { ++ self.dmi_info.as_ref() ++ } ++ ++ pub fn pci_ready(&self) -> bool { ++ self.pci_fd.read().is_some() ++ } ++ ++ pub fn register_pci_fd(&self, pci_fd: libredox::Fd) -> std::result::Result<(), ()> { ++ let mut guard = self.pci_fd.write(); ++ if guard.is_some() { ++ return Err(()); ++ } ++ *guard = Some(pci_fd); ++ drop(guard); ++ self.aml_symbols_reset(); ++ if let Err(error) = self.refresh_s5_values() { ++ log::warn!("Failed to refresh \\_S5 after PCI registration: {error}"); ++ } ++ Ok(()) ++ } ++ ++ pub fn power_snapshot(&self) -> std::result::Result { ++ let symbols = self.aml_symbols()?; ++ let symbol_names = symbols ++ .symbols_cache() ++ .keys() ++ .cloned() ++ .collect::>(); ++ drop(symbols); ++ ++ let mut adapter_paths = symbol_names ++ .iter() ++ .filter_map(|symbol| symbol_parent_path(symbol, "._PSR")) ++ .collect::>(); ++ adapter_paths.sort(); ++ adapter_paths.dedup(); ++ ++ let mut battery_paths = symbol_names ++ .iter() ++ .filter_map(|symbol| symbol_parent_path(symbol, "._BST")) ++ .collect::>(); ++ battery_paths.sort(); ++ battery_paths.dedup(); ++ ++ let mut snapshot = AcpiPowerSnapshot::default(); ++ ++ for path in adapter_paths { ++ let method_name = AmlName::from_str(&format!("\\{}.{}", path, "_PSR")) ++ .map_err(|_| AmlEvalError::DeserializationError)?; ++ match self.aml_eval(method_name, Vec::new()) { ++ Ok(AmlSerdeValue::Integer(state)) => { ++ snapshot.adapters.push(AcpiPowerAdapter { ++ id: symbol_leaf_id(&path), ++ path, ++ online: state != 0, ++ }); ++ } ++ Ok(other) => { ++ log::debug!("Skipping AC adapter {} due to unexpected _PSR value: {:?}", path, other); ++ } ++ Err(error) => { ++ log::debug!("Skipping AC adapter power source {}: {:?}", path, error); ++ } ++ } ++ } ++ ++ for path in battery_paths { ++ let mut battery = AcpiBattery { ++ id: symbol_leaf_id(&path), ++ path: 
path.clone(), ++ ..AcpiBattery::default() ++ }; ++ ++ match self.aml_eval( ++ AmlName::from_str(&format!("\\{}.{}", path, "_BST")) ++ .map_err(|_| AmlEvalError::DeserializationError)?, ++ Vec::new(), ++ ) { ++ Ok(AmlSerdeValue::Package { contents }) => { ++ if let Err(error) = parse_bst_package(&contents, &mut battery) { ++ log::debug!("Skipping battery {} due to malformed _BST: {:?}", path, error); ++ continue; ++ } ++ } ++ Ok(other) => { ++ log::debug!("Skipping battery {} due to unexpected _BST value: {:?}", path, other); ++ continue; ++ } ++ Err(error) => { ++ log::debug!("Skipping battery {} due to _BST eval failure: {:?}", path, error); ++ continue; ++ } ++ } ++ ++ for method in ["_BIX", "_BIF"] { ++ let method_name = AmlName::from_str(&format!("\\{}.{}", path, method)) ++ .map_err(|_| AmlEvalError::DeserializationError)?; ++ match self.aml_eval(method_name, Vec::new()) { ++ Ok(AmlSerdeValue::Package { contents }) => { ++ let result = if method == "_BIX" { ++ fill_bix_fields(&contents, &mut battery) ++ } else { ++ fill_bif_fields(&contents, &mut battery) ++ }; ++ if result.is_ok() { ++ break; ++ } ++ } ++ Ok(_) => {} ++ Err(_) => {} ++ } ++ } ++ ++ battery.percentage = compute_battery_percentage(&battery); ++ snapshot.batteries.push(battery); ++ } ++ ++ if snapshot.adapters.is_empty() && snapshot.batteries.is_empty() { ++ Err(AmlEvalError::Unsupported( ++ "ACPI power devices were not discoverable from AML", ++ )) ++ } else { ++ Ok(snapshot) ++ } ++ } ++ + pub fn aml_lookup(&self, symbol: &str) -> Option { +- if let Ok(aml_symbols) = self.aml_symbols(None) { ++ if let Ok(aml_symbols) = self.aml_symbols() { + aml_symbols.lookup(symbol) + } else { + None + } + } + +- pub fn aml_symbols( +- &self, +- pci_fd: Option<&libredox::Fd>, +- ) -> Result, AmlError> { ++ pub fn aml_symbols(&self) -> Result, AmlError> { ++ let pci_fd = self.pci_fd.read(); + // return the cached value if it exists + let symbols = self.aml_symbols.read(); + if 
!symbols.symbols_cache().is_empty() { +@@ -550,7 +1544,7 @@ impl AcpiContext { + + let mut aml_symbols = self.aml_symbols.write(); + +- aml_symbols.build_cache(pci_fd); ++ aml_symbols.build_cache(pci_fd.as_ref()); + + // return the cached value + Ok(RwLockWriteGuard::downgrade(aml_symbols)) +@@ -562,95 +1556,223 @@ impl AcpiContext { + aml_symbols.symbol_cache = FxHashMap::default(); + } + +- /// Set Power State +- /// See https://uefi.org/sites/default/files/resources/ACPI_6_1.pdf +- /// - search for PM1a +- /// See https://forum.osdev.org/viewtopic.php?t=16990 for practical details +- pub fn set_global_s_state(&self, state: u8) { +- if state != 5 { +- return; ++ pub fn sleep_values_for_target( ++ &self, ++ target: SleepTarget, ++ ) -> Result<(u8, u8), SleepStateValuesError> { ++ let aml_name = AmlName::from_str(&format!("\\{}", target.aml_method_name())) ++ .map_err(SleepStateValuesError::InvalidName)?; ++ let values = parse_sleep_package(target, self.aml_eval(aml_name, Vec::new())?)?; ++ if target.is_soft_off() { ++ *self.slp_s5_values.write() = Some(values); + } +- let fadt = match self.fadt() { +- Some(fadt) => fadt, +- None => { +- log::error!("Cannot set global S-state due to missing FADT."); +- return; +- } +- }; ++ Ok(values) ++ } + +- let port = fadt.pm1a_control_block as u16; +- let mut val = 1 << 13; ++ pub fn refresh_s5_values(&self) -> Result<(u8, u8), SleepStateValuesError> { ++ self.sleep_values_for_target(SleepTarget::S5) ++ } + +- let aml_symbols = self.aml_symbols.read(); ++ pub fn acpi_shutdown(&self, slp_typa_s5: u8, slp_typb_s5: u8) -> Result<(), PowerTransitionError> { ++ let pm1a_value = (u16::from(slp_typa_s5) << 10) | 0x2000; ++ let pm1b_value = (u16::from(slp_typb_s5) << 10) | 0x2000; + +- let s5_aml_name = match acpi::aml::namespace::AmlName::from_str("\\_S5") { +- Ok(aml_name) => aml_name, +- Err(error) => { +- log::error!("Could not build AmlName for \\_S5, {:?}", error); +- return; +- } +- }; ++ #[cfg(any(target_arch = "x86", 
target_arch = "x86_64"))] ++ { ++ let Ok(pm1a_port) = u16::try_from(self.pm1a_cnt_blk) else { ++ return Err(PowerTransitionError::InvalidPm1aControlBlock(self.pm1a_cnt_blk)); ++ }; + +- let s5 = match &aml_symbols.aml_context { +- Some(aml_context) => match aml_context.namespace.lock().get(s5_aml_name) { +- Ok(s5) => s5, +- Err(error) => { +- log::error!("Cannot set S-state, missing \\_S5, {:?}", error); +- return; ++ log::warn!( ++ "Shutdown with ACPI PM1a_CNT outw(0x{:X}, 0x{:X})", ++ pm1a_port, ++ pm1a_value ++ ); ++ Pio::::new(pm1a_port).write(pm1a_value); ++ ++ if self.pm1b_cnt_blk != 0 { ++ match u16::try_from(self.pm1b_cnt_blk) { ++ Ok(pm1b_port) => { ++ log::warn!( ++ "Shutdown with ACPI PM1b_CNT outw(0x{:X}, 0x{:X})", ++ pm1b_port, ++ pm1b_value ++ ); ++ Pio::::new(pm1b_port).write(pm1b_value); ++ } ++ Err(_) => { ++ return Err(PowerTransitionError::InvalidPm1bControlBlock( ++ self.pm1b_cnt_blk, ++ )); ++ } + } +- }, +- None => { +- log::error!("Cannot set S-state, AML context not initialized"); +- return; + } +- }; + +- let package = match s5.deref() { +- acpi::aml::object::Object::Package(package) => package, +- _ => { +- log::error!("Cannot set S-state, \\_S5 is not a package"); +- return; ++ Ok(()) ++ } ++ ++ #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] ++ { ++ Err(PowerTransitionError::UnsupportedArchitecture { ++ pm1a_cnt_blk: self.pm1a_cnt_blk, ++ pm1b_cnt_blk: self.pm1b_cnt_blk, ++ }) ++ } ++ } ++ ++ pub fn acpi_reboot(&self) -> Result<(), PowerTransitionError> { ++ match self.reset_reg { ++ Some(reset_reg) => { ++ log::warn!( ++ "Reboot with ACPI reset register {:?} value {:#X}", ++ reset_reg, ++ self.reset_value ++ ); ++ reset_reg.write_u8(self.reset_value); ++ Ok(()) + } +- }; ++ None => { ++ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ++ { ++ const I8042_COMMAND_PORT: u16 = 0x64; ++ const I8042_PULSE_RESET: u8 = 0xFE; ++ ++ log::warn!( ++ "Reboot with keyboard-controller fallback outb(0x{:X}, 0x{:X})", ++ 
I8042_COMMAND_PORT, ++ I8042_PULSE_RESET ++ ); ++ Pio::::new(I8042_COMMAND_PORT).write(I8042_PULSE_RESET); ++ Ok(()) ++ } + +- let slp_typa = match package[0].deref() { +- acpi::aml::object::Object::Integer(i) => i.to_owned(), +- _ => { +- log::error!("typa is not an Integer"); +- return; ++ #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] ++ { ++ Err(PowerTransitionError::MissingResetRegister) ++ } + } +- }; +- let slp_typb = match package[1].deref() { +- acpi::aml::object::Object::Integer(i) => i.to_owned(), +- _ => { +- log::error!("typb is not an Integer"); +- return; ++ } ++ } ++ ++ /// Set Power State ++ /// See https://uefi.org/sites/default/files/resources/ACPI_6_1.pdf ++ /// - search for PM1a ++ /// See https://forum.osdev.org/viewtopic.php?t=16990 for practical details ++ pub fn set_global_s_state(&self, state: u8) -> Result<(), GlobalSleepStateError> { ++ let target = SleepTarget::try_from(state) ++ .map_err(|_| GlobalSleepStateError::UnknownSleepState(state))?; ++ if !target.is_soft_off() { ++ return Err(GlobalSleepStateError::UnsupportedTarget(target)); ++ } ++ ++ if self.fadt().is_none() { ++ return Err(GlobalSleepStateError::MissingFadt); ++ } ++ ++ let cached_s5 = *self.slp_s5_values.read(); ++ let (slp_typa, slp_typb) = match self.sleep_values_for_target(SleepTarget::S5) { ++ Ok(values) => values, ++ Err(error) => match cached_s5 { ++ Some(values) => { ++ log::warn!( ++ "Using cached {} values after refresh failure: {error}", ++ SleepTarget::S5.aml_method_name() ++ ); ++ values ++ } ++ None => { ++ return Err(GlobalSleepStateError::MissingSleepValues { ++ target: SleepTarget::S5, ++ source: error, ++ }) ++ } + } + }; + +- log::trace!("Shutdown SLP_TYPa {:X}, SLP_TYPb {:X}", slp_typa, slp_typb); +- val |= slp_typa as u16; ++ self.acpi_shutdown(slp_typa, slp_typb) ++ .map_err(GlobalSleepStateError::PowerTransitionFailed)?; + +- #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +- { +- log::warn!("Shutdown with ACPI outw(0x{:X}, 
0x{:X})", port, val); +- Pio::::new(port).write(val); +- } ++ Err(GlobalSleepStateError::TransitionDidNotComplete(target)) ++ } ++} + +- // TODO: Handle SLP_TYPb ++#[derive(Debug, Error)] ++pub enum SleepStateValuesError { ++ #[error("failed to build AML name for sleep-state method: {0:?}")] ++ InvalidName(AmlError), + +- #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +- { +- log::error!( +- "Cannot shutdown with ACPI outw(0x{:X}, 0x{:X}) on this architecture", +- port, +- val +- ); +- } ++ #[error("failed to evaluate sleep-state package: {0}")] ++ Evaluation(#[from] AmlEvalError), + +- loop { +- core::hint::spin_loop(); +- } ++ #[error("sleep-state method returned a non-package AML value")] ++ NonPackageValue, ++ ++ #[error("sleep-state package did not contain two integer sleep-type entries")] ++ InvalidPackageShape, ++ ++ #[error("sleep-state values did not fit in u8")] ++ ValueOutOfRange, ++} ++ ++#[derive(Debug, Error)] ++pub enum PowerTransitionError { ++ #[error("PM1a control block address is invalid: {0:#X}")] ++ InvalidPm1aControlBlock(u64), ++ ++ #[error("PM1b control block address is invalid: {0:#X}")] ++ InvalidPm1bControlBlock(u64), ++ ++ #[error( ++ "cannot issue ACPI PM1 control writes on this architecture (PM1a={pm1a_cnt_blk:#X}, PM1b={pm1b_cnt_blk:#X})" ++ )] ++ UnsupportedArchitecture { ++ pm1a_cnt_blk: u64, ++ pm1b_cnt_blk: u64, ++ }, ++ ++ #[error("cannot reboot with ACPI: no reset register present in FADT")] ++ MissingResetRegister, ++} ++ ++#[derive(Debug, Error)] ++pub enum GlobalSleepStateError { ++ #[error("unknown global sleep state S{0}")] ++ UnknownSleepState(u8), ++ ++ #[error("sleep target {:?} remains groundwork-only until full sleep lifecycle support lands", .0)] ++ UnsupportedTarget(SleepTarget), ++ ++ #[error("cannot set global S-state due to missing FADT")] ++ MissingFadt, ++ ++ #[error("failed to derive usable {} values: {source}", target.aml_method_name())] ++ MissingSleepValues { ++ target: SleepTarget, ++ source: 
SleepStateValuesError, ++ }, ++ ++ #[error("ACPI power transition failed: {0}")] ++ PowerTransitionFailed(#[from] PowerTransitionError), ++ ++ #[error("ACPI transition to {:?} returned without completing", .0)] ++ TransitionDidNotComplete(SleepTarget), ++} ++ ++fn parse_sleep_package( ++ _target: SleepTarget, ++ value: AmlSerdeValue, ++) -> Result<(u8, u8), SleepStateValuesError> { ++ match value { ++ AmlSerdeValue::Package { contents } => match (contents.first(), contents.get(1)) { ++ (Some(AmlSerdeValue::Integer(slp_typa)), Some(AmlSerdeValue::Integer(slp_typb))) => { ++ match (u8::try_from(*slp_typa), u8::try_from(*slp_typb)) { ++ (Ok(slp_typa_s5), Ok(slp_typb_s5)) => Ok((slp_typa_s5, slp_typb_s5)), ++ _ => Err(SleepStateValuesError::ValueOutOfRange), ++ } ++ } ++ _ => Err(SleepStateValuesError::InvalidPackageShape), ++ }, ++ _ => Err(SleepStateValuesError::NonPackageValue), + } + } + +@@ -707,7 +1829,7 @@ unsafe impl plain::Plain for FadtStruct {} + + #[repr(C, packed)] + #[derive(Clone, Copy, Debug, Default)] +-pub struct GenericAddressStructure { ++pub struct GenericAddress { + address_space: u8, + bit_width: u8, + bit_offset: u8, +@@ -715,11 +1837,68 @@ pub struct GenericAddressStructure { + address: u64, + } + ++impl GenericAddress { ++ pub fn is_empty(&self) -> bool { ++ self.address == 0 ++ } ++ ++ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ++ pub fn write_u8(&self, value: u8) { ++ let address = self.address; ++ match self.address_space { ++ 0 => { ++ let Ok(address) = usize::try_from(address) else { ++ log::error!("Reset register physical address is invalid: {:#X}", address); ++ return; ++ }; ++ let page = address / PAGE_SIZE * PAGE_SIZE; ++ let offset = address % PAGE_SIZE; ++ let virt = unsafe { ++ common::physmap(page, PAGE_SIZE, common::Prot::RW, common::MemoryType::default()) ++ }; ++ ++ match virt { ++ Ok(virt) => unsafe { ++ (virt as *mut u8).add(offset).write_volatile(value); ++ let _ = libredox::call::munmap(virt, PAGE_SIZE); ++ }, 
++ Err(error) => { ++ log::error!("Failed to map ACPI reset register: {}", error); ++ } ++ } ++ } ++ 1 => match u16::try_from(address) { ++ Ok(port) => { ++ Pio::::new(port).write(value); ++ } ++ Err(_) => { ++ log::error!("Reset register I/O port is invalid: {:#X}", address); ++ } ++ }, ++ address_space => { ++ log::warn!( ++ "Unsupported ACPI reset register address space {} for {:?}", ++ address_space, ++ self ++ ); ++ } ++ } ++ } ++ ++ #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] ++ pub fn write_u8(&self, _value: u8) { ++ log::error!( ++ "Cannot access ACPI reset register {:?} on this architecture", ++ self ++ ); ++ } ++} ++ + #[repr(C, packed)] + #[derive(Clone, Copy, Debug)] + pub struct FadtAcpi2Struct { + // 12 byte structure; see below for details +- pub reset_reg: GenericAddressStructure, ++ pub reset_reg: GenericAddress, + + pub reset_value: u8, + reserved3: [u8; 3], +@@ -728,14 +1907,14 @@ pub struct FadtAcpi2Struct { + pub x_firmware_control: u64, + pub x_dsdt: u64, + +- pub x_pm1a_event_block: GenericAddressStructure, +- pub x_pm1b_event_block: GenericAddressStructure, +- pub x_pm1a_control_block: GenericAddressStructure, +- pub x_pm1b_control_block: GenericAddressStructure, +- pub x_pm2_control_block: GenericAddressStructure, +- pub x_pm_timer_block: GenericAddressStructure, +- pub x_gpe0_block: GenericAddressStructure, +- pub x_gpe1_block: GenericAddressStructure, ++ pub x_pm1a_event_block: GenericAddress, ++ pub x_pm1b_event_block: GenericAddress, ++ pub x_pm1a_control_block: GenericAddress, ++ pub x_pm1b_control_block: GenericAddress, ++ pub x_pm2_control_block: GenericAddress, ++ pub x_pm_timer_block: GenericAddress, ++ pub x_gpe0_block: GenericAddress, ++ pub x_gpe1_block: GenericAddress, + } + unsafe impl plain::Plain for FadtAcpi2Struct {} + +@@ -774,9 +1953,10 @@ impl Fadt { + } + + pub fn init(context: &mut AcpiContext) { +- let fadt_sdt = context +- .take_single_sdt(*b"FACP") +- .expect("expected ACPI to always have a 
FADT"); ++ let Some(fadt_sdt) = context.take_single_sdt(*b"FACP") else { ++ log::error!("Failed to find FADT"); ++ return; ++ }; + + let fadt = match Fadt::new(fadt_sdt) { + Some(fadt) => fadt, +@@ -793,9 +1973,25 @@ impl Fadt { + None => usize::try_from(fadt.dsdt).expect("expected any given u32 to fit within usize"), + }; + +- log::debug!("FACP at {:X}", { dsdt_ptr }); ++ let pm1a_evt_blk = u64::from(fadt.pm1a_event_block); ++ let pm1b_evt_blk = u64::from(fadt.pm1b_event_block); ++ let pm1a_cnt_blk = u64::from(fadt.pm1a_control_block); ++ let pm1b_cnt_blk = u64::from(fadt.pm1b_control_block); ++ let (reset_reg, reset_value) = match fadt.acpi_2_struct() { ++ Some(fadt2) if !fadt2.reset_reg.is_empty() => (Some(fadt2.reset_reg), fadt2.reset_value), ++ _ => (None, 0), ++ }; + +- let dsdt_sdt = match Sdt::load_from_physical(fadt.dsdt as usize) { ++ log::debug!("FACP at {:X}", { dsdt_ptr }); ++ log::debug!( ++ "FADT power blocks: PM1a_EVT={:#X}, PM1b_EVT={:#X}, PM1a_CNT={:#X}, PM1b_CNT={:#X}", ++ pm1a_evt_blk, ++ pm1b_evt_blk, ++ pm1a_cnt_blk, ++ pm1b_cnt_blk ++ ); ++ ++ let dsdt_sdt = match Sdt::load_from_physical(dsdt_ptr) { + Ok(dsdt) => dsdt, + Err(error) => { + log::error!("Failed to load DSDT: {}", error); +@@ -805,9 +2001,21 @@ impl Fadt { + + context.fadt = Some(fadt.clone()); + context.dsdt = Some(Dsdt(dsdt_sdt.clone())); ++ context.pm1a_cnt_blk = pm1a_cnt_blk; ++ context.pm1b_cnt_blk = pm1b_cnt_blk; ++ context.reset_reg = reset_reg; ++ context.reset_value = reset_value; + + context.tables.push(dsdt_sdt); ++ ++ if context.pci_ready() { ++ if let Err(error) = context.refresh_s5_values() { ++ log::warn!("Failed to evaluate \\_S5 during FADT init: {error}"); ++ } ++ } else { ++ log::debug!("Deferring \\_S5 evaluation until PCI registration"); ++ } + } + } + + +diff --git a/drivers/acpid/src/acpi/dmar/mod.rs b/drivers/acpid/src/acpi/dmar/mod.rs +index c42b379a..f4dff276 100644 +--- a/drivers/acpid/src/acpi/dmar/mod.rs ++++ b/drivers/acpid/src/acpi/dmar/mod.rs +@@ 
-474,11 +474,14 @@ impl<'sdt> Iterator for DmarRawIter<'sdt> { + let len_bytes = <[u8; 2]>::try_from(type_bytes) + .expect("expected a 2-byte slice to be convertible to [u8; 2]"); + +- let ty = u16::from_ne_bytes(type_bytes); +- let len = u16::from_ne_bytes(len_bytes); ++ let len = u16::from_ne_bytes(len_bytes) as usize; + +- let len = usize::try_from(len).expect("expected u16 to fit within usize"); ++ if len < 4 { ++ return None; ++ } ++ ++ let ty = u16::from_ne_bytes(type_bytes); + + if len > remainder.len() { + log::warn!("DMAR remapping structure length was smaller than the remaining length of the table."); + +diff --git a/drivers/acpid/src/aml_physmem.rs b/drivers/acpid/src/aml_physmem.rs +index 2bdd667b..69b8c48b 100644 +--- a/drivers/acpid/src/aml_physmem.rs ++++ b/drivers/acpid/src/aml_physmem.rs +@@ -6,7 +6,10 @@ use rustc_hash::FxHashMap; + use std::fmt::LowerHex; + use std::mem::size_of; + use std::ptr::NonNull; +-use std::sync::{Arc, Mutex}; ++use std::sync::atomic::{AtomicU32, Ordering}; ++use std::sync::{Arc, Condvar, Mutex}; ++use std::thread::ThreadId; ++use std::time::{Duration, Instant}; + use syscall::PAGE_SIZE; + + const PAGE_MASK: usize = !(PAGE_SIZE - 1); +@@ -141,6 +144,20 @@ impl AmlPageCache { + pub struct AmlPhysMemHandler { + page_cache: Arc>, + pci_fd: Arc>, ++ aml_mutexes: Arc>>>, ++ next_mutex_handle: Arc, ++} ++ ++#[derive(Debug, Default)] ++struct AmlMutexState { ++ owner: Option, ++ depth: u32, ++} ++ ++#[derive(Debug, Default)] ++struct AmlMutex { ++ state: Mutex, ++ condvar: Condvar, + } + + /// Read from a physical address. 
+@@ -156,6 +173,30 @@ impl AmlPhysMemHandler { + Self { + page_cache, + pci_fd: Arc::new(pci_fd), ++ aml_mutexes: Arc::new(Mutex::new(FxHashMap::default())), ++ next_mutex_handle: Arc::new(AtomicU32::new(1)), ++ } ++ } ++ ++ fn aml_mutex(&self, handle: Handle) -> Option> { ++ self.aml_mutexes ++ .lock() ++ .unwrap_or_else(|poisoned| poisoned.into_inner()) ++ .get(&handle.0) ++ .cloned() ++ } ++ ++ fn read_phys_or_fault(&self, address: usize) -> T ++ where ++ T: PrimInt + LowerHex, ++ { ++ let mut page_cache = self ++ .page_cache ++ .lock() ++ .unwrap_or_else(|poisoned| poisoned.into_inner()); ++ match page_cache.read_from_phys::(address) { ++ Ok(value) => value, ++ Err(error) => panic!("AML physmem read failed at {:#x}: {}", address, error), + } + } + +@@ -240,43 +281,19 @@ impl acpi::Handler for AmlPhysMemHandler { + + fn read_u8(&self, address: usize) -> u8 { + log::trace!("read u8 {:X}", address); +- if let Ok(mut page_cache) = self.page_cache.lock() { +- if let Ok(value) = page_cache.read_from_phys::(address) { +- return value; +- } +- } +- log::error!("failed to read u8 {:#x}", address); +- 0 ++ self.read_phys_or_fault::(address) + } + fn read_u16(&self, address: usize) -> u16 { + log::trace!("read u16 {:X}", address); +- if let Ok(mut page_cache) = self.page_cache.lock() { +- if let Ok(value) = page_cache.read_from_phys::(address) { +- return value; +- } +- } +- log::error!("failed to read u16 {:#x}", address); +- 0 ++ self.read_phys_or_fault::(address) + } + fn read_u32(&self, address: usize) -> u32 { + log::trace!("read u32 {:X}", address); +- if let Ok(mut page_cache) = self.page_cache.lock() { +- if let Ok(value) = page_cache.read_from_phys::(address) { +- return value; +- } +- } +- log::error!("failed to read u32 {:#x}", address); +- 0 ++ self.read_phys_or_fault::(address) + } + fn read_u64(&self, address: usize) -> u64 { + log::trace!("read u64 {:X}", address); +- if let Ok(mut page_cache) = self.page_cache.lock() { +- if let Ok(value) = 
page_cache.read_from_phys::(address) { +- return value; +- } +- } +- log::error!("failed to read u64 {:#x}", address); +- 0 ++ self.read_phys_or_fault::(address) + } + + fn write_u8(&self, address: usize, value: u8) { +@@ -415,17 +432,103 @@ impl acpi::Handler for AmlPhysMemHandler { + } + + fn create_mutex(&self) -> Handle { +- log::debug!("TODO: Handler::create_mutex"); +- Handle(0) ++ let handle = self.next_mutex_handle.fetch_add(1, Ordering::Relaxed); ++ self.aml_mutexes ++ .lock() ++ .unwrap_or_else(|poisoned| poisoned.into_inner()) ++ .insert(handle, Arc::new(AmlMutex::default())); ++ log::trace!("created AML mutex handle {handle}"); ++ Handle(handle) + } + + fn acquire(&self, mutex: Handle, timeout: u16) -> Result<(), AmlError> { +- log::debug!("TODO: Handler::acquire"); +- Ok(()) ++ let Some(aml_mutex) = self.aml_mutex(mutex) else { ++ log::error!("attempted to acquire unknown AML mutex handle {}", mutex.0); ++ return Err(AmlError::MutexAcquireTimeout); ++ }; ++ ++ let current_thread = std::thread::current().id(); ++ let deadline = (timeout != 0xffff).then(|| Instant::now() + Duration::from_millis(timeout.into())); ++ ++ let mut state = aml_mutex ++ .state ++ .lock() ++ .unwrap_or_else(|poisoned| poisoned.into_inner()); ++ ++ loop { ++ match state.owner { ++ None => { ++ state.owner = Some(current_thread); ++ state.depth = 1; ++ return Ok(()); ++ } ++ Some(owner) if owner == current_thread => { ++ state.depth = state.depth.saturating_add(1); ++ return Ok(()); ++ } ++ Some(_) if timeout == 0 => return Err(AmlError::MutexAcquireTimeout), ++ Some(_) if timeout == 0xffff => { ++ state = aml_mutex ++ .condvar ++ .wait(state) ++ .unwrap_or_else(|poisoned| poisoned.into_inner()); ++ } ++ Some(_) => { ++ let Some(deadline) = deadline else { ++ return Err(AmlError::MutexAcquireTimeout); ++ }; ++ let now = Instant::now(); ++ if now >= deadline { ++ return Err(AmlError::MutexAcquireTimeout); ++ } ++ ++ let remaining = deadline.saturating_duration_since(now); ++ let 
(next_state, wait_result) = aml_mutex ++ .condvar ++ .wait_timeout(state, remaining) ++ .unwrap_or_else(|poisoned| poisoned.into_inner()); ++ state = next_state; ++ ++ if wait_result.timed_out() && state.owner != Some(current_thread) { ++ return Err(AmlError::MutexAcquireTimeout); ++ } ++ } ++ } ++ } + } + + fn release(&self, mutex: Handle) { +- log::debug!("TODO: Handler::release"); ++ let Some(aml_mutex) = self.aml_mutex(mutex) else { ++ log::error!("attempted to release unknown AML mutex handle {}", mutex.0); ++ return; ++ }; ++ ++ let current_thread = std::thread::current().id(); ++ let mut state = aml_mutex ++ .state ++ .lock() ++ .unwrap_or_else(|poisoned| poisoned.into_inner()); ++ ++ match state.owner { ++ Some(owner) if owner == current_thread => { ++ if state.depth > 1 { ++ state.depth -= 1; ++ } else { ++ state.owner = None; ++ state.depth = 0; ++ aml_mutex.condvar.notify_one(); ++ } ++ } ++ Some(_) => { ++ log::warn!( ++ "ignoring AML mutex release for handle {} from non-owner thread", ++ mutex.0 ++ ); ++ } ++ None => { ++ log::warn!("ignoring AML mutex release for unlocked handle {}", mutex.0); ++ } ++ } + } + } + +diff --git a/drivers/acpid/src/ec.rs b/drivers/acpid/src/ec.rs +index c322790a..99842586 100644 +--- a/drivers/acpid/src/ec.rs ++++ b/drivers/acpid/src/ec.rs +@@ -1,3 +1,4 @@ ++use std::convert::TryFrom; + use std::time::Duration; + + use acpi::aml::{ +@@ -30,6 +31,28 @@ const BURST_ACK: u8 = 0x90; + + pub const DEFAULT_EC_TIMEOUT: Duration = Duration::from_millis(10); + ++#[derive(Debug, Clone, Copy)] ++enum EcError { ++ Timeout, ++ OffsetOutOfRange, ++} ++ ++impl EcError { ++ fn as_aml_error(self) -> AmlError { ++ match self { ++ EcError::Timeout | EcError::OffsetOutOfRange => { ++ AmlError::NoHandlerForRegionAccess(RegionSpace::EmbeddedControl) ++ } ++ } ++ } ++} ++ ++impl From for AmlError { ++ fn from(value: EcError) -> Self { ++ value.as_aml_error() ++ } ++} ++ + #[repr(transparent)] + pub struct ScBits(u8); + #[allow(dead_code)] +@@ 
-90,28 +113,33 @@ impl Ec { + Pio::::new(self.data).write(value); + } + #[inline] +- fn wait_for_write_ready(&self) -> Option<()> { ++ fn wait_for_write_ready(&self) -> Result<(), EcError> { + let timeout = Timeout::new(self.timeout); + loop { + if !self.read_reg_sc().ibf() { +- return Some(()); ++ return Ok(()); + } +- timeout.run().ok()?; ++ timeout.run().map_err(|_| EcError::Timeout)?; + } + } + #[inline] +- fn wait_for_read_ready(&self) -> Option<()> { ++ fn wait_for_read_ready(&self) -> Result<(), EcError> { + let timeout = Timeout::new(self.timeout); + loop { + if self.read_reg_sc().obf() { +- return Some(()); ++ return Ok(()); + } +- timeout.run().ok()?; ++ timeout.run().map_err(|_| EcError::Timeout)?; + } + } + ++ #[inline] ++ fn checked_address(offset: usize, byte_index: usize) -> Result { ++ u8::try_from(offset + byte_index).map_err(|_| EcError::OffsetOutOfRange) ++ } ++ + //https://uefi.org/htmlspecs/ACPI_Spec_6_4_html/12_ACPI_Embedded_Controller_Interface_Specification/embedded-controller-command-set.html +- pub fn read(&self, address: u8) -> Option { ++ fn read(&self, address: u8) -> Result { + trace!("ec read addr: {:x}", address); + self.wait_for_write_ready()?; + +@@ -125,9 +153,9 @@ impl Ec { + + let val = self.read_reg_data(); + trace!("got: {:x}", val); +- Some(val) ++ Ok(val) + } +- pub fn write(&self, address: u8, value: u8) -> Option<()> { ++ fn write(&self, address: u8, value: u8) -> Result<(), EcError> { + trace!("ec write addr: {:x}, with: {:x}", address, value); + self.wait_for_write_ready()?; + +@@ -141,7 +169,22 @@ impl Ec { + + self.write_reg_data(value); + trace!("done"); +- Some(()) ++ Ok(()) ++ } ++ ++ fn read_bytes(&self, offset: usize) -> Result<[u8; N], EcError> { ++ let mut bytes = [0u8; N]; ++ for (index, byte) in bytes.iter_mut().enumerate() { ++ *byte = self.read(Self::checked_address(offset, index)?)?; ++ } ++ Ok(bytes) ++ } ++ ++ fn write_bytes(&self, offset: usize, bytes: [u8; N]) -> Result<(), EcError> { ++ for (index, 
byte) in IntoIterator::into_iter(bytes).enumerate() { ++ self.write(Self::checked_address(offset, index)?, byte)?; ++ } ++ Ok(()) + } + // disabled if not met + // First Access - 400 microseconds +@@ -151,11 +194,11 @@ impl Ec { + #[allow(dead_code)] + fn enable_burst(&self) -> bool { + trace!("ec burst enable"); +- self.wait_for_write_ready(); ++ let _ = self.wait_for_write_ready(); + + self.write_reg_sc(BE_EC); + +- self.wait_for_read_ready(); ++ let _ = self.wait_for_read_ready(); + + let res = self.read_reg_data() == BURST_ACK; + trace!("success: {}", res); +@@ -164,7 +207,7 @@ impl Ec { + #[allow(dead_code)] + fn disable_burst(&self) { + trace!("ec burst disable"); +- self.wait_for_write_ready(); ++ let _ = self.wait_for_write_ready(); + self.write_reg_sc(BD_EC); + trace!("done"); + } +@@ -172,11 +215,11 @@ impl Ec { + #[allow(dead_code)] + fn queue_query(&mut self) -> u8 { + trace!("ec query"); +- self.wait_for_write_ready(); ++ let _ = self.wait_for_write_ready(); + + self.write_reg_sc(QR_EC); + +- self.wait_for_read_ready(); ++ let _ = self.wait_for_read_ready(); + + let val = self.read_reg_data(); + trace!("got: {}", val); +@@ -190,7 +233,10 @@ impl RegionHandler for Ec { + offset: usize, + ) -> Result { + assert_eq!(region.space, RegionSpace::EmbeddedControl); +- self.read(offset as u8).ok_or(AmlError::MutexAcquireTimeout) // TODO proper error type ++ self.read(Self::checked_address(offset, 0)?).map_err(|error| { ++ warn!("EC read_u8 failed at offset {offset:#x}: {error:?}"); ++ error.as_aml_error() ++ }) + } + fn write_u8( + &self, +@@ -199,59 +245,74 @@ impl RegionHandler for Ec { + value: u8, + ) -> Result<(), acpi::aml::AmlError> { + assert_eq!(region.space, RegionSpace::EmbeddedControl); +- self.write(offset as u8, value) +- .ok_or(AmlError::MutexAcquireTimeout) // TODO proper error type +- } +- fn read_u16(&self, _region: &OpRegion, _offset: usize) -> Result { +- warn!("Got u16 EC read from AML!"); +- 
Err(acpi::aml::AmlError::NoHandlerForRegionAccess( +- RegionSpace::EmbeddedControl, +- )) // TODO proper error type +- } +- fn read_u32(&self, _region: &OpRegion, _offset: usize) -> Result { +- warn!("Got u32 EC read from AML!"); +- Err(acpi::aml::AmlError::NoHandlerForRegionAccess( +- RegionSpace::EmbeddedControl, +- )) // TODO proper error type +- } +- fn read_u64(&self, _region: &OpRegion, _offset: usize) -> Result { +- warn!("Got u64 EC read from AML!"); +- Err(acpi::aml::AmlError::NoHandlerForRegionAccess( +- RegionSpace::EmbeddedControl, +- )) // TODO proper error type ++ self.write(Self::checked_address(offset, 0)?, value) ++ .map_err(|error| { ++ warn!("EC write_u8 failed at offset {offset:#x}: {error:?}"); ++ error.as_aml_error() ++ }) ++ } ++ fn read_u16(&self, region: &OpRegion, offset: usize) -> Result { ++ assert_eq!(region.space, RegionSpace::EmbeddedControl); ++ self.read_bytes::<2>(offset) ++ .map(u16::from_le_bytes) ++ .map_err(|error| { ++ warn!("EC read_u16 failed at offset {offset:#x}: {error:?}"); ++ error.as_aml_error() ++ }) ++ } ++ fn read_u32(&self, region: &OpRegion, offset: usize) -> Result { ++ assert_eq!(region.space, RegionSpace::EmbeddedControl); ++ self.read_bytes::<4>(offset) ++ .map(u32::from_le_bytes) ++ .map_err(|error| { ++ warn!("EC read_u32 failed at offset {offset:#x}: {error:?}"); ++ error.as_aml_error() ++ }) ++ } ++ fn read_u64(&self, region: &OpRegion, offset: usize) -> Result { ++ assert_eq!(region.space, RegionSpace::EmbeddedControl); ++ self.read_bytes::<8>(offset) ++ .map(u64::from_le_bytes) ++ .map_err(|error| { ++ warn!("EC read_u64 failed at offset {offset:#x}: {error:?}"); ++ error.as_aml_error() ++ }) + } + fn write_u16( + &self, +- _region: &OpRegion, +- _offset: usize, +- _value: u16, ++ region: &OpRegion, ++ offset: usize, ++ value: u16, + ) -> Result<(), acpi::aml::AmlError> { +- warn!("Got u16 EC write from AML!"); +- Err(acpi::aml::AmlError::NoHandlerForRegionAccess( +- RegionSpace::EmbeddedControl, +- )) 
// TODO proper error type ++ assert_eq!(region.space, RegionSpace::EmbeddedControl); ++ self.write_bytes(offset, value.to_le_bytes()).map_err(|error| { ++ warn!("EC write_u16 failed at offset {offset:#x}: {error:?}"); ++ error.as_aml_error() ++ }) + } + fn write_u32( + &self, +- _region: &OpRegion, +- _offset: usize, +- _value: u32, ++ region: &OpRegion, ++ offset: usize, ++ value: u32, + ) -> Result<(), acpi::aml::AmlError> { +- warn!("Got u32 EC write from AML!"); +- Err(acpi::aml::AmlError::NoHandlerForRegionAccess( +- RegionSpace::EmbeddedControl, +- )) // TODO proper error type ++ assert_eq!(region.space, RegionSpace::EmbeddedControl); ++ self.write_bytes(offset, value.to_le_bytes()).map_err(|error| { ++ warn!("EC write_u32 failed at offset {offset:#x}: {error:?}"); ++ error.as_aml_error() ++ }) + } + fn write_u64( + &self, +- _region: &OpRegion, +- _offset: usize, +- _value: u64, ++ region: &OpRegion, ++ offset: usize, ++ value: u64, + ) -> Result<(), acpi::aml::AmlError> { +- warn!("Got u64 EC write from AML!"); +- Err(acpi::aml::AmlError::NoHandlerForRegionAccess( +- RegionSpace::EmbeddedControl, +- )) // TODO proper error type ++ assert_eq!(region.space, RegionSpace::EmbeddedControl); ++ self.write_bytes(offset, value.to_le_bytes()).map_err(|error| { ++ warn!("EC write_u64 failed at offset {offset:#x}: {error:?}"); ++ error.as_aml_error() ++ }) + } + } + +diff --git a/drivers/acpid/src/scheme.rs b/drivers/acpid/src/scheme.rs +index 5a5040c3..4fe3b8d8 100644 +--- a/drivers/acpid/src/scheme.rs ++++ b/drivers/acpid/src/scheme.rs +@@ -2,7 +2,6 @@ use acpi::aml::namespace::AmlName; + use amlserde::aml_serde_name::to_aml_format; + use amlserde::AmlSerdeValue; + use core::str; +-use libredox::Fd; + use parking_lot::RwLockReadGuard; + use redox_scheme::scheme::SchemeSync; + use redox_scheme::{CallerCtx, OpenResult, SendFdRequest, Socket}; +@@ -16,17 +15,22 @@ use syscall::FobtainFdFlags; + + use syscall::data::Stat; + use syscall::error::{Error, Result}; +-use 
syscall::error::{EACCES, EBADF, EBADFD, EINVAL, EIO, EISDIR, ENOENT, ENOTDIR}; ++use syscall::error::{ ++ EACCES, EAGAIN, EBADF, EBADFD, EINVAL, EIO, EISDIR, ENOENT, ENOTDIR, EOPNOTSUPP, ++}; + use syscall::flag::{MODE_DIR, MODE_FILE}; + use syscall::flag::{O_ACCMODE, O_DIRECTORY, O_RDONLY, O_STAT, O_SYMLINK}; + use syscall::{EOVERFLOW, EPERM}; + +-use crate::acpi::{AcpiContext, AmlSymbols, SdtSignature}; ++use crate::acpi::{ ++ AcpiBattery, AcpiContext, AcpiPowerAdapter, AcpiPowerSnapshot, AmlSymbols, DmiInfo, ++ SdtSignature, ++}; ++use crate::resources::{decode_resource_template, ResourceDescriptor}; + + pub struct AcpiScheme<'acpi, 'sock> { + ctx: &'acpi AcpiContext, + handles: HandleMap>, +- pci_fd: Option, + socket: &'sock Socket, + } + +@@ -41,10 +45,204 @@ enum HandleKind<'a> { + Table(SdtSignature), + Symbols(RwLockReadGuard<'a, AmlSymbols>), + Symbol { name: String, description: String }, ++ ResourcesDir, ++ Resources(String), ++ Reboot, ++ DmiDir, ++ Dmi(String), ++ PowerDir, ++ PowerAdaptersDir, ++ PowerAdapterDir(String), ++ PowerBatteriesDir, ++ PowerBatteryDir(String), ++ PowerFile(String), + SchemeRoot, + RegisterPci, + } + ++const DMI_DIRECTORY_ENTRIES: &[&str] = &[ ++ "sys_vendor", ++ "board_vendor", ++ "board_name", ++ "board_version", ++ "product_name", ++ "product_version", ++ "bios_version", ++ "match_all", ++]; ++ ++fn dmi_contents(dmi_info: Option<&DmiInfo>, name: &str) -> Option { ++ Some(match name { ++ "sys_vendor" => dmi_info ++ .and_then(|info| info.sys_vendor.clone()) ++ .unwrap_or_default(), ++ "board_vendor" => dmi_info ++ .and_then(|info| info.board_vendor.clone()) ++ .unwrap_or_default(), ++ "board_name" => dmi_info ++ .and_then(|info| info.board_name.clone()) ++ .unwrap_or_default(), ++ "board_version" => dmi_info ++ .and_then(|info| info.board_version.clone()) ++ .unwrap_or_default(), ++ "product_name" => dmi_info ++ .and_then(|info| info.product_name.clone()) ++ .unwrap_or_default(), ++ "product_version" => dmi_info ++ 
.and_then(|info| info.product_version.clone()) ++ .unwrap_or_default(), ++ "bios_version" => dmi_info ++ .and_then(|info| info.bios_version.clone()) ++ .unwrap_or_default(), ++ "match_all" => dmi_info.map(DmiInfo::to_match_lines).unwrap_or_default(), ++ _ => return None, ++ }) ++} ++ ++fn power_bool_contents(value: bool) -> String { ++ if value { ++ String::from("1\n") ++ } else { ++ String::from("0\n") ++ } ++} ++ ++fn power_u64_contents(value: u64) -> String { ++ format!("{value}\n") ++} ++ ++fn power_f64_contents(value: f64) -> String { ++ format!("{value}\n") ++} ++ ++fn power_string_contents(value: &str) -> String { ++ format!("{value}\n") ++} ++ ++fn power_adapter_file_contents(adapter: &AcpiPowerAdapter, name: &str) -> Option { ++ Some(match name { ++ "path" => power_string_contents(&adapter.path), ++ "online" => power_bool_contents(adapter.online), ++ _ => return None, ++ }) ++} ++ ++fn power_adapter_entry_names() -> &'static [&'static str] { ++ &["path", "online"] ++} ++ ++fn power_battery_file_contents(battery: &AcpiBattery, name: &str) -> Option { ++ Some(match name { ++ "path" => power_string_contents(&battery.path), ++ "state" => power_u64_contents(battery.state), ++ "present_rate" => power_u64_contents(battery.present_rate?), ++ "remaining_capacity" => power_u64_contents(battery.remaining_capacity?), ++ "present_voltage" => power_u64_contents(battery.present_voltage?), ++ "power_unit" => power_string_contents(battery.power_unit.as_deref()?), ++ "design_capacity" => power_u64_contents(battery.design_capacity?), ++ "last_full_capacity" => power_u64_contents(battery.last_full_capacity?), ++ "design_voltage" => power_u64_contents(battery.design_voltage?), ++ "technology" => power_string_contents(battery.technology.as_deref()?), ++ "model" => power_string_contents(battery.model.as_deref()?), ++ "serial" => power_string_contents(battery.serial.as_deref()?), ++ "battery_type" => power_string_contents(battery.battery_type.as_deref()?), ++ "oem_info" => 
power_string_contents(battery.oem_info.as_deref()?), ++ "percentage" => power_f64_contents(battery.percentage?), ++ _ => return None, ++ }) ++} ++ ++fn power_battery_entry_names(battery: &AcpiBattery) -> Vec<&'static str> { ++ let mut names = vec!["path", "state"]; ++ ++ if battery.present_rate.is_some() { ++ names.push("present_rate"); ++ } ++ if battery.remaining_capacity.is_some() { ++ names.push("remaining_capacity"); ++ } ++ if battery.present_voltage.is_some() { ++ names.push("present_voltage"); ++ } ++ if battery.power_unit.is_some() { ++ names.push("power_unit"); ++ } ++ if battery.design_capacity.is_some() { ++ names.push("design_capacity"); ++ } ++ if battery.last_full_capacity.is_some() { ++ names.push("last_full_capacity"); ++ } ++ if battery.design_voltage.is_some() { ++ names.push("design_voltage"); ++ } ++ if battery.technology.is_some() { ++ names.push("technology"); ++ } ++ if battery.model.is_some() { ++ names.push("model"); ++ } ++ if battery.serial.is_some() { ++ names.push("serial"); ++ } ++ if battery.battery_type.is_some() { ++ names.push("battery_type"); ++ } ++ if battery.oem_info.is_some() { ++ names.push("oem_info"); ++ } ++ if battery.percentage.is_some() { ++ names.push("percentage"); ++ } ++ ++ names ++} ++ ++fn top_level_entries(power_available: bool) -> Vec<(&'static str, DirentKind)> { ++ let mut entries = vec![ ++ ("tables", DirentKind::Directory), ++ ("symbols", DirentKind::Directory), ++ ("resources", DirentKind::Directory), ++ ("dmi", DirentKind::Directory), ++ ("reboot", DirentKind::Regular), ++ ]; ++ if power_available { ++ entries.push(("power", DirentKind::Directory)); ++ } ++ entries ++} ++ ++fn resource_symbol_path(path: &str) -> Option { ++ let normalized = path.trim_matches('/').trim_start_matches('\\'); ++ if normalized.is_empty() { ++ return None; ++ } ++ ++ let normalized = normalized.replace('/', "."); ++ if normalized.is_empty() { ++ None ++ } else { ++ Some(format!("{normalized}._CRS")) ++ } ++} ++ ++fn 
resource_entry_name(symbol: &str) -> Option { ++ symbol ++ .strip_suffix("._CRS") ++ .map(str::to_string) ++ .filter(|path| !path.is_empty()) ++} ++ ++fn resource_dir_entries<'a>(symbols: impl IntoIterator) -> Vec { ++ let mut entries = symbols ++ .into_iter() ++ .filter_map(resource_entry_name) ++ .collect::>(); ++ entries.sort_unstable(); ++ entries.dedup(); ++ entries ++} ++ + impl HandleKind<'_> { + fn is_dir(&self) -> bool { + match self { +@@ -53,6 +251,17 @@ impl HandleKind<'_> { + Self::Table(_) => false, + Self::Symbols(_) => true, + Self::Symbol { .. } => false, ++ Self::ResourcesDir => true, ++ Self::Resources(_) => false, ++ Self::Reboot => false, ++ Self::DmiDir => true, ++ Self::Dmi(_) => false, ++ Self::PowerDir => true, ++ Self::PowerAdaptersDir => true, ++ Self::PowerAdapterDir(_) => true, ++ Self::PowerBatteriesDir => true, ++ Self::PowerBatteryDir(_) => true, ++ Self::PowerFile(_) => false, + Self::SchemeRoot => false, + Self::RegisterPci => false, + } +@@ -65,8 +274,21 @@ impl HandleKind<'_> { + .ok_or(Error::new(EBADFD))? + .length(), + Self::Symbol { description, .. 
} => description.len(), ++ Self::Resources(contents) => contents.len(), ++ Self::Reboot => 0, ++ Self::Dmi(contents) => contents.len(), ++ Self::PowerFile(contents) => contents.len(), + // Directories +- Self::TopLevel | Self::Symbols(_) | Self::Tables => 0, ++ Self::TopLevel ++ | Self::Symbols(_) ++ | Self::ResourcesDir ++ | Self::Tables ++ | Self::DmiDir ++ | Self::PowerDir ++ | Self::PowerAdaptersDir ++ | Self::PowerAdapterDir(_) ++ | Self::PowerBatteriesDir ++ | Self::PowerBatteryDir(_) => 0, + Self::SchemeRoot | Self::RegisterPci => return Err(Error::new(EBADF)), + }) + } +@@ -77,10 +299,163 @@ impl<'acpi, 'sock> AcpiScheme<'acpi, 'sock> { + Self { + ctx, + handles: HandleMap::new(), +- pci_fd: None, + socket, + } + } ++ ++ fn power_snapshot(&self) -> Result { ++ self.ctx.power_snapshot().map_err(|error| match error { ++ crate::acpi::AmlEvalError::NotInitialized => Error::new(EAGAIN), ++ crate::acpi::AmlEvalError::Unsupported(message) => { ++ log::warn!("ACPI power surface unavailable: {message}"); ++ Error::new(EOPNOTSUPP) ++ } ++ other => { ++ log::warn!("Failed to build ACPI power snapshot: {:?}", other); ++ Error::new(EIO) ++ } ++ }) ++ } ++ ++ fn power_available(&self) -> bool { ++ matches!(self.ctx.power_snapshot(), Ok(_)) ++ } ++ ++ fn resources_handle(&self, path: &str) -> Result> { ++ if !self.ctx.pci_ready() { ++ let display_path = if path.is_empty() { "resources" } else { path }; ++ log::warn!( ++ "Deferring ACPI resource lookup for {display_path} until PCI registration is ready" ++ ); ++ return Err(Error::new(EAGAIN)); ++ } ++ ++ let normalized = path.trim_matches('/'); ++ if normalized.is_empty() { ++ return Ok(HandleKind::ResourcesDir); ++ } ++ ++ let symbol_path = resource_symbol_path(normalized).ok_or(Error::new(ENOENT))?; ++ if self.ctx.aml_lookup(&symbol_path).is_none() { ++ return Err(Error::new(ENOENT)); ++ } ++ ++ let aml_name = ++ AmlName::from_str(&format!("\\{symbol_path}")).map_err(|_| Error::new(ENOENT))?; ++ let buffer = match 
self.ctx.aml_eval(aml_name, Vec::new()) { ++ Ok(AmlSerdeValue::Buffer(bytes)) => bytes, ++ Ok(other) => { ++ log::debug!( ++ "Skipping ACPI resources for {normalized} due to unexpected _CRS value: {:?}", ++ other ++ ); ++ return Err(Error::new(ENOENT)); ++ } ++ Err(error) => { ++ log::debug!( ++ "Failed to evaluate ACPI resources for {symbol_path}: {:?}", ++ error ++ ); ++ return Err(Error::new(ENOENT)); ++ } ++ }; ++ ++ let descriptors: Vec = ++ decode_resource_template(&buffer).map_err(|error| { ++ log::warn!("Failed to decode ACPI _CRS for {symbol_path}: {error}"); ++ Error::new(EIO) ++ })?; ++ let serialized = ron::ser::to_string(&descriptors).map_err(|error| { ++ log::warn!("Failed to serialize decoded ACPI resources for {symbol_path}: {error}"); ++ Error::new(EIO) ++ })?; ++ ++ Ok(HandleKind::Resources(serialized)) ++ } ++ ++ fn power_handle(&self, path: &str) -> Result> { ++ let normalized = path.trim_matches('/'); ++ self.power_snapshot()?; ++ ++ if normalized.is_empty() { ++ return Ok(HandleKind::PowerDir); ++ } ++ if normalized == "on_battery" { ++ return Ok(HandleKind::PowerFile(power_bool_contents( ++ self.power_snapshot()?.on_battery(), ++ ))); ++ } ++ if normalized == "adapters" { ++ return Ok(HandleKind::PowerAdaptersDir); ++ } ++ if let Some(rest) = normalized.strip_prefix("adapters/") { ++ return self.power_adapter_handle(rest); ++ } ++ if normalized == "batteries" { ++ return Ok(HandleKind::PowerBatteriesDir); ++ } ++ if let Some(rest) = normalized.strip_prefix("batteries/") { ++ return self.power_battery_handle(rest); ++ } ++ ++ Err(Error::new(ENOENT)) ++ } ++ ++ fn power_adapter_handle(&self, path: &str) -> Result> { ++ let normalized = path.trim_matches('/'); ++ if normalized.is_empty() { ++ return Ok(HandleKind::PowerAdaptersDir); ++ } ++ ++ let mut parts = normalized.split('/'); ++ let adapter_id = parts.next().ok_or(Error::new(ENOENT))?; ++ let field = parts.next(); ++ if parts.next().is_some() { ++ return Err(Error::new(ENOENT)); ++ } ++ ++ 
let snapshot = self.power_snapshot()?; ++ let adapter = snapshot ++ .adapters ++ .iter() ++ .find(|adapter| adapter.id == adapter_id) ++ .ok_or(Error::new(ENOENT))?; ++ ++ match field { ++ None | Some("") => Ok(HandleKind::PowerAdapterDir(adapter.id.clone())), ++ Some(name) => Ok(HandleKind::PowerFile( ++ power_adapter_file_contents(adapter, name).ok_or(Error::new(ENOENT))?, ++ )), ++ } ++ } ++ ++ fn power_battery_handle(&self, path: &str) -> Result> { ++ let normalized = path.trim_matches('/'); ++ if normalized.is_empty() { ++ return Ok(HandleKind::PowerBatteriesDir); ++ } ++ ++ let mut parts = normalized.split('/'); ++ let battery_id = parts.next().ok_or(Error::new(ENOENT))?; ++ let field = parts.next(); ++ if parts.next().is_some() { ++ return Err(Error::new(ENOENT)); ++ } ++ ++ let snapshot = self.power_snapshot()?; ++ let battery = snapshot ++ .batteries ++ .iter() ++ .find(|battery| battery.id == battery_id) ++ .ok_or(Error::new(ENOENT))?; ++ ++ match field { ++ None | Some("") => Ok(HandleKind::PowerBatteryDir(battery.id.clone())), ++ Some(name) => Ok(HandleKind::PowerFile( ++ power_battery_file_contents(battery, name).ok_or(Error::new(ENOENT))?, ++ )), ++ } ++ } + } + + fn parse_hex_digit(hex: u8) -> Option { +@@ -182,49 +557,97 @@ impl SchemeSync for AcpiScheme<'_, '_> { + + let kind = match handle.kind { + HandleKind::SchemeRoot => { +- // TODO: arrayvec +- let components = { +- let mut v = arrayvec::ArrayVec::<&str, 3>::new(); +- let it = path.split('/'); +- for component in it.take(3) { +- v.push(component); +- } +- +- v +- }; +- +- match &*components { +- [""] => HandleKind::TopLevel, +- ["register_pci"] => HandleKind::RegisterPci, +- ["tables"] => HandleKind::Tables, ++ if path == "resources" || path == "resources/" { ++ self.resources_handle("")? ++ } else if let Some(rest) = path.strip_prefix("resources/") { ++ self.resources_handle(rest)? 
++ } else { ++ // TODO: arrayvec ++ let components = { ++ let mut v = arrayvec::ArrayVec::<&str, 4>::new(); ++ let it = path.split('/'); ++ for component in it.take(4) { ++ v.push(component); ++ } + +- ["tables", table] => { +- let signature = parse_table(table.as_bytes()).ok_or(Error::new(ENOENT))?; +- HandleKind::Table(signature) +- } ++ v ++ }; ++ ++ match &*components { ++ [""] => HandleKind::TopLevel, ++ ["reboot"] => HandleKind::Reboot, ++ ["dmi"] => { ++ if flag_dir || flag_stat || path.ends_with('/') { ++ HandleKind::DmiDir ++ } else { ++ HandleKind::Dmi( ++ dmi_contents(self.ctx.dmi_info(), "match_all") ++ .expect("match_all should always resolve"), ++ ) ++ } ++ } ++ ["dmi", ""] => HandleKind::DmiDir, ++ ["dmi", field] => HandleKind::Dmi( ++ dmi_contents(self.ctx.dmi_info(), field).ok_or(Error::new(ENOENT))?, ++ ), ++ ["power"] => self.power_handle("")?, ++ ["power", tail] => self.power_handle(tail)?, ++ ["power", a, b] => self.power_handle(&format!("{a}/{b}"))?, ++ ["power", a, b, c] => self.power_handle(&format!("{a}/{b}/{c}"))?, ++ ["register_pci"] => HandleKind::RegisterPci, ++ ["tables"] => HandleKind::Tables, ++ ++ ["tables", table] => { ++ let signature = ++ parse_table(table.as_bytes()).ok_or(Error::new(ENOENT))?; ++ HandleKind::Table(signature) ++ } + +- ["symbols"] => { +- if let Ok(aml_symbols) = self.ctx.aml_symbols(self.pci_fd.as_ref()) { +- HandleKind::Symbols(aml_symbols) +- } else { +- return Err(Error::new(EIO)); ++ ["symbols"] => { ++ if !self.ctx.pci_ready() { ++ log::warn!( ++ "Deferring AML symbol scan until PCI registration is ready" ++ ); ++ return Err(Error::new(EAGAIN)); ++ } ++ if let Ok(aml_symbols) = self.ctx.aml_symbols() { ++ HandleKind::Symbols(aml_symbols) ++ } else { ++ return Err(Error::new(EIO)); ++ } + } +- } + +- ["symbols", symbol] => { +- if let Some(description) = self.ctx.aml_lookup(symbol) { +- HandleKind::Symbol { +- name: (*symbol).to_owned(), +- description, ++ ["symbols", symbol] => { ++ if 
!self.ctx.pci_ready() { ++ log::warn!( ++ "Deferring AML symbol lookup for {symbol} until PCI registration is ready" ++ ); ++ return Err(Error::new(EAGAIN)); ++ } ++ if let Some(description) = self.ctx.aml_lookup(symbol) { ++ HandleKind::Symbol { ++ name: (*symbol).to_owned(), ++ description, ++ } ++ } else { ++ return Err(Error::new(ENOENT)); + } +- } else { +- return Err(Error::new(ENOENT)); + } +- } + +- _ => return Err(Error::new(ENOENT)), ++ _ => return Err(Error::new(ENOENT)), ++ } ++ } ++ } ++ HandleKind::DmiDir => { ++ if path.is_empty() { ++ HandleKind::DmiDir ++ } else { ++ HandleKind::Dmi( ++ dmi_contents(self.ctx.dmi_info(), path).ok_or(Error::new(ENOENT))?, ++ ) + } + } ++ HandleKind::ResourcesDir => self.resources_handle(path)?, + HandleKind::Symbols(ref aml_symbols) => { + if let Some(description) = aml_symbols.lookup(path) { + HandleKind::Symbol { +@@ -235,6 +658,23 @@ impl SchemeSync for AcpiScheme<'_, '_> { + return Err(Error::new(ENOENT)); + } + } ++ HandleKind::PowerDir => self.power_handle(path)?, ++ HandleKind::PowerAdaptersDir => self.power_adapter_handle(path)?, ++ HandleKind::PowerAdapterDir(ref adapter_id) => { ++ if path.is_empty() { ++ HandleKind::PowerAdapterDir(adapter_id.clone()) ++ } else { ++ self.power_adapter_handle(&format!("{adapter_id}/{path}"))? ++ } ++ } ++ HandleKind::PowerBatteriesDir => self.power_battery_handle(path)?, ++ HandleKind::PowerBatteryDir(ref battery_id) => { ++ if path.is_empty() { ++ HandleKind::PowerBatteryDir(battery_id.clone()) ++ } else { ++ self.power_battery_handle(&format!("{battery_id}/{path}"))? 
++ } ++ } + _ => return Err(Error::new(EACCES)), + }; + +@@ -296,7 +736,7 @@ impl SchemeSync for AcpiScheme<'_, '_> { + ) -> Result { + let offset: usize = offset.try_into().map_err(|_| Error::new(EINVAL))?; + +- let handle = self.handles.get_mut(id)?; ++ let handle = self.handles.get(id)?; + + if handle.stat { + return Err(Error::new(EBADF)); +@@ -309,6 +749,9 @@ impl SchemeSync for AcpiScheme<'_, '_> { + .ok_or(Error::new(EBADFD))? + .as_slice(), + HandleKind::Symbol { description, .. } => description.as_bytes(), ++ HandleKind::Resources(contents) => contents.as_bytes(), ++ HandleKind::Dmi(contents) => contents.as_bytes(), ++ HandleKind::PowerFile(contents) => contents.as_bytes(), + _ => return Err(Error::new(EINVAL)), + }; + +@@ -328,13 +771,95 @@ impl SchemeSync for AcpiScheme<'_, '_> { + mut buf: DirentBuf<&'buf mut [u8]>, + opaque_offset: u64, + ) -> Result> { +- let handle = self.handles.get_mut(id)?; ++ let handle = self.handles.get(id)?; + + match &handle.kind { + HandleKind::TopLevel => { +- const TOPLEVEL_ENTRIES: &[&str] = &["tables", "symbols"]; ++ for (idx, (name, kind)) in top_level_entries(self.power_available()) ++ .iter() ++ .enumerate() ++ .skip(opaque_offset as usize) ++ { ++ buf.entry(DirEntry { ++ inode: 0, ++ next_opaque_id: idx as u64 + 1, ++ name, ++ kind: *kind, ++ })?; ++ } ++ } ++ HandleKind::DmiDir => { ++ for (idx, name) in DMI_DIRECTORY_ENTRIES ++ .iter() ++ .enumerate() ++ .skip(opaque_offset as usize) ++ { ++ buf.entry(DirEntry { ++ inode: 0, ++ next_opaque_id: idx as u64 + 1, ++ name, ++ kind: DirentKind::Regular, ++ })?; ++ } ++ } ++ HandleKind::ResourcesDir => { ++ let aml_symbols = self.ctx.aml_symbols().map_err(|_| Error::new(EIO))?; ++ let entries = ++ resource_dir_entries(aml_symbols.symbols_cache().keys().map(String::as_str)); ++ for (idx, name) in entries.iter().enumerate().skip(opaque_offset as usize) { ++ buf.entry(DirEntry { ++ inode: 0, ++ next_opaque_id: idx as u64 + 1, ++ name, ++ kind: DirentKind::Regular, ++ })?; ++ 
} ++ } ++ HandleKind::PowerDir => { ++ const POWER_ROOT_ENTRIES: &[(&str, DirentKind)] = &[ ++ ("on_battery", DirentKind::Regular), ++ ("adapters", DirentKind::Directory), ++ ("batteries", DirentKind::Directory), ++ ]; ++ ++ for (idx, (name, kind)) in POWER_ROOT_ENTRIES ++ .iter() ++ .enumerate() ++ .skip(opaque_offset as usize) ++ { ++ buf.entry(DirEntry { ++ inode: 0, ++ next_opaque_id: idx as u64 + 1, ++ name, ++ kind: *kind, ++ })?; ++ } ++ } ++ HandleKind::PowerAdaptersDir => { ++ let snapshot = self.power_snapshot()?; ++ for (idx, adapter) in snapshot ++ .adapters ++ .iter() ++ .enumerate() ++ .skip(opaque_offset as usize) ++ { ++ buf.entry(DirEntry { ++ inode: 0, ++ next_opaque_id: idx as u64 + 1, ++ name: adapter.id.as_str(), ++ kind: DirentKind::Directory, ++ })?; ++ } ++ } ++ HandleKind::PowerAdapterDir(adapter_id) => { ++ let snapshot = self.power_snapshot()?; ++ let _adapter = snapshot ++ .adapters ++ .iter() ++ .find(|adapter| adapter.id == *adapter_id) ++ .ok_or(Error::new(EIO))?; + +- for (idx, name) in TOPLEVEL_ENTRIES ++ for (idx, name) in power_adapter_entry_names() + .iter() + .enumerate() + .skip(opaque_offset as usize) +@@ -343,10 +868,44 @@ impl SchemeSync for AcpiScheme<'_, '_> { + inode: 0, + next_opaque_id: idx as u64 + 1, + name, ++ kind: DirentKind::Regular, ++ })?; ++ } ++ } ++ HandleKind::PowerBatteriesDir => { ++ let snapshot = self.power_snapshot()?; ++ for (idx, battery) in snapshot ++ .batteries ++ .iter() ++ .enumerate() ++ .skip(opaque_offset as usize) ++ { ++ buf.entry(DirEntry { ++ inode: 0, ++ next_opaque_id: idx as u64 + 1, ++ name: battery.id.as_str(), + kind: DirentKind::Directory, + })?; + } + } ++ HandleKind::PowerBatteryDir(battery_id) => { ++ let snapshot = self.power_snapshot()?; ++ let battery = snapshot ++ .batteries ++ .iter() ++ .find(|battery| battery.id == *battery_id) ++ .ok_or(Error::new(EIO))?; ++ let entry_names = power_battery_entry_names(battery); ++ ++ for (idx, name) in 
entry_names.iter().enumerate().skip(opaque_offset as usize) { ++ buf.entry(DirEntry { ++ inode: 0, ++ next_opaque_id: idx as u64 + 1, ++ name, ++ kind: DirentKind::Regular, ++ })?; ++ } ++ } + HandleKind::Symbols(aml_symbols) => { + for (idx, (symbol_name, _value)) in aml_symbols + .symbols_cache() +@@ -444,6 +1003,38 @@ impl SchemeSync for AcpiScheme<'_, '_> { + Ok(result_len) + } + ++ fn write( ++ &mut self, ++ id: usize, ++ buf: &[u8], ++ _offset: u64, ++ _flags: u32, ++ _ctx: &CallerCtx, ++ ) -> Result { ++ let handle = self.handles.get_mut(id)?; ++ ++ if handle.stat { ++ return Err(Error::new(EBADF)); ++ } ++ if !handle.allowed_to_eval { ++ return Err(Error::new(EPERM)); ++ } ++ ++ match handle.kind { ++ HandleKind::Reboot => { ++ if buf.is_empty() { ++ return Err(Error::new(EINVAL)); ++ } ++ self.ctx.acpi_reboot().map_err(|error| { ++ log::error!("ACPI reboot failed: {error}"); ++ Error::new(EIO) ++ })?; ++ Ok(buf.len()) ++ } ++ _ => Err(Error::new(EBADF)), ++ } ++ } ++ + fn on_sendfd(&mut self, sendfd_request: &SendFdRequest) -> Result { + let id = sendfd_request.id(); + let num_fds = sendfd_request.num_fds(); +@@ -470,10 +1061,8 @@ impl SchemeSync for AcpiScheme<'_, '_> { + } + let new_fd = libredox::Fd::new(new_fd); + +- if self.pci_fd.is_some() { ++ if self.ctx.register_pci_fd(new_fd).is_err() { + return Err(Error::new(EINVAL)); +- } else { +- self.pci_fd = Some(new_fd); + } + + Ok(num_fds) +@@ -483,3 +1072,90 @@ impl SchemeSync for AcpiScheme<'_, '_> { + self.handles.remove(id); + } + } ++ ++#[cfg(test)] ++mod tests { ++ use super::{dmi_contents, resource_dir_entries, resource_symbol_path, top_level_entries}; ++ use crate::acpi::DmiInfo; ++ use syscall::dirent::DirentKind; ++ ++ #[test] ++ fn dmi_contents_exposes_individual_fields_and_match_all() { ++ let dmi_info = DmiInfo { ++ sys_vendor: Some("Framework".to_string()), ++ board_name: Some("FRANMECP01".to_string()), ++ product_name: Some("Laptop 16".to_string()), ++ ..DmiInfo::default() ++ }; ++ ++ 
assert_eq!( ++ dmi_contents(Some(&dmi_info), "sys_vendor").as_deref(), ++ Some("Framework") ++ ); ++ assert_eq!( ++ dmi_contents(Some(&dmi_info), "board_name").as_deref(), ++ Some("FRANMECP01") ++ ); ++ assert_eq!( ++ dmi_contents(Some(&dmi_info), "product_name").as_deref(), ++ Some("Laptop 16") ++ ); ++ assert_eq!( ++ dmi_contents(Some(&dmi_info), "match_all").as_deref(), ++ Some("sys_vendor=Framework\nboard_name=FRANMECP01\nproduct_name=Laptop 16") ++ ); ++ assert_eq!(dmi_contents(None, "bios_version").as_deref(), Some("")); ++ assert_eq!(dmi_contents(Some(&dmi_info), "unknown"), None); ++ } ++ ++ #[test] ++ fn top_level_entries_always_include_reboot() { ++ let entries = top_level_entries(false); ++ assert!(entries ++ .iter() ++ .any(|(name, kind)| { *name == "reboot" && matches!(kind, DirentKind::Regular) })); ++ } ++ ++ #[test] ++ fn top_level_entries_only_include_power_when_available() { ++ let hidden = top_level_entries(false); ++ let visible = top_level_entries(true); ++ ++ assert!(!hidden.iter().any(|(name, _)| *name == "power")); ++ assert!(visible ++ .iter() ++ .any(|(name, kind)| { *name == "power" && matches!(kind, DirentKind::Directory) })); ++ } ++ ++ #[test] ++ fn top_level_entries_always_include_resources() { ++ let entries = top_level_entries(false); ++ assert!(entries ++ .iter() ++ .any(|(name, kind)| { *name == "resources" && matches!(kind, DirentKind::Directory) })); ++ } ++ ++ #[test] ++ fn resource_symbol_path_accepts_dotted_and_slash_paths() { ++ assert_eq!( ++ resource_symbol_path("_SB.PCI0.I2C0.TPD0").as_deref(), ++ Some("_SB.PCI0.I2C0.TPD0._CRS") ++ ); ++ assert_eq!( ++ resource_symbol_path("\\_SB/PCI0/I2C0/TPD0").as_deref(), ++ Some("_SB.PCI0.I2C0.TPD0._CRS") ++ ); ++ } ++ ++ #[test] ++ fn resource_dir_entries_list_devices_with_crs_suffix() { ++ assert_eq!( ++ resource_dir_entries([ ++ "_SB.PCI0.I2C0.TPD0._CRS", ++ "_SB.PCI0.I2C1.TPD1._HID", ++ "_SB.PCI0.I2C0.TPD0._CRS", ++ ]), ++ vec!["_SB.PCI0.I2C0.TPD0".to_string()] ++ ); ++ } ++} diff 
--git a/local/patches/base/P2-boot-logging.patch b/local/patches/base/absorbed/P2-boot-logging.patch similarity index 100% rename from local/patches/base/P2-boot-logging.patch rename to local/patches/base/absorbed/P2-boot-logging.patch diff --git a/local/patches/base/absorbed/P2-boot-runtime-fixes.patch b/local/patches/base/absorbed/P2-boot-runtime-fixes.patch new file mode 100644 index 00000000..67715163 --- /dev/null +++ b/local/patches/base/absorbed/P2-boot-runtime-fixes.patch @@ -0,0 +1,313 @@ +diff --git a/drivers/hwd/src/backend/acpi.rs b/drivers/hwd/src/backend/acpi.rs +--- a/drivers/hwd/src/backend/acpi.rs ++++ b/drivers/hwd/src/backend/acpi.rs +@@ -1,27 +1,36 @@ + use amlserde::{AmlSerde, AmlSerdeValue}; +-use std::{error::Error, fs, process::Command}; ++use std::{error::Error, fs}; + + use super::Backend; + + pub struct AcpiBackend { +- rxsdt: Vec, ++ _rxsdt: Vec, + } + + impl Backend for AcpiBackend { + fn new() -> Result> { + let rxsdt = fs::read("/scheme/kernel.acpi/rxsdt")?; + +- // Spawn acpid +- //TODO: pass rxsdt data to acpid? +- #[allow(deprecated, reason = "we can't yet move this to init")] +- daemon::Daemon::spawn(Command::new("acpid")); +- +- Ok(Self { rxsdt }) ++ Ok(Self { _rxsdt: rxsdt }) + } + + fn probe(&mut self) -> Result<(), Box> { ++ let mut boot_critical_input_candidates = 0usize; ++ let mut thc_candidates = 0usize; ++ let mut non_hid_i2c_candidates = 0usize; ++ + // Read symbols from acpi scheme +- let entries = fs::read_dir("/scheme/acpi/symbols")?; ++ let entries = match fs::read_dir("/scheme/acpi/symbols") { ++ Ok(entries) => entries, ++ Err(err) ++ if err.kind() == std::io::ErrorKind::WouldBlock ++ || err.raw_os_error() == Some(11) => ++ { ++ log::debug!("hwd: ACPI symbols are not ready yet"); ++ return Ok(()); ++ } ++ Err(err) => return Err(Box::new(err)), ++ }; + // TODO: Reimplement with getdents? 
+ let symbols_fd = libredox::Fd::open( + "/scheme/acpi/symbols", +@@ -100,13 +109,104 @@ + "PNP0C0F" => "PCI interrupt link", + "PNP0C50" => "I2C HID", + "PNP0F13" => "PS/2 port for PS/2-style mouse", ++ "80860F41" | "808622C1" => "DesignWare I2C controller", ++ "AMDI0010" | "AMDI0019" | "AMDI0510" => "AMD laptop I2C controller", ++ "INT33C2" | "INT33C3" | "INT3432" | "INT3433" | "INTC10EF" => { ++ "Intel LPSS/SerialIO I2C controller" ++ } ++ "INT34C5" | "INTC1055" => "Intel GPIO controller", ++ "INTC1050" | "INTC1051" | "INTC1080" | "INTC1081" | "INTC1082" => { ++ "Intel THC companion (QuickI2C/QuickSPI path)" ++ } ++ _ if is_elan_touchpad_id(&id) => "ELAN touchpad (I2C/SMBus path)", ++ _ if is_cypress_touchpad_id(&id) => "Cypress/Trackpad (non-HID I2C path)", ++ _ if is_synaptics_rmi_id(&id) => "Synaptics RMI touchpad (I2C/SMBus path)", + _ => "?", + }; + log::debug!("{}: {} ({})", name, id, what); ++ if is_boot_critical_i2c_surface(&id) { ++ boot_critical_input_candidates += 1; ++ log::info!("{}: {} is boot-critical for laptop input path", name, id); ++ } ++ if is_thc_companion(&id) { ++ thc_candidates += 1; ++ log::warn!( ++ "{}: {} indicates Intel THC path; DMA/report fast-path is not complete yet", ++ name, ++ id ++ ); ++ } ++ if is_non_hid_i2c_input_id(&id) { ++ non_hid_i2c_candidates += 1; ++ } + } + } + } ++ ++ if boot_critical_input_candidates == 0 { ++ log::warn!( ++ "hwd: no ACPI boot-critical I2C input candidates found; built-in laptop input may require additional controller/device support" ++ ); ++ } else { ++ log::info!( ++ "hwd: ACPI input candidates: total={} thc={} non_hid_i2c={}", ++ boot_critical_input_candidates, ++ thc_candidates, ++ non_hid_i2c_candidates ++ ); ++ } ++ + Ok(()) + } + } ++ ++fn is_boot_critical_i2c_surface(id: &str) -> bool { ++ matches!( ++ id, ++ "PNP0C50" ++ | "ACPI0C50" ++ | "80860F41" ++ | "808622C1" ++ | "AMDI0010" ++ | "AMDI0019" ++ | "AMDI0510" ++ | "INT33C2" ++ | "INT33C3" ++ | "INT3432" ++ | "INT3433" ++ | "INTC10EF" 
++ | "INT34C5" ++ | "INTC1055" ++ | "INTC1050" ++ | "INTC1051" ++ | "INTC1080" ++ | "INTC1081" ++ | "INTC1082" ++ ) || is_elan_touchpad_id(id) ++ || is_cypress_touchpad_id(id) ++ || is_synaptics_rmi_id(id) ++} ++ ++fn is_thc_companion(id: &str) -> bool { ++ matches!( ++ id, ++ "INTC1050" | "INTC1051" | "INTC1080" | "INTC1081" | "INTC1082" ++ ) ++} ++ ++fn is_elan_touchpad_id(id: &str) -> bool { ++ id.starts_with("ELAN") ++} ++ ++fn is_cypress_touchpad_id(id: &str) -> bool { ++ id.starts_with("CYAP") ++} ++ ++fn is_synaptics_rmi_id(id: &str) -> bool { ++ id.starts_with("SYNA") ++} ++ ++fn is_non_hid_i2c_input_id(id: &str) -> bool { ++ is_elan_touchpad_id(id) || is_cypress_touchpad_id(id) || is_synaptics_rmi_id(id) ++} + +diff --git a/drivers/pcid-spawner/src/main.rs b/drivers/pcid-spawner/src/main.rs +--- a/drivers/pcid-spawner/src/main.rs ++++ b/drivers/pcid-spawner/src/main.rs +@@ -1,11 +1,40 @@ ++use std::env; + use std::fs; + use std::process::Command; ++use std::thread; + + use anyhow::{anyhow, Context, Result}; + + use pcid_interface::config::Config; + use pcid_interface::PciFunctionHandle; + ++fn strict_usb_boot() -> bool { ++ matches!( ++ env::var("REDBEAR_STRICT_USB_BOOT") ++ .ok() ++ .as_deref() ++ .map(str::to_ascii_lowercase) ++ .as_deref(), ++ Some("1" | "true" | "yes" | "on") ++ ) ++} ++ ++fn should_detach_in_initfs(initfs: bool, class: u8, subclass: u8, strict_usb_boot: bool) -> bool { ++ if !initfs { ++ return false; ++ } ++ ++ if class == 0x01 { ++ return false; ++ } ++ ++ if strict_usb_boot && class == 0x0C && subclass == 0x03 { ++ return false; ++ } ++ ++ true ++} ++ + fn main() -> Result<()> { + let mut args = pico_args::Arguments::from_env(); + let initfs = args.contains("--initfs"); +@@ -30,6 +59,7 @@ + } + + let config: Config = toml::from_str(&config_data)?; ++ let strict_usb_boot = strict_usb_boot(); + + for entry in fs::read_dir("/scheme/pci")? 
{ + let entry = entry.context("failed to get entry")?; +@@ -87,15 +117,71 @@ + + log::info!("pcid-spawner: spawn {:?}", command); + ++ let device_addr = handle.config().func.addr; ++ + handle.enable_device(); + + let channel_fd = handle.into_inner_fd(); + command.env("PCID_CLIENT_CHANNEL", channel_fd.to_string()); + + #[allow(deprecated, reason = "we can't yet move this to init")] +- daemon::Daemon::spawn(command); +- syscall::close(channel_fd as usize).unwrap(); ++ if should_detach_in_initfs( ++ initfs, ++ full_device_id.class, ++ full_device_id.subclass, ++ strict_usb_boot, ++ ) { ++ log::warn!( ++ "pcid-spawner: detached initfs spawn for {} to avoid blocking early boot", ++ device_addr ++ ); ++ ++ let device_addr = device_addr.to_string(); ++ thread::spawn(move || { ++ #[allow(deprecated, reason = "we can't yet move this to init")] ++ if let Err(err) = daemon::Daemon::spawn(command) { ++ log::error!( ++ "pcid-spawner: spawn/readiness failed for {}: {}", ++ device_addr, ++ err ++ ); ++ log::error!( ++ "pcid-spawner: {} remains enabled without a confirmed ready driver", ++ device_addr ++ ); ++ } ++ if let Err(err) = syscall::close(channel_fd as usize) { ++ log::error!( ++ "pcid-spawner: failed to close channel fd {} for {}: {}", ++ channel_fd, ++ device_addr, ++ err ++ ); ++ } ++ }); ++ } else { ++ #[allow(deprecated, reason = "we can't yet move this to init")] ++ if let Err(err) = daemon::Daemon::spawn(command) { ++ log::error!( ++ "pcid-spawner: spawn/readiness failed for {}: {}", ++ device_addr, ++ err ++ ); ++ log::error!( ++ "pcid-spawner: {} remains enabled without a confirmed ready driver", ++ device_addr ++ ); ++ } ++ if let Err(err) = syscall::close(channel_fd as usize) { ++ log::error!( ++ "pcid-spawner: failed to close channel fd {} for {}: {}", ++ channel_fd, ++ device_addr, ++ err ++ ); ++ } ++ } + } + + Ok(()) + +diff --git a/drivers/pcid/src/main.rs b/drivers/pcid/src/main.rs +--- a/drivers/pcid/src/main.rs ++++ b/drivers/pcid/src/main.rs +@@ -12,6 
+12,7 @@ + }; + use redox_scheme::scheme::register_sync_scheme; + use scheme_utils::Blocking; ++use syscall::{sendfd, SendFdFlags}; + + use crate::cfg_access::Pcie; + use pcid_interface::{FullDeviceId, LegacyInterruptLine, PciBar, PciFunction, PciRom}; +@@ -262,14 +263,13 @@ + let access_fd = socket + .create_this_scheme_fd(0, access_id, syscall::O_RDWR, 0) + .expect("failed to issue this resource"); +- let access_bytes = access_fd.to_ne_bytes(); +- let _ = register_pci +- .call_wo( +- &access_bytes, +- syscall::CallFlags::WRITE | syscall::CallFlags::FD, +- &[], +- ) +- .expect("failed to send pci_fd to acpid"); ++ sendfd( ++ register_pci.raw(), ++ access_fd as usize, ++ SendFdFlags::empty().bits(), ++ 0, ++ ) ++ .expect("failed to send pci_fd to acpid"); + } + Err(err) => { + if err.errno() == libredox::errno::ENODEV { diff --git a/local/patches/base/absorbed/P2-boot-runtime-noise-and-net-race.patch b/local/patches/base/absorbed/P2-boot-runtime-noise-and-net-race.patch new file mode 100644 index 00000000..2e957a89 --- /dev/null +++ b/local/patches/base/absorbed/P2-boot-runtime-noise-and-net-race.patch @@ -0,0 +1,144 @@ +# P2-boot-runtime-noise-and-net-race.patch +# +# Reduce expected boot-time warning noise and harden netstack startup ordering: +# - procmgr: unknown cancellation is trace-level (benign race) +# - acpid: warn once for unsupported power surface +# - ahcid: SATAPI probe failures are informational on empty media +# - netstack: retry network adapter discovery during early boot races + +diff --git a/bootstrap/src/procmgr.rs b/bootstrap/src/procmgr.rs +--- a/bootstrap/src/procmgr.rs ++++ b/bootstrap/src/procmgr.rs +@@ -296,8 +296,8 @@ fn handle_scheme<'a>( + } + } + } else { +- log::warn!("Cancellation for unknown id {:?}", req.id); ++ log::trace!("Cancellation for unknown id {:?}", req.id); + Pending + } + } + +diff --git a/drivers/acpid/src/scheme.rs b/drivers/acpid/src/scheme.rs +--- a/drivers/acpid/src/scheme.rs ++++ b/drivers/acpid/src/scheme.rs +@@ 
-8,6 +8,7 @@ use ron::de::SpannedError; + use scheme_utils::HandleMap; + use std::convert::{TryFrom, TryInto}; + use std::str::FromStr; ++use std::sync::atomic::{AtomicBool, Ordering}; + use syscall::dirent::{DirEntry, DirentBuf, DirentKind}; + use syscall::schemev2::NewFdFlags; + use syscall::FobtainFdFlags; +@@ -29,6 +30,8 @@ use crate::acpi::{ + }; + use crate::resources::{decode_resource_template, ResourceDescriptor}; + ++static POWER_SURFACE_UNAVAILABLE_WARNED: AtomicBool = AtomicBool::new(false); ++ + pub struct AcpiScheme<'acpi, 'sock> { + ctx: &'acpi AcpiContext, + handles: HandleMap>, +@@ -307,8 +310,10 @@ impl<'acpi, 'sock> AcpiScheme<'acpi, 'sock> { + self.ctx.power_snapshot().map_err(|error| match error { + crate::acpi::AmlEvalError::NotInitialized => Error::new(EAGAIN), + crate::acpi::AmlEvalError::Unsupported(message) => { +- log::warn!("ACPI power surface unavailable: {message}"); ++ if !POWER_SURFACE_UNAVAILABLE_WARNED.swap(true, Ordering::Relaxed) { ++ log::warn!("ACPI power surface unavailable: {message}"); ++ } + Error::new(EOPNOTSUPP) + } + other => { + +diff --git a/drivers/storage/ahcid/src/ahci/mod.rs b/drivers/storage/ahcid/src/ahci/mod.rs +--- a/drivers/storage/ahcid/src/ahci/mod.rs ++++ b/drivers/storage/ahcid/src/ahci/mod.rs +@@ -64,8 +64,8 @@ pub fn disks(base: usize, name: &str) -> (&'static mut HbaMem, Vec) { + HbaPortType::SATAPI => match DiskATAPI::new(i, port) { + Ok(disk) => Some(AnyDisk::Atapi(disk)), + Err(err) => { +- error!("{}: {}", i, err); ++ info!("{}: {}", i, err); + None + } + }, + +diff --git a/netstack/src/main.rs b/netstack/src/main.rs +--- a/netstack/src/main.rs ++++ b/netstack/src/main.rs +@@ -6,6 +6,8 @@ use anyhow::{anyhow, bail, Context, Result}; + use event::{EventFlags, EventQueue}; + use libredox::flag::{O_NONBLOCK, O_RDWR}; + use libredox::Fd; ++use std::thread; ++use std::time::Duration; + + use redox_scheme::Socket; + use scheme::Smolnetd; +@@ -22,32 +24,45 @@ mod scheme; + fn get_network_adapter() -> Result 
{ + use std::fs; + +- let mut adapters = vec![]; ++ const MAX_ATTEMPTS: u32 = 50; ++ const RETRY_DELAY: Duration = Duration::from_millis(100); + +- for entry_res in fs::read_dir("/scheme")? { +- let Ok(entry) = entry_res else { +- continue; +- }; ++ for attempt in 1..=MAX_ATTEMPTS { ++ let mut adapters = vec![]; + +- let Ok(scheme) = entry.file_name().into_string() else { +- continue; +- }; ++ for entry_res in fs::read_dir("/scheme")? { ++ let Ok(entry) = entry_res else { ++ continue; ++ }; + +- if !scheme.starts_with("network") { +- continue; +- } ++ let Ok(scheme) = entry.file_name().into_string() else { ++ continue; ++ }; + +- adapters.push(scheme); +- } ++ if !scheme.starts_with("network") { ++ continue; ++ } + +- if adapters.is_empty() { +- bail!("no network adapter found"); +- } else { +- let adapter = adapters.remove(0); ++ adapters.push(scheme); ++ } ++ + if !adapters.is_empty() { +- // FIXME allow using multiple network adapters at the same time +- warn!("Multiple network adapters found. Only {adapter} will be used"); ++ let adapter = adapters.remove(0); ++ if !adapters.is_empty() { ++ // FIXME allow using multiple network adapters at the same time ++ warn!("Multiple network adapters found. 
Only {adapter} will be used"); ++ } ++ return Ok(adapter); ++ } ++ ++ if attempt < MAX_ATTEMPTS { ++ warn!( ++ "no network adapter found yet (attempt {attempt}/{MAX_ATTEMPTS}), waiting {} ms", ++ RETRY_DELAY.as_millis() ++ ); ++ thread::sleep(RETRY_DELAY); + } +- Ok(adapter) + } ++ ++ bail!("no network adapter found") + } diff --git a/local/patches/base/P2-daemon-hardening.patch b/local/patches/base/absorbed/P2-daemon-hardening.patch similarity index 100% rename from local/patches/base/P2-daemon-hardening.patch rename to local/patches/base/absorbed/P2-daemon-hardening.patch diff --git a/local/patches/base/P2-daemon-ready-graceful.patch b/local/patches/base/absorbed/P2-daemon-ready-graceful.patch similarity index 100% rename from local/patches/base/P2-daemon-ready-graceful.patch rename to local/patches/base/absorbed/P2-daemon-ready-graceful.patch diff --git a/local/patches/base/absorbed/P2-hwd-misc.patch b/local/patches/base/absorbed/P2-hwd-misc.patch new file mode 100644 index 00000000..b0898592 --- /dev/null +++ b/local/patches/base/absorbed/P2-hwd-misc.patch @@ -0,0 +1,18 @@ +# P2-hwd-misc.patch +# Keep hwd focused on hardware probing. Init owns boot-time pcid startup. + +diff --git a/drivers/hwd/src/main.rs b/drivers/hwd/src/main.rs +index 79360e34..4de3d9f3 100644 +--- a/drivers/hwd/src/main.rs ++++ b/drivers/hwd/src/main.rs +@@ -37,10 +37,7 @@ fn daemon(daemon: daemon::Daemon) -> ! { + + //TODO: launch pcid based on backend information? 
+ // Must launch after acpid but before probe calls /scheme/acpi/symbols +- #[allow(deprecated, reason = "we can't yet move this to init")] +- daemon::Daemon::spawn(process::Command::new("pcid")); +- + daemon.ready(); + + //TODO: HWD is meant to locate PCI/XHCI/etc devices in ACPI and DeviceTree definitions and start their drivers + diff --git a/local/patches/base/P2-i2c-gpio-ucsi-drivers.patch b/local/patches/base/absorbed/P2-i2c-gpio-ucsi-drivers.patch similarity index 100% rename from local/patches/base/P2-i2c-gpio-ucsi-drivers.patch rename to local/patches/base/absorbed/P2-i2c-gpio-ucsi-drivers.patch diff --git a/local/patches/base/P2-ihdad-device-refactor.patch b/local/patches/base/absorbed/P2-ihdad-device-refactor.patch similarity index 100% rename from local/patches/base/P2-ihdad-device-refactor.patch rename to local/patches/base/absorbed/P2-ihdad-device-refactor.patch diff --git a/local/patches/base/P2-ihdad-hda-stream.patch b/local/patches/base/absorbed/P2-ihdad-hda-stream.patch similarity index 100% rename from local/patches/base/P2-ihdad-hda-stream.patch rename to local/patches/base/absorbed/P2-ihdad-hda-stream.patch diff --git a/local/patches/base/absorbed/P2-init-acpid-wiring.patch b/local/patches/base/absorbed/P2-init-acpid-wiring.patch new file mode 100644 index 00000000..66d6a435 --- /dev/null +++ b/local/patches/base/absorbed/P2-init-acpid-wiring.patch @@ -0,0 +1,33 @@ +diff --git a/init.initfs.d/41_acpid.service b/init.initfs.d/41_acpid.service +new file mode 100644 +--- /dev/null ++++ b/init.initfs.d/41_acpid.service +@@ -0,0 +1,8 @@ ++[unit] ++description = "ACPI daemon" ++default_dependencies = false ++ ++[service] ++cmd = "acpid" ++inherit_envs = ["RSDP_ADDR", "RSDP_SIZE"] ++type = "notify" +diff --git a/init.initfs.d/40_drivers.target b/init.initfs.d/40_drivers.target +--- a/init.initfs.d/40_drivers.target ++++ b/init.initfs.d/40_drivers.target +@@ -7,4 +7,5 @@ requires_weak = [ + "40_bcm2835-sdhcid.service", + "40_hwd.service", + 
"40_pcid-spawner-initfs.service", ++ "41_acpid.service", + ] +diff --git a/init.initfs.d/40_hwd.service b/init.initfs.d/40_hwd.service +--- a/init.initfs.d/40_hwd.service ++++ b/init.initfs.d/40_hwd.service +@@ -1,6 +1,6 @@ + [unit] + description = "Hardware manager" +-requires_weak = ["10_inputd.service", "10_lived.service", "20_graphics.target"] ++requires_weak = ["10_inputd.service", "10_lived.service", "20_graphics.target", "41_acpid.service"] + + [service] + cmd = "hwd" diff --git a/local/patches/base/P2-initfs-pcid-service.patch b/local/patches/base/absorbed/P2-initfs-pcid-service.patch similarity index 100% rename from local/patches/base/P2-initfs-pcid-service.patch rename to local/patches/base/absorbed/P2-initfs-pcid-service.patch diff --git a/local/patches/base/P2-ixgbed-error-handling.patch b/local/patches/base/absorbed/P2-ixgbed-error-handling.patch similarity index 100% rename from local/patches/base/P2-ixgbed-error-handling.patch rename to local/patches/base/absorbed/P2-ixgbed-error-handling.patch diff --git a/local/patches/base/P2-misc-daemon-fixes.patch b/local/patches/base/absorbed/P2-misc-daemon-fixes.patch similarity index 100% rename from local/patches/base/P2-misc-daemon-fixes.patch rename to local/patches/base/absorbed/P2-misc-daemon-fixes.patch diff --git a/local/patches/base/absorbed/P2-network-driver-mains.patch b/local/patches/base/absorbed/P2-network-driver-mains.patch new file mode 100644 index 00000000..a2926d89 --- /dev/null +++ b/local/patches/base/absorbed/P2-network-driver-mains.patch @@ -0,0 +1,607 @@ +# P2-network-driver-mains.patch +# Extract network driver main.rs hardening: replace panic/unwrap/expect with +# proper error handling and graceful exits. 
+# +# Files: drivers/net/e1000d/src/main.rs, drivers/net/ixgbed/src/main.rs, +# drivers/net/rtl8139d/src/main.rs, drivers/net/rtl8168d/src/main.rs, +# drivers/net/virtio-netd/src/main.rs + +diff --git a/drivers/net/e1000d/src/main.rs b/drivers/net/e1000d/src/main.rs +index 373ea9b3..8ff57b33 100644 +--- a/drivers/net/e1000d/src/main.rs ++++ b/drivers/net/e1000d/src/main.rs +@@ -1,5 +1,6 @@ + use std::io::{Read, Write}; + use std::os::unix::io::AsRawFd; ++use std::process; + + use driver_network::NetworkScheme; + use event::{user_data, EventQueue}; +@@ -25,10 +26,13 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + common::file_level(), + ); + +- let irq = pci_config +- .func +- .legacy_interrupt_line +- .expect("e1000d: no legacy interrupts supported"); ++ let irq = match pci_config.func.legacy_interrupt_line { ++ Some(irq) => irq, ++ None => { ++ log::error!("e1000d: no legacy interrupts supported"); ++ process::exit(1); ++ } ++ }; + + log::info!("E1000 {}", pci_config.func.display()); + +@@ -38,7 +42,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + + let mut scheme = NetworkScheme::new( + move || unsafe { +- device::Intel8254x::new(address).expect("e1000d: failed to allocate device") ++ device::Intel8254x::new(address).unwrap_or_else(|err| { ++ log::error!("e1000d: failed to allocate device: {err}"); ++ process::exit(1); ++ }) + }, + daemon, + format!("network.{name}"), +@@ -51,7 +58,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + } + } + +- let event_queue = EventQueue::::new().expect("e1000d: failed to create event queue"); ++ let mut event_queue = EventQueue::::new().unwrap_or_else(|err| { ++ log::error!("e1000d: failed to create event queue: {err}"); ++ process::exit(1); ++ }); + + event_queue + .subscribe( +@@ -59,32 +69,65 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ + Source::Irq, + event::EventFlags::READ, + ) +- .expect("e1000d: failed to subscribe to IRQ fd"); ++ .unwrap_or_else(|err| { ++ log::error!("e1000d: failed to subscribe to IRQ fd: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + scheme.event_handle().raw(), + Source::Scheme, + event::EventFlags::READ, + ) +- .expect("e1000d: failed to subscribe to scheme fd"); +- +- libredox::call::setrens(0, 0).expect("e1000d: failed to enter null namespace"); +- +- scheme.tick().unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("e1000d: failed to subscribe to scheme fd: {err}"); ++ process::exit(1); ++ }); ++ ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("e1000d: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("e1000d: failed initial scheme tick: {err}"); ++ process::exit(1); ++ } + +- for event in event_queue.map(|e| e.expect("e1000d: failed to get event")) { ++ loop { ++ let event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { ++ log::error!("e1000d: failed to get event: {err}"); ++ continue; ++ } ++ None => break, ++ }; + match event.user_data { + Source::Irq => { + let mut irq = [0; 8]; +- irq_file.read(&mut irq).unwrap(); ++ if let Err(err) = irq_file.read(&mut irq) { ++ log::error!("e1000d: failed to read IRQ: {err}"); ++ continue; ++ } + if unsafe { scheme.adapter().irq() } { +- irq_file.write(&mut irq).unwrap(); +- +- scheme.tick().expect("e1000d: failed to handle IRQ") ++ if let Err(err) = irq_file.write(&mut irq) { ++ log::error!("e1000d: failed to write IRQ: {err}"); ++ continue; ++ } ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("e1000d: failed to handle IRQ: {err}"); ++ } ++ } ++ } ++ Source::Scheme => { ++ if let Err(err) = scheme.tick() { ++ log::error!("e1000d: failed to handle scheme op: {err}"); + } + } +- Source::Scheme => scheme.tick().expect("e1000d: failed to handle scheme op"), + } + } +- 
unreachable!() ++ ++ process::exit(0); + } +diff --git a/drivers/net/ixgbed/src/main.rs b/drivers/net/ixgbed/src/main.rs +index 4a6ce74d..855d339d 100644 +--- a/drivers/net/ixgbed/src/main.rs ++++ b/drivers/net/ixgbed/src/main.rs +@@ -1,5 +1,6 @@ + use std::io::{Read, Write}; + use std::os::unix::io::AsRawFd; ++use std::process; + + use driver_network::NetworkScheme; + use event::{user_data, EventQueue}; +@@ -19,12 +20,23 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + let mut name = pci_config.func.name(); + name.push_str("_ixgbe"); + +- let irq = pci_config +- .func +- .legacy_interrupt_line +- .expect("ixgbed: no legacy interrupts supported"); ++ common::setup_logging( ++ "net", ++ "pci", ++ &name, ++ common::output_level(), ++ common::file_level(), ++ ); ++ ++ let irq = match pci_config.func.legacy_interrupt_line { ++ Some(irq) => irq, ++ None => { ++ log::error!("ixgbed: no legacy interrupts supported"); ++ process::exit(1); ++ } ++ }; + +- println!(" + IXGBE {}", pci_config.func.display()); ++ log::info!("IXGBE {}", pci_config.func.display()); + + let mut irq_file = irq.irq_handle("ixgbed"); + +@@ -34,8 +46,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + + let mut scheme = NetworkScheme::new( + move || { +- device::Intel8259x::new(address as usize, size) +- .expect("ixgbed: failed to allocate device") ++ device::Intel8259x::new(address as usize, size).unwrap_or_else(|err| { ++ log::error!("ixgbed: failed to allocate device: {err}"); ++ process::exit(1); ++ }) + }, + daemon, + format!("network.{name}"), +@@ -48,41 +62,77 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ + } + } + +- let event_queue = EventQueue::::new().expect("ixgbed: Could not create event queue."); ++ let mut event_queue = EventQueue::::new().unwrap_or_else(|err| { ++ log::error!("ixgbed: failed to create event queue: {err}"); ++ process::exit(1); ++ }); ++ + event_queue + .subscribe( + irq_file.as_raw_fd() as usize, + Source::Irq, + event::EventFlags::READ, + ) +- .unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("ixgbed: failed to subscribe to IRQ fd: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + scheme.event_handle().raw(), + Source::Scheme, + event::EventFlags::READ, + ) +- .unwrap(); +- +- libredox::call::setrens(0, 0).expect("ixgbed: failed to enter null namespace"); ++ .unwrap_or_else(|err| { ++ log::error!("ixgbed: failed to subscribe to scheme fd: {err}"); ++ process::exit(1); ++ }); ++ ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("ixgbed: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("ixgbed: failed initial scheme tick: {err}"); ++ process::exit(1); ++ } + +- scheme.tick().unwrap(); ++ loop { ++ let event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { ++ log::error!("ixgbed: failed to get event: {err}"); ++ continue; ++ } ++ None => break, ++ }; + +- for event in event_queue.map(|e| e.expect("ixgbed: failed to get next event")) { + match event.user_data { + Source::Irq => { + let mut irq = [0; 8]; +- irq_file.read(&mut irq).unwrap(); ++ if let Err(err) = irq_file.read(&mut irq) { ++ log::error!("ixgbed: failed to read IRQ: {err}"); ++ continue; ++ } + if scheme.adapter().irq() { +- irq_file.write(&mut irq).unwrap(); +- +- scheme.tick().unwrap(); ++ if let Err(err) = irq_file.write(&mut irq) { ++ log::error!("ixgbed: failed to write IRQ: {err}"); ++ continue; ++ } ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("ixgbed: failed to handle IRQ: {err}"); ++ } + } + } + Source::Scheme => { 
+- scheme.tick().unwrap(); ++ if let Err(err) = scheme.tick() { ++ log::error!("ixgbed: failed to handle scheme op: {err}"); ++ } + } + } + } +- unreachable!() ++ ++ process::exit(0); + } +diff --git a/drivers/net/rtl8139d/src/main.rs b/drivers/net/rtl8139d/src/main.rs +index d470e814..64335a23 100644 +--- a/drivers/net/rtl8139d/src/main.rs ++++ b/drivers/net/rtl8139d/src/main.rs +@@ -1,5 +1,6 @@ + use std::io::{Read, Write}; + use std::os::unix::io::AsRawFd; ++use std::process; + + use driver_network::NetworkScheme; + use event::{user_data, EventQueue}; +@@ -32,7 +33,8 @@ fn map_bar(pcid_handle: &mut PciFunctionHandle) -> *mut u8 { + other => log::warn!("BAR {} is {:?} instead of memory BAR", barnum, other), + } + } +- panic!("rtl8139d: failed to find BAR"); ++ log::error!("rtl8139d: failed to find BAR"); ++ process::exit(1); + } + + fn main() { +@@ -61,7 +63,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + + let mut scheme = NetworkScheme::new( + move || unsafe { +- device::Rtl8139::new(bar as usize).expect("rtl8139d: failed to allocate device") ++ device::Rtl8139::new(bar as usize).unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to allocate device: {err}"); ++ process::exit(1); ++ }) + }, + daemon, + format!("network.{name}"), +@@ -74,42 +79,76 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ + } + } + +- let event_queue = EventQueue::::new().expect("rtl8139d: Could not create event queue."); ++ let mut event_queue = EventQueue::::new().unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to create event queue: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + irq_file.irq_handle().as_raw_fd() as usize, + Source::Irq, + event::EventFlags::READ, + ) +- .unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to subscribe to IRQ fd: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + scheme.event_handle().raw(), + Source::Scheme, + event::EventFlags::READ, + ) +- .unwrap(); +- +- libredox::call::setrens(0, 0).expect("rtl8139d: failed to enter null namespace"); +- +- scheme.tick().unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to subscribe to scheme fd: {err}"); ++ process::exit(1); ++ }); ++ ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8139d: failed initial scheme tick: {err}"); ++ process::exit(1); ++ } + +- for event in event_queue.map(|e| e.expect("rtl8139d: failed to get next event")) { ++ loop { ++ let event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { ++ log::error!("rtl8139d: failed to get next event: {err}"); ++ continue; ++ } ++ None => break, ++ }; + match event.user_data { + Source::Irq => { + let mut irq = [0; 8]; +- irq_file.irq_handle().read(&mut irq).unwrap(); ++ if let Err(err) = irq_file.irq_handle().read(&mut irq) { ++ log::error!("rtl8139d: failed to read IRQ: {err}"); ++ continue; ++ } + //TODO: This may be causing spurious interrupts + if unsafe { scheme.adapter_mut().irq() } { +- irq_file.irq_handle().write(&mut irq).unwrap(); +- +- scheme.tick().unwrap(); ++ if let Err(err) = irq_file.irq_handle().write(&mut irq) { ++ log::error!("rtl8139d: failed to write IRQ: 
{err}"); ++ continue; ++ } ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8139d: failed to handle IRQ tick: {err}"); ++ } + } + } + Source::Scheme => { +- scheme.tick().unwrap(); ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8139d: failed to handle scheme op: {err}"); ++ } + } + } + } +- unreachable!() ++ ++ process::exit(0); + } +diff --git a/drivers/net/rtl8168d/src/main.rs b/drivers/net/rtl8168d/src/main.rs +index 1d9963a3..bd2fcb1a 100644 +--- a/drivers/net/rtl8168d/src/main.rs ++++ b/drivers/net/rtl8168d/src/main.rs +@@ -1,5 +1,6 @@ + use std::io::{Read, Write}; + use std::os::unix::io::AsRawFd; ++use std::process; + + use driver_network::NetworkScheme; + use event::{user_data, EventQueue}; +@@ -32,7 +33,8 @@ fn map_bar(pcid_handle: &mut PciFunctionHandle) -> *mut u8 { + other => log::warn!("BAR {} is {:?} instead of memory BAR", barnum, other), + } + } +- panic!("rtl8168d: failed to find BAR"); ++ log::error!("rtl8168d: failed to find BAR"); ++ process::exit(1); + } + + fn main() { +@@ -61,7 +63,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + + let mut scheme = NetworkScheme::new( + move || unsafe { +- device::Rtl8168::new(bar as usize).expect("rtl8168d: failed to allocate device") ++ device::Rtl8168::new(bar as usize).unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to allocate device: {err}"); ++ process::exit(1); ++ }) + }, + daemon, + format!("network.{name}"), +@@ -74,42 +79,76 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ + } + } + +- let event_queue = EventQueue::::new().expect("rtl8168d: Could not create event queue."); ++ let mut event_queue = EventQueue::::new().unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to create event queue: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + irq_file.irq_handle().as_raw_fd() as usize, + Source::Irq, + event::EventFlags::READ, + ) +- .unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to subscribe to IRQ fd: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + scheme.event_handle().raw(), + Source::Scheme, + event::EventFlags::READ, + ) +- .unwrap(); +- +- libredox::call::setrens(0, 0).expect("rtl8168d: failed to enter null namespace"); +- +- scheme.tick().unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to subscribe to scheme fd: {err}"); ++ process::exit(1); ++ }); ++ ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8168d: failed initial scheme tick: {err}"); ++ process::exit(1); ++ } + +- for event in event_queue.map(|e| e.expect("rtl8168d: failed to get next event")) { ++ loop { ++ let event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { ++ log::error!("rtl8168d: failed to get next event: {err}"); ++ continue; ++ } ++ None => break, ++ }; + match event.user_data { + Source::Irq => { + let mut irq = [0; 8]; +- irq_file.irq_handle().read(&mut irq).unwrap(); ++ if let Err(err) = irq_file.irq_handle().read(&mut irq) { ++ log::error!("rtl8168d: failed to read IRQ: {err}"); ++ continue; ++ } + //TODO: This may be causing spurious interrupts + if unsafe { scheme.adapter_mut().irq() } { +- irq_file.irq_handle().write(&mut irq).unwrap(); +- +- scheme.tick().unwrap(); ++ if let Err(err) = irq_file.irq_handle().write(&mut irq) { ++ log::error!("rtl8168d: failed to write IRQ: 
{err}"); ++ continue; ++ } ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8168d: failed to handle IRQ tick: {err}"); ++ } + } + } + Source::Scheme => { +- scheme.tick().unwrap(); ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8168d: failed to handle scheme op: {err}"); ++ } + } + } + } +- unreachable!() ++ ++ process::exit(0); + } +diff --git a/drivers/net/virtio-netd/src/main.rs b/drivers/net/virtio-netd/src/main.rs +index 17d168ef..adbd1086 100644 +--- a/drivers/net/virtio-netd/src/main.rs ++++ b/drivers/net/virtio-netd/src/main.rs +@@ -3,6 +3,7 @@ mod scheme; + use std::fs::File; + use std::io::{Read, Write}; + use std::mem; ++use std::process; + + use driver_network::NetworkScheme; + use pcid_interface::PciFunctionHandle; +@@ -31,8 +32,11 @@ fn main() { + } + + fn daemon_runner(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { +- deamon(daemon, pcid_handle).unwrap(); +- unreachable!(); ++ deamon(daemon, pcid_handle).unwrap_or_else(|err| { ++ log::error!("virtio-netd: daemon failed: {err}"); ++ process::exit(1); ++ }); ++ process::exit(0); + } + + fn deamon( +@@ -52,7 +56,10 @@ fn deamon( + // 0x1000 - virtio-net + let pci_config = pcid_handle.config(); + +- assert_eq!(pci_config.func.full_device_id.device_id, 0x1000); ++ if pci_config.func.full_device_id.device_id != 0x1000 { ++ log::error!("virtio-netd: unexpected device ID {:#06x}, expected 0x1000", pci_config.func.full_device_id.device_id); ++ process::exit(1); ++ } + log::info!("virtio-net: initiating startup sequence :^)"); + + let device = virtio_core::probe_device(&mut pcid_handle)?; +@@ -84,7 +91,8 @@ fn deamon( + device.transport.ack_driver_feature(VIRTIO_NET_F_MAC); + mac + } else { +- unimplemented!() ++ log::error!("virtio-netd: device does not support MAC feature"); ++ return Err("virtio-netd: VIRTIO_NET_F_MAC not supported".into()); + }; + + device.transport.finalize_features(); +@@ -126,11 +134,22 @@ fn deamon( + data: 0, + })?; + +- libredox::call::setrens(0, 
0).expect("virtio-netd: failed to enter null namespace"); ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("virtio-netd: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); + +- scheme.tick()?; ++ if let Err(err) = scheme.tick() { ++ log::error!("virtio-netd: failed initial scheme tick: {err}"); ++ process::exit(1); ++ } + + loop { +- event_queue.read(&mut [0; mem::size_of::()])?; // Wait for event +- scheme.tick()?; ++ if let Err(err) = event_queue.read(&mut [0; mem::size_of::()]) { ++ log::error!("virtio-netd: failed to read event: {err}"); ++ continue; ++ } ++ if let Err(err) = scheme.tick() { ++ log::error!("virtio-netd: failed to handle scheme event: {err}"); ++ } + } diff --git a/local/patches/base/absorbed/P2-network-error-handling.patch b/local/patches/base/absorbed/P2-network-error-handling.patch new file mode 100644 index 00000000..15026cbf --- /dev/null +++ b/local/patches/base/absorbed/P2-network-error-handling.patch @@ -0,0 +1,118 @@ +# P2-network-error-handling.patch +# +# Network driver error handling: replace unwrap()/expect()/panic!() with proper +# error propagation and graceful exits across e1000, ixgbe, rtl8139, rtl8168d, +# and virtio-net drivers. 
+# +# Covers: +# - e1000d/device.rs: replace unreachable!() in DMA array conversion +# - ixgbed/Cargo.toml: add log dependency +# - rtl8139d/device.rs: replace unreachable!() with EIO error +# - rtl8168d/device.rs: replace unreachable!() with EIO error +# - virtio-netd/scheme.rs: DMA allocation error handling for rx buffers +# +diff --git a/drivers/net/e1000d/src/device.rs b/drivers/net/e1000d/src/device.rs +index 4c518f30..0e42d72b 100644 +--- a/drivers/net/e1000d/src/device.rs ++++ b/drivers/net/e1000d/src/device.rs +@@ -3,7 +3,7 @@ use std::{cmp, mem, ptr, slice, thread, time}; + + use driver_network::NetworkAdapter; + +-use syscall::error::Result; ++use syscall::error::{Error, Result, EIO}; + + use common::dma::Dma; + +@@ -207,12 +207,11 @@ impl NetworkAdapter for Intel8254x { + } + + fn dma_array() -> Result<[Dma; N]> { +- Ok((0..N) ++ let vec: Vec> = (0..N) + .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() })) +- .collect::>>()? +- .try_into() +- .unwrap_or_else(|_| unreachable!())) ++ .collect::>>()?; ++ vec.try_into().map_err(|_| Error::new(EIO)) + } + impl Intel8254x { + pub unsafe fn new(base: usize) -> Result { + +diff --git a/drivers/net/ixgbed/Cargo.toml b/drivers/net/ixgbed/Cargo.toml +index d97ff398..fcaf4b19 100644 +--- a/drivers/net/ixgbed/Cargo.toml ++++ b/drivers/net/ixgbed/Cargo.toml +@@ -7,7 +7,8 @@ edition = "2021" + [dependencies] + bitflags.workspace = true + libredox.workspace = true ++log.workspace = true + redox_event.workspace = true + redox_syscall.workspace = true + + +diff --git a/drivers/net/rtl8139d/src/device.rs b/drivers/net/rtl8139d/src/device.rs +index 37167ee2..d7428132 100644 +--- a/drivers/net/rtl8139d/src/device.rs ++++ b/drivers/net/rtl8139d/src/device.rs +@@ -215,8 +215,8 @@ impl Rtl8139 { + .map(|_| Ok(Dma::zeroed()?.assume_init())) + .collect::>>()? 
+ .try_into() +- .unwrap_or_else(|_| unreachable!()), ++ .map_err(|_| Error::new(EIO))?, + transmit_i: 0, + mac_address: [0; 6], + }; + +diff --git a/drivers/net/rtl8168d/src/device.rs b/drivers/net/rtl8168d/src/device.rs +index ae545ec4..7229a52d 100644 +--- a/drivers/net/rtl8168d/src/device.rs ++++ b/drivers/net/rtl8168d/src/device.rs +@@ -177,7 +177,7 @@ impl Rtl8168 { + .map(|_| Ok(Dma::zeroed()?.assume_init())) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()), ++ .map_err(|_| Error::new(EIO))?, + + receive_ring: Dma::zeroed()?.assume_init(), + receive_i: 0, +@@ -185,8 +185,8 @@ impl Rtl8168 { + .map(|_| Ok(Dma::zeroed()?.assume_init())) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()), ++ .map_err(|_| Error::new(EIO))?, + transmit_ring: Dma::zeroed()?.assume_init(), + transmit_i: 0, + transmit_buffer_h: [Dma::zeroed()?.assume_init()], + +diff --git a/drivers/net/virtio-netd/src/scheme.rs b/drivers/net/virtio-netd/src/scheme.rs +index 59b3b93e..d0acb2ba 100644 +--- a/drivers/net/virtio-netd/src/scheme.rs ++++ b/drivers/net/virtio-netd/src/scheme.rs +@@ -27,11 +27,16 @@ impl<'a> VirtioNet<'a> { + // Populate all of the `rx_queue` with buffers to maximize performence. 
+ let mut rx_buffers = vec![]; + for i in 0..(rx.descriptor_len() as usize) { +- rx_buffers.push(unsafe { +- Dma::<[u8]>::zeroed_slice(MAX_BUFFER_LEN) +- .unwrap() +- .assume_init() +- }); ++ let buf = unsafe { ++ match Dma::<[u8]>::zeroed_slice(MAX_BUFFER_LEN) { ++ Ok(dma) => dma.assume_init(), ++ Err(err) => { ++ log::error!("virtio-netd: failed to allocate rx buffer: {err}"); ++ continue; ++ } ++ } ++ }; ++ rx_buffers.push(buf); + + let chain = ChainBuilder::new() + .chain(Buffer::new_unsized(&rx_buffers[i]).flags(DescriptorFlags::WRITE_ONLY)) diff --git a/local/patches/base/P2-pcid-cfg-access.patch b/local/patches/base/absorbed/P2-pcid-cfg-access.patch similarity index 100% rename from local/patches/base/P2-pcid-cfg-access.patch rename to local/patches/base/absorbed/P2-pcid-cfg-access.patch diff --git a/local/patches/base/P2-pcid-driver-interface.patch b/local/patches/base/absorbed/P2-pcid-driver-interface.patch similarity index 100% rename from local/patches/base/P2-pcid-driver-interface.patch rename to local/patches/base/absorbed/P2-pcid-driver-interface.patch diff --git a/local/patches/base/P2-ps2d-improvements.patch b/local/patches/base/absorbed/P2-ps2d-improvements.patch similarity index 100% rename from local/patches/base/P2-ps2d-improvements.patch rename to local/patches/base/absorbed/P2-ps2d-improvements.patch diff --git a/local/patches/base/P2-storage-driver-mains.patch b/local/patches/base/absorbed/P2-storage-driver-mains.patch similarity index 100% rename from local/patches/base/P2-storage-driver-mains.patch rename to local/patches/base/absorbed/P2-storage-driver-mains.patch diff --git a/local/patches/base/absorbed/P2-storage-error-handling.patch b/local/patches/base/absorbed/P2-storage-error-handling.patch new file mode 100644 index 00000000..3e715a0a --- /dev/null +++ b/local/patches/base/absorbed/P2-storage-error-handling.patch @@ -0,0 +1,601 @@ +# P2-storage-error-handling.patch +# +# Storage driver error handling: replace 
unwrap()/expect()/panic!() with proper +# error propagation and graceful exits across AHCI, IDE, NVMe, and VirtIO block drivers. +# +# Covers: +# - ahcid/disk_ata.rs: replace unreachable!() with EIO error +# - ahcid/disk_atapi.rs: replace unreachable!() with EBADF error +# - ahcid/hba.rs: DMA allocation error handling +# - ided/ide.rs: assert→debug_assert, try_into error handling +# - nvmed/executor.rs: executor initialization error handling +# - nvmed/identify.rs: DMA allocation, unreachable!() fallback +# - nvmed/mod.rs: assert→debug_assert, unwrap→proper error/exit +# - nvmed/queues.rs: unreachable!()→safe fallback +# - virtio-blkd/scheme.rs: DMA allocation error handling, assert→if check +# +diff --git a/drivers/storage/ahcid/src/ahci/disk_ata.rs b/drivers/storage/ahcid/src/ahci/disk_ata.rs +index 4f83c51d..7423603b 100644 +--- a/drivers/storage/ahcid/src/ahci/disk_ata.rs ++++ b/drivers/storage/ahcid/src/ahci/disk_ata.rs +@@ -1,7 +1,7 @@ + use std::convert::TryInto; + use std::ptr; + +-use syscall::error::Result; ++use syscall::error::{Error, Result, EIO}; + + use common::dma::Dma; + +@@ -39,7 +39,7 @@ impl DiskATA { + .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() })) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()); ++ .map_err(|_| Error::new(EIO))?; + + let mut fb = unsafe { Dma::zeroed()?.assume_init() }; + let buf = unsafe { Dma::zeroed()?.assume_init() }; +diff --git a/drivers/storage/ahcid/src/ahci/disk_atapi.rs b/drivers/storage/ahcid/src/ahci/disk_atapi.rs +index a0e75c09..8fbdfbef 100644 +--- a/drivers/storage/ahcid/src/ahci/disk_atapi.rs ++++ b/drivers/storage/ahcid/src/ahci/disk_atapi.rs +@@ -37,7 +37,7 @@ impl DiskATAPI { + .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() })) + .collect::>>()? 
+ .try_into() +- .unwrap_or_else(|_| unreachable!()); ++ .map_err(|_| Error::new(EBADF))?; + + let mut fb = unsafe { Dma::zeroed()?.assume_init() }; + let mut buf = unsafe { Dma::zeroed()?.assume_init() }; +diff --git a/drivers/storage/ahcid/src/ahci/hba.rs b/drivers/storage/ahcid/src/ahci/hba.rs +index bea8792c..11a3d4ae 100644 +--- a/drivers/storage/ahcid/src/ahci/hba.rs ++++ b/drivers/storage/ahcid/src/ahci/hba.rs +@@ -178,8 +178,11 @@ impl HbaPort { + clb: &mut Dma<[HbaCmdHeader; 32]>, + ctbas: &mut [Dma; 32], + ) -> Result { +- let dest: Dma<[u16; 256]> = Dma::new([0; 256]).unwrap(); ++ let dest: Dma<[u16; 256]> = Dma::new([0; 256]).map_err(|err| { ++ error!("ahcid: failed to allocate DMA buffer: {err}"); ++ Error::new(EIO) ++ })?; + + let slot = self + .ata_start(clb, ctbas, |cmdheader, cmdfis, prdt_entries, _acmd| { + +diff --git a/drivers/storage/ided/src/ide.rs b/drivers/storage/ided/src/ide.rs +index 5faf3250..094e5889 100644 +--- a/drivers/storage/ided/src/ide.rs ++++ b/drivers/storage/ided/src/ide.rs +@@ -184,10 +184,10 @@ impl Disk for AtaDisk { + let block = start_block + (count as u64) / 512; + + //TODO: support other LBA modes +- assert!(block < 0x1_0000_0000_0000); ++ debug_assert!(block < 0x1_0000_0000_0000); + + let sectors = (chunk.len() + 511) / 512; +- assert!(sectors <= 128); ++ debug_assert!(sectors <= 128); + + log::trace!( + "IDE read chan {} dev {} block {:#x} count {:#x}", +@@ -205,7 +205,7 @@ impl Disk for AtaDisk { + // Make PRDT EOT match chunk size + for i in 0..sectors { + chan.prdt[i] = PrdtEntry { +- phys: (chan.buf.physical() + i * 512).try_into().unwrap(), ++ phys: (chan.buf.physical() + i * 512).try_into().map_err(|_| Error::new(EIO))?, + size: 512, + flags: if i + 1 == sectors { + 1 << 15 // End of table +@@ -216,7 +216,7 @@ impl Disk for AtaDisk { + } + // Set PRDT + let prdt = chan.prdt.physical(); +- chan.busmaster_prdt.write(prdt.try_into().unwrap()); ++ chan.busmaster_prdt.write(prdt.try_into().map_err(|_| 
Error::new(EIO))?); + // Set to read + chan.busmaster_command.writef(1 << 3, true); + // Clear interrupt and error bits +@@ -325,10 +325,10 @@ impl Disk for AtaDisk { + let block = start_block + (count as u64) / 512; + + //TODO: support other LBA modes +- assert!(block < 0x1_0000_0000_0000); ++ debug_assert!(block < 0x1_0000_0000_0000); + + let sectors = (chunk.len() + 511) / 512; +- assert!(sectors <= 128); ++ debug_assert!(sectors <= 128); + + log::trace!( + "IDE write chan {} dev {} block {:#x} count {:#x}", +@@ -346,7 +346,7 @@ impl Disk for AtaDisk { + // Make PRDT EOT match chunk size + for i in 0..sectors { + chan.prdt[i] = PrdtEntry { +- phys: (chan.buf.physical() + i * 512).try_into().unwrap(), ++ phys: (chan.buf.physical() + i * 512).try_into().map_err(|_| Error::new(EIO))?, + size: 512, + flags: if i + 1 == sectors { + 1 << 15 // End of table +@@ -357,8 +357,8 @@ impl Disk for AtaDisk { + } + // Set PRDT + let prdt = chan.prdt.physical(); +- chan.busmaster_prdt.write(prdt.try_into().unwrap()); ++ chan.busmaster_prdt.write(prdt.try_into().map_err(|_| Error::new(EIO))?); + // Set to write + chan.busmaster_command.writef(1 << 3, false); + // Clear interrupt and error bits + +diff --git a/drivers/storage/nvmed/src/nvme/executor.rs b/drivers/storage/nvmed/src/nvme/executor.rs +index 6242fa98..c1435e88 100644 +--- a/drivers/storage/nvmed/src/nvme/executor.rs ++++ b/drivers/storage/nvmed/src/nvme/executor.rs +@@ -34,7 +34,12 @@ impl Hardware for NvmeHw { + &VTABLE + } + fn current() -> std::rc::Rc> { +- THE_EXECUTOR.with(|exec| Rc::clone(exec.borrow().as_ref().unwrap())) ++ THE_EXECUTOR.with(|exec| { ++ Rc::clone(exec.borrow().as_ref().unwrap_or_else(|| { ++ log::error!("nvmed: internal error: executor not initialized"); ++ std::process::exit(1); ++ })) ++ }) + } + fn try_submit( + nvme: &Arc, +diff --git a/drivers/storage/nvmed/src/nvme/identify.rs b/drivers/storage/nvmed/src/nvme/identify.rs +index 05e5b9b2..b1b6e959 100644 +--- 
a/drivers/storage/nvmed/src/nvme/identify.rs ++++ b/drivers/storage/nvmed/src/nvme/identify.rs +@@ -126,7 +126,7 @@ impl LbaFormat { + 0b01 => RelativePerformance::Better, + 0b10 => RelativePerformance::Good, + 0b11 => RelativePerformance::Degraded, +- _ => unreachable!(), ++ _ => RelativePerformance::Degraded, + } + } + pub fn is_available(&self) -> bool { +@@ -153,7 +153,14 @@ impl Nvme { + /// Returns the serial number, model, and firmware, in that order. + pub async fn identify_controller(&self) { + // TODO: Use same buffer +- let data: Dma = unsafe { Dma::zeroed().unwrap().assume_init() }; ++ let data: Dma = unsafe { ++ Dma::zeroed() ++ .map(|dma| dma.assume_init()) ++ .unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate identify DMA: {err}"); ++ std::process::exit(1); ++ }) ++ }; + + // println!(" - Attempting to identify controller"); + let comp = self +@@ -182,7 +189,14 @@ impl Nvme { + } + pub async fn identify_namespace_list(&self, base: u32) -> Vec { + // TODO: Use buffer +- let data: Dma<[u32; 1024]> = unsafe { Dma::zeroed().unwrap().assume_init() }; ++ let data: Dma<[u32; 1024]> = unsafe { ++ Dma::zeroed() ++ .map(|dma| dma.assume_init()) ++ .unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate namespace list DMA: {err}"); ++ std::process::exit(1); ++ }) ++ }; + + // println!(" - Attempting to retrieve namespace ID list"); + let comp = self +@@ -198,7 +212,14 @@ impl Nvme { + } + pub async fn identify_namespace(&self, nsid: u32) -> NvmeNamespace { + //TODO: Use buffer +- let data: Dma = unsafe { Dma::zeroed().unwrap().assume_init() }; ++ let data: Dma = unsafe { ++ Dma::zeroed() ++ .map(|dma| dma.assume_init()) ++ .unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate namespace DMA: {err}"); ++ std::process::exit(1); ++ }) ++ }; + + log::debug!("Attempting to identify namespace {nsid}"); + let comp = self +@@ -216,7 +237,10 @@ impl Nvme { + let block_size = data + .formatted_lba_size() + .lba_data_size() +- 
.expect("nvmed: error: size outside 512-2^64 range"); ++ .unwrap_or_else(|| { ++ log::error!("nvmed: error: size outside 512-2^64 range"); ++ std::process::exit(1); ++ }); + log::debug!("NVME block size: {}", block_size); + + NvmeNamespace { +diff --git a/drivers/storage/nvmed/src/nvme/mod.rs b/drivers/storage/nvmed/src/nvme/mod.rs +index 682ee933..90a25d5b 100644 +--- a/drivers/storage/nvmed/src/nvme/mod.rs ++++ b/drivers/storage/nvmed/src/nvme/mod.rs +@@ -160,7 +160,15 @@ impl Nvme { + } + fn cur_thread_ctxt(&self) -> Arc> { + // TODO: multi-threading +- Arc::clone(self.thread_ctxts.read().get(&0).unwrap()) ++ Arc::clone( ++ self.thread_ctxts ++ .read() ++ .get(&0) ++ .unwrap_or_else(|| { ++ log::error!("nvmed: internal error: thread context 0 missing"); ++ std::process::exit(1); ++ }), ++ ) + } + + pub unsafe fn submission_queue_tail(&self, qid: u16, tail: u16) { +@@ -208,10 +216,22 @@ impl Nvme { + } + + for (qid, iv) in self.cq_ivs.get_mut().iter_mut() { +- let ctxt = thread_ctxts.get(&0).unwrap().lock(); ++ let ctxt = match thread_ctxts.get(&0) { ++ Some(c) => c.lock(), ++ None => { ++ log::error!("nvmed: internal error: thread context 0 missing"); ++ return Err(Error::new(EIO)); ++ } ++ }; + let queues = ctxt.queues.borrow(); + +- let &(ref cq, ref sq) = queues.get(qid).unwrap(); ++ let (cq, sq) = match queues.get(qid) { ++ Some(pair) => pair, ++ None => { ++ log::error!("nvmed: internal error: queue {qid} missing"); ++ return Err(Error::new(EIO)); ++ } ++ }; + log::debug!( + "iv {iv} [cq {qid}: {:X}, {}] [sq {qid}: {:X}, {}]", + cq.data.physical(), +@@ -222,7 +242,13 @@ impl Nvme { + } + + { +- let main_ctxt = thread_ctxts.get(&0).unwrap().lock(); ++ let main_ctxt = match thread_ctxts.get(&0) { ++ Some(c) => c.lock(), ++ None => { ++ log::error!("nvmed: internal error: thread context 0 missing"); ++ return Err(Error::new(EIO)); ++ } ++ }; + + for (i, prp) in main_ctxt.buffer_prp.borrow_mut().iter_mut().enumerate() { + *prp = 
(main_ctxt.buffer.borrow_mut().physical() + i * 4096) as u64; +@@ -231,7 +257,13 @@ impl Nvme { + let regs = self.regs.get_mut(); + + let mut queues = main_ctxt.queues.borrow_mut(); +- let (asq, acq) = queues.get_mut(&0).unwrap(); ++ let (asq, acq) = match queues.get_mut(&0) { ++ Some(pair) => pair, ++ None => { ++ log::error!("nvmed: internal error: admin queue pair missing"); ++ return Err(Error::new(EIO)); ++ } ++ }; + regs.aqa + .write(((acq.data.len() as u32 - 1) << 16) | (asq.data.len() as u32 - 1)); + regs.asq_low.write(asq.data.physical() as u32); +@@ -281,14 +313,14 @@ impl Nvme { + let vector = vector as u8; + + if masked { +- assert_ne!( ++ debug_assert_ne!( + to_clear & (1 << vector), + (1 << vector), + "nvmed: internal error: cannot both mask and set" + ); + to_mask |= 1 << vector; + } else { +- assert_ne!( ++ debug_assert_ne!( + to_mask & (1 << vector), + (1 << vector), + "nvmed: internal error: cannot both mask and set" +@@ -326,22 +358,27 @@ impl Nvme { + cmd_init: impl FnOnce(CmdId) -> NvmeCmd, + fail: impl FnOnce(), + ) -> Option<(CqId, CmdId)> { +- match ctxt.queues.borrow_mut().get_mut(&sq_id).unwrap() { +- (sq, _cq) => { +- if sq.is_full() { +- fail(); +- return None; +- } +- let cmd_id = sq.tail; +- let tail = sq.submit_unchecked(cmd_init(cmd_id)); +- +- // TODO: Submit in bulk +- unsafe { +- self.submission_queue_tail(sq_id, tail); +- } +- Some((sq_id, cmd_id)) ++ let mut queues_ref = ctxt.queues.borrow_mut(); ++ let (sq, _cq) = match queues_ref.get_mut(&sq_id) { ++ Some(pair) => pair, ++ None => { ++ log::error!("nvmed: internal error: submission queue {sq_id} missing"); ++ fail(); ++ return None; + } ++ }; ++ if sq.is_full() { ++ fail(); ++ return None; ++ } ++ let cmd_id = sq.tail; ++ let tail = sq.submit_unchecked(cmd_init(cmd_id)); ++ ++ // TODO: Submit in bulk ++ unsafe { ++ self.submission_queue_tail(sq_id, tail); + } ++ Some((sq_id, cmd_id)) + } + + pub async fn create_io_completion_queue( +@@ -349,13 +386,19 @@ impl Nvme { + 
io_cq_id: CqId, + vector: Option, + ) -> NvmeCompQueue { +- let queue = NvmeCompQueue::new().expect("nvmed: failed to allocate I/O completion queue"); +- +- let len = u16::try_from(queue.data.len()) +- .expect("nvmed: internal error: I/O CQ longer than 2^16 entries"); +- let raw_len = len +- .checked_sub(1) +- .expect("nvmed: internal error: CQID 0 for I/O CQ"); ++ let queue = NvmeCompQueue::new().unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate I/O completion queue: {err}"); ++ std::process::exit(1); ++ }); ++ ++ let len = u16::try_from(queue.data.len()).unwrap_or_else(|_| { ++ log::error!("nvmed: internal error: I/O CQ longer than 2^16 entries"); ++ std::process::exit(1); ++ }); ++ let raw_len = len.checked_sub(1).unwrap_or_else(|| { ++ log::error!("nvmed: internal error: CQID 0 for I/O CQ"); ++ std::process::exit(1); ++ }); + + let comp = self + .submit_and_complete_admin_command(|cid| { +@@ -370,22 +413,28 @@ impl Nvme { + .await; + + /*match comp.status.specific { +- 1 => panic!("invalid queue identifier"), +- 2 => panic!("invalid queue size"), +- 8 => panic!("invalid interrupt vector"), ++ 1 => { log::error!("nvmed: invalid queue identifier"); std::process::exit(1); } ++ 2 => { log::error!("nvmed: invalid queue size"); std::process::exit(1); } ++ 8 => { log::error!("nvmed: invalid interrupt vector"); std::process::exit(1); } + _ => (), + }*/ + + queue + } + pub async fn create_io_submission_queue(&self, io_sq_id: SqId, io_cq_id: CqId) -> NvmeCmdQueue { +- let q = NvmeCmdQueue::new().expect("failed to create submission queue"); +- +- let len = u16::try_from(q.data.len()) +- .expect("nvmed: internal error: I/O SQ longer than 2^16 entries"); +- let raw_len = len +- .checked_sub(1) +- .expect("nvmed: internal error: SQID 0 for I/O SQ"); ++ let q = NvmeCmdQueue::new().unwrap_or_else(|err| { ++ log::error!("nvmed: failed to create submission queue: {err}"); ++ std::process::exit(1); ++ }); ++ ++ let len = 
u16::try_from(q.data.len()).unwrap_or_else(|_| { ++ log::error!("nvmed: internal error: I/O SQ longer than 2^16 entries"); ++ std::process::exit(1); ++ }); ++ let raw_len = len.checked_sub(1).unwrap_or_else(|| { ++ log::error!("nvmed: internal error: SQID 0 for I/O SQ"); ++ std::process::exit(1); ++ }); + + let comp = self + .submit_and_complete_admin_command(|cid| { +@@ -399,9 +448,9 @@ impl Nvme { + }) + .await; + /*match comp.status.specific { +- 0 => panic!("completion queue invalid"), +- 1 => panic!("invalid queue identifier"), +- 2 => panic!("invalid queue size"), ++ 0 => { log::error!("nvmed: completion queue invalid"); std::process::exit(1); } ++ 1 => { log::error!("nvmed: invalid queue identifier"); std::process::exit(1); } ++ 2 => { log::error!("nvmed: invalid queue size"); std::process::exit(1); } + _ => (), + }*/ + +@@ -431,7 +480,10 @@ impl Nvme { + self.thread_ctxts + .read() + .get(&0) +- .unwrap() ++ .unwrap_or_else(|| { ++ log::error!("nvmed: internal error: thread context 0 missing"); ++ std::process::exit(1); ++ }) + .lock() + .queues + .borrow_mut() +@@ -497,8 +549,8 @@ impl Nvme { + for chunk in buf.chunks_mut(/* TODO: buf len */ 8192) { + let blocks = (chunk.len() + block_size - 1) / block_size; + +- assert!(blocks > 0); +- assert!(blocks <= 0x1_0000); ++ debug_assert!(blocks > 0); ++ debug_assert!(blocks <= 0x1_0000); + + self.namespace_rw(&*ctxt, namespace, lba, (blocks - 1) as u16, false) + .await?; +@@ -525,8 +577,8 @@ impl Nvme { + for chunk in buf.chunks(/* TODO: buf len */ 8192) { + let blocks = (chunk.len() + block_size - 1) / block_size; + +- assert!(blocks > 0); +- assert!(blocks <= 0x1_0000); ++ debug_assert!(blocks > 0); ++ debug_assert!(blocks <= 0x1_0000); + + ctxt.buffer.borrow_mut()[..chunk.len()].copy_from_slice(chunk); + +diff --git a/drivers/storage/nvmed/src/nvme/queues.rs b/drivers/storage/nvmed/src/nvme/queues.rs +index a3712aeb..438c905c 100644 +--- a/drivers/storage/nvmed/src/nvme/queues.rs ++++ 
b/drivers/storage/nvmed/src/nvme/queues.rs +@@ -145,8 +145,8 @@ impl Status { + 3 => Self::PathRelatedStatus(code), + 4..=6 => Self::Rsvd(code), + 7 => Self::Vendor(code), +- _ => unreachable!(), ++ _ => Self::Vendor(code), + } + } + } + +diff --git a/drivers/storage/virtio-blkd/src/scheme.rs b/drivers/storage/virtio-blkd/src/scheme.rs +index ec4ecf73..39fb24a8 100644 +--- a/drivers/storage/virtio-blkd/src/scheme.rs ++++ b/drivers/storage/virtio-blkd/src/scheme.rs +@@ -15,19 +15,34 @@ trait BlkExtension { + + impl BlkExtension for Queue<'_> { + async fn read(&self, block: u64, target: &mut [u8]) -> usize { +- let req = Dma::new(BlockVirtRequest { ++ let req = match Dma::new(BlockVirtRequest { + ty: BlockRequestTy::In, + reserved: 0, + sector: block, +- }) +- .unwrap(); ++ }) { ++ Ok(req) => req, ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate read request DMA: {err}"); ++ return 0; ++ } ++ }; + + let result = unsafe { +- Dma::<[u8]>::zeroed_slice(target.len()) +- .unwrap() +- .assume_init() ++ match Dma::<[u8]>::zeroed_slice(target.len()) { ++ Ok(dma) => dma.assume_init(), ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate read buffer DMA: {err}"); ++ return 0; ++ } ++ } ++ }; ++ let status = match Dma::new(u8::MAX) { ++ Ok(s) => s, ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate read status DMA: {err}"); ++ return 0; ++ } + }; +- let status = Dma::new(u8::MAX).unwrap(); + + let chain = ChainBuilder::new() + .chain(Buffer::new(&req)) +@@ -37,28 +52,46 @@ impl BlkExtension for Queue<'_> { + + // XXX: Subtract 1 because the of status byte. 
+ let written = self.send(chain).await as usize - 1; +- assert_eq!(*status, 0); ++ if *status != 0 { ++ log::error!("virtio-blkd: read failed with status {}", *status); ++ return 0; ++ } + + target[..written].copy_from_slice(&result); + written + } + + async fn write(&self, block: u64, target: &[u8]) -> usize { +- let req = Dma::new(BlockVirtRequest { ++ let req = match Dma::new(BlockVirtRequest { + ty: BlockRequestTy::Out, + reserved: 0, + sector: block, +- }) +- .unwrap(); ++ }) { ++ Ok(req) => req, ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate write request DMA: {err}"); ++ return 0; ++ } ++ }; + + let mut result = unsafe { +- Dma::<[u8]>::zeroed_slice(target.len()) +- .unwrap() +- .assume_init() ++ match Dma::<[u8]>::zeroed_slice(target.len()) { ++ Ok(dma) => dma.assume_init(), ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate write buffer DMA: {err}"); ++ return 0; ++ } ++ } + }; + result.copy_from_slice(target.as_ref()); + +- let status = Dma::new(u8::MAX).unwrap(); ++ let status = match Dma::new(u8::MAX) { ++ Ok(s) => s, ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate write status DMA: {err}"); ++ return 0; ++ } ++ }; + + let chain = ChainBuilder::new() + .chain(Buffer::new(&req)) +@@ -67,7 +100,10 @@ impl BlkExtension for Queue<'_> { + .build(); + + self.send(chain).await as usize; +- assert_eq!(*status, 0); ++ if *status != 0 { ++ log::error!("virtio-blkd: write failed with status {}", *status); ++ return 0; ++ } + + target.len() + } diff --git a/local/patches/base/absorbed/P2-usb-pm-and-drivers.patch b/local/patches/base/absorbed/P2-usb-pm-and-drivers.patch new file mode 100644 index 00000000..3fd30d2f --- /dev/null +++ b/local/patches/base/absorbed/P2-usb-pm-and-drivers.patch @@ -0,0 +1,158 @@ +# P2-usb-pm-and-drivers.patch +# +# USB power management and driver interface improvements: +# suspend/resume commands, SCSI driver enablement, PortPmState type, +# IRQ reactor staged port state fallback. 
+# +# Covers: +# - usbctl/main.rs: pm-state, suspend, resume subcommands +# - xhcid/drivers.toml: enable SCSI over USB driver (was commented out) +# - xhcid/driver_interface.rs: PortPmState enum, suspend/resume/port_pm_state methods +# - xhcid/irq_reactor.rs: staged_port_states fallback in with_ring/with_ring_mut +# +diff --git a/drivers/usb/usbctl/src/main.rs b/drivers/usb/usbctl/src/main.rs +index 9b5773d9..232f7cfc 100644 +--- a/drivers/usb/usbctl/src/main.rs ++++ b/drivers/usb/usbctl/src/main.rs +@@ -15,6 +15,9 @@ fn main() { + Command::new("port") + .arg(Arg::new("PORT").num_args(1).required(true)) + .subcommand(Command::new("status")) ++ .subcommand(Command::new("pm-state")) ++ .subcommand(Command::new("suspend")) ++ .subcommand(Command::new("resume")) + .subcommand( + Command::new("endpoint") + .arg(Arg::new("ENDPOINT_NUM").num_args(1).required(true)) +@@ -38,7 +41,16 @@ fn main() { + if let Some(_status_scmd_matches) = port_scmd_matches.subcommand_matches("status") { + let state = handle.port_state().expect("Failed to get port state"); + println!("{}", state.as_str()); ++ } else if let Some(_pm_state_scmd_matches) = port_scmd_matches.subcommand_matches("pm-state") { ++ let state = handle ++ .port_pm_state() ++ .expect("Failed to get port power-management state"); ++ println!("{}", state.as_str()); ++ } else if let Some(_suspend_scmd_matches) = port_scmd_matches.subcommand_matches("suspend") { ++ handle.suspend_device().expect("Failed to suspend device"); ++ } else if let Some(_resume_scmd_matches) = port_scmd_matches.subcommand_matches("resume") { ++ handle.resume_device().expect("Failed to resume device"); + } else if let Some(endp_scmd_matches) = port_scmd_matches.subcommand_matches("endpoint") { + let endp_num = endp_scmd_matches + + .get_one::("ENDPOINT_NUM") +diff --git a/drivers/usb/xhcid/drivers.toml b/drivers/usb/xhcid/drivers.toml +index 83c90e23..470ec063 100644 +--- a/drivers/usb/xhcid/drivers.toml ++++ b/drivers/usb/xhcid/drivers.toml +@@ -1,10 
+1,9 @@ +-#TODO: causes XHCI errors +-#[[drivers]] +-#name = "SCSI over USB" +-#class = 8 # Mass Storage class +-#subclass = 6 # SCSI transparent command set +-#command = ["usbscsid", "$SCHEME", "$PORT", "$IF_PROTO"] ++[[drivers]] ++name = "SCSI over USB" ++class = 8 # Mass Storage class ++subclass = 6 # SCSI transparent command set ++command = ["usbscsid", "$SCHEME", "$PORT", "$IF_PROTO"] + + [[drivers]] + name = "USB HUB" + +diff --git a/drivers/usb/xhcid/src/driver_interface.rs b/drivers/usb/xhcid/src/driver_interface.rs +index 727f8d7e..82f839ae 100644 +--- a/drivers/usb/xhcid/src/driver_interface.rs ++++ b/drivers/usb/xhcid/src/driver_interface.rs +@@ -444,6 +444,33 @@ impl str::FromStr for PortState { + } + } + ++#[repr(u8)] ++#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] ++pub enum PortPmState { ++ Active, ++ Suspended, ++} ++impl PortPmState { ++ pub fn as_str(&self) -> &'static str { ++ match self { ++ Self::Active => "active", ++ Self::Suspended => "suspended", ++ } ++ } ++} ++ ++impl str::FromStr for PortPmState { ++ type Err = Invalid; ++ ++ fn from_str(s: &str) -> result::Result { ++ Ok(match s { ++ "active" => Self::Active, ++ "suspended" => Self::Suspended, ++ _ => return Err(Invalid("read reserved port PM state")), ++ }) ++ } ++} ++ + #[repr(u8)] + #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] + pub enum EndpointStatus { +@@ -560,6 +587,16 @@ impl XhciClientHandle { + let _bytes_written = file.write(&[])?; + Ok(()) + } ++ pub fn suspend_device(&self) -> result::Result<(), XhciClientHandleError> { ++ let file = self.fd.openat("suspend", libredox::flag::O_WRONLY, 0)?; ++ let _bytes_written = file.write(&[])?; ++ Ok(()) ++ } ++ pub fn resume_device(&self) -> result::Result<(), XhciClientHandleError> { ++ let file = self.fd.openat("resume", libredox::flag::O_WRONLY, 0)?; ++ let _bytes_written = file.write(&[])?; ++ Ok(()) ++ } + pub fn get_standard_descs(&self) -> result::Result { + let json = 
self.read("descriptors")?; + Ok(serde_json::from_slice(&json)?) +@@ -582,7 +619,11 @@ impl XhciClientHandle { + let string = self.read_to_string("state")?; + Ok(string.parse()?) + } ++ pub fn port_pm_state(&self) -> result::Result { ++ let string = self.read_to_string("pm_state")?; ++ Ok(string.parse()?) ++ } + pub fn open_endpoint_ctl(&self, num: u8) -> result::Result { + let path = format!("endpoints/{}/ctl", num); + let fd = self.fd.openat(&path, libredox::flag::O_RDWR, 0)?; + +diff --git a/drivers/usb/xhcid/src/xhci/irq_reactor.rs b/drivers/usb/xhcid/src/xhci/irq_reactor.rs +index ac492d5b..310fe51f 100644 +--- a/drivers/usb/xhcid/src/xhci/irq_reactor.rs ++++ b/drivers/usb/xhcid/src/xhci/irq_reactor.rs +@@ -633,7 +633,10 @@ impl Xhci { + pub fn with_ring T>(&self, id: RingId, function: F) -> Option { + use super::RingOrStreams; + +- let slot_state = self.port_states.get(&id.port)?; ++ let slot_state = self ++ .port_states ++ .get(&id.port) ++ .or_else(|| self.staged_port_states.get(&id.port))?; + let endpoint_state = slot_state.endpoint_states.get(&id.endpoint_num)?; + + let ring_ref = match endpoint_state.transfer { +@@ -650,7 +653,10 @@ impl Xhci { + ) -> Option { + use super::RingOrStreams; + +- let mut slot_state = self.port_states.get_mut(&id.port)?; ++ let mut slot_state = self ++ .port_states ++ .get_mut(&id.port) ++ .or_else(|| self.staged_port_states.get_mut(&id.port))?; + let mut endpoint_state = slot_state.endpoint_states.get_mut(&id.endpoint_num)?; + + let ring_ref = match endpoint_state.transfer { diff --git a/local/patches/base/P2-virtio-core-vbox.patch b/local/patches/base/absorbed/P2-virtio-core-vbox.patch similarity index 100% rename from local/patches/base/P2-virtio-core-vbox.patch rename to local/patches/base/absorbed/P2-virtio-core-vbox.patch diff --git a/local/patches/base/P2-xhcid-remaining.patch b/local/patches/base/absorbed/P2-xhcid-remaining.patch similarity index 100% rename from local/patches/base/P2-xhcid-remaining.patch rename to 
local/patches/base/absorbed/P2-xhcid-remaining.patch diff --git a/local/patches/base/P3-acpi-power-dmi.patch b/local/patches/base/absorbed/P3-acpi-power-dmi.patch similarity index 100% rename from local/patches/base/P3-acpi-power-dmi.patch rename to local/patches/base/absorbed/P3-acpi-power-dmi.patch diff --git a/local/patches/base/P3-acpi-wave12-hardening.patch b/local/patches/base/absorbed/P3-acpi-wave12-hardening.patch similarity index 100% rename from local/patches/base/P3-acpi-wave12-hardening.patch rename to local/patches/base/absorbed/P3-acpi-wave12-hardening.patch diff --git a/local/patches/base/absorbed/P3-pcid-aer-scheme.patch b/local/patches/base/absorbed/P3-pcid-aer-scheme.patch new file mode 100644 index 00000000..9e382518 --- /dev/null +++ b/local/patches/base/absorbed/P3-pcid-aer-scheme.patch @@ -0,0 +1,398 @@ +--- a/drivers/pcid/src/cfg_access/mod.rs ++++ b/drivers/pcid/src/cfg_access/mod.rs +@@ -349,7 +349,11 @@ + let bus_addr = self.bus_addr(address.segment(), address.bus())?; + Some(unsafe { bus_addr.add(Self::bus_addr_offset_in_dwords(address, offset)) }) + } ++ ++ pub fn has_extended_config(&self, address: PciAddress) -> bool { ++ self.mmio_addr(address, 0x100).is_some() ++ } + } + + impl ConfigRegionAccess for Pcie { +--- a/drivers/pcid/src/scheme.rs ++++ b/drivers/pcid/src/scheme.rs +@@ -5,12 +5,61 @@ + use redox_scheme::{CallerCtx, OpenResult}; + use scheme_utils::HandleMap; + use syscall::dirent::{DirEntry, DirentBuf, DirentKind}; +-use syscall::error::{Error, Result, EACCES, EBADF, EINVAL, EIO, EISDIR, ENOENT, ENOTDIR, EALREADY}; ++use syscall::error::{ ++ Error, Result, EACCES, EALREADY, EBADF, EINVAL, EIO, EISDIR, ENOENT, ENOTDIR, EROFS, ++}; + use syscall::flag::{MODE_CHR, MODE_DIR, O_DIRECTORY, O_STAT}; + use syscall::schemev2::NewFdFlags; + use syscall::ENOLCK; + + use crate::cfg_access::Pcie; ++ ++const PCIE_EXTENDED_CAPABILITY_AER: u16 = 0x0001; ++ ++#[derive(Clone, Copy)] ++enum AerRegisterName { ++ UncorStatus, ++ UncorMask, ++ 
UncorSeverity, ++ CorStatus, ++ CorMask, ++ Cap, ++ HeaderLog, ++} ++ ++impl AerRegisterName { ++ fn from_path(path: &str) -> Option { ++ Some(match path { ++ "uncor_status" => Self::UncorStatus, ++ "uncor_mask" => Self::UncorMask, ++ "uncor_severity" => Self::UncorSeverity, ++ "cor_status" => Self::CorStatus, ++ "cor_mask" => Self::CorMask, ++ "cap" => Self::Cap, ++ "header_log" => Self::HeaderLog, ++ _ => return None, ++ }) ++ } ++ ++ const fn offset(self) -> u16 { ++ match self { ++ Self::UncorStatus => 0x00, ++ Self::UncorMask => 0x04, ++ Self::UncorSeverity => 0x08, ++ Self::CorStatus => 0x0C, ++ Self::CorMask => 0x10, ++ Self::Cap => 0x14, ++ Self::HeaderLog => 0x18, ++ } ++ } ++ ++ const fn len(self) -> usize { ++ match self { ++ Self::HeaderLog => 16, ++ _ => 4, ++ } ++ } ++} + + pub struct PciScheme { + handles: HandleMap, +@@ -20,13 +69,27 @@ + binds: HashMap, + } + enum Handle { +- TopLevel { entries: Vec }, ++ TopLevel { ++ entries: Vec, ++ }, + Access, +- Device, +- Channel { addr: PciAddress, st: ChannelState }, ++ Device { ++ addr: PciAddress, ++ }, ++ Channel { ++ addr: PciAddress, ++ st: ChannelState, ++ }, + SchemeRoot, + /// Represents an open handle to a device's bind endpoint +- Bind { addr: PciAddress }, ++ Bind { ++ addr: PciAddress, ++ }, ++ AerDir, ++ Aer { ++ addr: PciAddress, ++ register: AerRegisterName, ++ }, + /// Uevent surface for hotplug consumers. Opening uevent returns an object + /// from which device add/remove events can be read. Since pcid currently + /// only scans at startup, this surface is ready for hotplug polling consumers. +@@ -38,13 +101,23 @@ + } + impl Handle { + fn is_file(&self) -> bool { +- matches!(self, Self::Access | Self::Channel { .. } | Self::Bind { .. } | Self::Uevent) ++ matches!( ++ self, ++ Self::Access ++ | Self::Channel { .. } ++ | Self::Bind { .. } ++ | Self::Aer { .. 
} ++ | Self::Uevent ++ ) + } + fn is_dir(&self) -> bool { + !self.is_file() + } + fn requires_root(&self) -> bool { +- matches!(self, Self::Access | Self::Channel { .. } | Self::Bind { .. }) ++ matches!( ++ self, ++ Self::Access | Self::Channel { .. } | Self::Bind { .. } ++ ) + } + fn is_scheme_root(&self) -> bool { + matches!(self, Self::SchemeRoot) +@@ -57,6 +130,16 @@ + } + + const DEVICE_CONTENTS: &[&str] = &["channel", "bind"]; ++const DEVICE_AER_CONTENTS: &[&str] = &["channel", "bind", "aer"]; ++const AER_CONTENTS: &[&str] = &[ ++ "uncor_status", ++ "uncor_mask", ++ "uncor_severity", ++ "cor_status", ++ "cor_mask", ++ "cap", ++ "header_log", ++]; + + impl PciScheme { + pub fn access(&mut self) -> usize { +@@ -141,7 +224,12 @@ + + let (len, mode) = match handle.inner { + Handle::TopLevel { ref entries } => (entries.len(), MODE_DIR | 0o755), +- Handle::Device => (DEVICE_CONTENTS.len(), MODE_DIR | 0o755), ++ Handle::Device { addr } => ( ++ Self::device_entries(&self.pcie, addr).len(), ++ MODE_DIR | 0o755, ++ ), ++ Handle::AerDir => (AER_CONTENTS.len(), MODE_DIR | 0o755), ++ Handle::Aer { register, .. } => (register.len(), MODE_CHR | 0o444), + Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } => (0, MODE_CHR | 0o600), + Handle::Uevent => (0, MODE_CHR | 0o644), + Handle::SchemeRoot => return Err(Error::new(EBADF)), +@@ -154,7 +242,7 @@ + &mut self, + id: usize, + buf: &mut [u8], +- _offset: u64, ++ offset: u64, + _fcntl_flags: u32, + _ctx: &CallerCtx, + ) -> Result { +@@ -166,11 +254,14 @@ + + match handle.inner { + Handle::TopLevel { .. } => Err(Error::new(EISDIR)), +- Handle::Device => Err(Error::new(EISDIR)), ++ Handle::Device { .. } | Handle::AerDir => Err(Error::new(EISDIR)), + Handle::Channel { + addr: _, + ref mut st, + } => Self::read_channel(st, buf), ++ Handle::Aer { addr, register } => { ++ Self::read_aer_register(&self.pcie, addr, register, buf, offset) ++ } + Handle::Uevent => { + // Uevent surface is ready for hotplug polling consumers. 
+ // pcid currently only scans at startup, so return empty (EAGAIN would indicate no data available). +@@ -209,8 +300,15 @@ + } + return Ok(buf); + } +- Handle::Device => DEVICE_CONTENTS, +- Handle::Access | Handle::Channel { .. } | Handle::Bind { .. } | Handle::Uevent => return Err(Error::new(ENOTDIR)), ++ Handle::Device { addr } => Self::device_entries(&self.pcie, addr), ++ Handle::AerDir => AER_CONTENTS, ++ Handle::Access ++ | Handle::Channel { .. } ++ | Handle::Bind { .. } ++ | Handle::Aer { .. } ++ | Handle::Uevent => { ++ return Err(Error::new(ENOTDIR)); ++ } + Handle::SchemeRoot => return Err(Error::new(EBADF)), + }; + +@@ -243,6 +341,7 @@ + Handle::Channel { addr, ref mut st } => { + Self::write_channel(&self.pcie, &mut self.tree, addr, st, buf) + } ++ Handle::Aer { .. } => Err(Error::new(EROFS)), + + _ => Err(Error::new(EBADF)), + } +@@ -357,45 +456,151 @@ + binds: HashMap::new(), + } + } +- fn parse_after_pci_addr(&mut self, addr: PciAddress, after: &str, ctx: &CallerCtx) -> Result { ++ fn device_entries(pcie: &Pcie, addr: PciAddress) -> &'static [&'static str] { ++ if Self::find_pcie_extended_capability(pcie, addr, PCIE_EXTENDED_CAPABILITY_AER).is_some() { ++ DEVICE_AER_CONTENTS ++ } else { ++ DEVICE_CONTENTS ++ } ++ } ++ fn find_pcie_extended_capability( ++ pcie: &Pcie, ++ addr: PciAddress, ++ capability_id: u16, ++ ) -> Option { ++ if !pcie.has_extended_config(addr) { ++ return None; ++ } ++ ++ let mut offset = 0x100_u16; ++ ++ while offset <= 0xFFC { ++ let header = unsafe { pcie.read(addr, offset) }; ++ if header == 0 || header == u32::MAX { ++ return None; ++ } ++ ++ if (header & 0xFFFF) as u16 == capability_id { ++ return Some(offset); ++ } ++ ++ let next = ((header >> 20) & 0xFFF) as u16; ++ if next < 0x100 || next <= offset || next > 0xFFC || next % 4 != 0 { ++ return None; ++ } ++ offset = next; ++ } ++ ++ None ++ } ++ fn read_file_bytes(data: &[u8], buf: &mut [u8], offset: u64) -> Result { ++ let Ok(offset) = usize::try_from(offset) else { ++ 
return Ok(0); ++ }; ++ if offset >= data.len() { ++ return Ok(0); ++ } ++ ++ let count = std::cmp::min(buf.len(), data.len() - offset); ++ buf[..count].copy_from_slice(&data[offset..offset + count]); ++ Ok(count) ++ } ++ fn read_aer_register( ++ pcie: &Pcie, ++ addr: PciAddress, ++ register: AerRegisterName, ++ buf: &mut [u8], ++ offset: u64, ++ ) -> Result { ++ let Some(aer_base) = ++ Self::find_pcie_extended_capability(pcie, addr, PCIE_EXTENDED_CAPABILITY_AER) ++ else { ++ return Err(Error::new(ENOENT)); ++ }; ++ ++ let mut data = [0_u8; 16]; ++ for (index, chunk) in data[..register.len()].chunks_exact_mut(4).enumerate() { ++ let index = u16::try_from(index).map_err(|_| Error::new(EIO))?; ++ let value = unsafe { pcie.read(addr, aer_base + register.offset() + index * 4) }; ++ chunk.copy_from_slice(&value.to_le_bytes()); ++ } ++ ++ Self::read_file_bytes(&data[..register.len()], buf, offset) ++ } ++ fn parse_after_pci_addr( ++ &mut self, ++ addr: PciAddress, ++ after: &str, ++ ctx: &CallerCtx, ++ ) -> Result { + if after.chars().next().map_or(false, |c| c != '/') { + return Err(Error::new(ENOENT)); + } + let func = self.tree.get_mut(&addr).ok_or(Error::new(ENOENT))?; + + Ok(if after.is_empty() { +- Handle::Device ++ Handle::Device { addr } + } else { + let path = &after[1..]; + +- match path { +- "channel" => { +- if func.enabled { +- return Err(Error::new(ENOLCK)); ++ if path == "aer" { ++ if Self::find_pcie_extended_capability( ++ &self.pcie, ++ addr, ++ PCIE_EXTENDED_CAPABILITY_AER, ++ ) ++ .is_none() ++ { ++ return Err(Error::new(ENOENT)); ++ } ++ Handle::AerDir ++ } else if let Some(register_name) = path.strip_prefix("aer/") { ++ let register = ++ AerRegisterName::from_path(register_name).ok_or(Error::new(ENOENT))?; ++ if Self::find_pcie_extended_capability( ++ &self.pcie, ++ addr, ++ PCIE_EXTENDED_CAPABILITY_AER, ++ ) ++ .is_none() ++ { ++ return Err(Error::new(ENOENT)); ++ } ++ Handle::Aer { addr, register } ++ } else { ++ match path { ++ "channel" => { ++ if 
func.enabled { ++ return Err(Error::new(ENOLCK)); ++ } ++ func.inner.legacy_interrupt_line = crate::enable_function( ++ &self.pcie, ++ &mut func.endpoint_header, ++ &mut func.capabilities, ++ ); ++ func.enabled = true; ++ Handle::Channel { ++ addr, ++ st: ChannelState::AwaitingData, ++ } + } +- func.inner.legacy_interrupt_line = crate::enable_function( +- &self.pcie, +- &mut func.endpoint_header, +- &mut func.capabilities, +- ); +- func.enabled = true; +- Handle::Channel { +- addr, +- st: ChannelState::AwaitingData, ++ "bind" => { ++ let addr_str = format!("{}", addr); ++ if let Some(&owner_pid) = self.binds.get(&addr_str) { ++ log::info!( ++ "pcid: device {} already bound by pid {}", ++ addr_str, ++ owner_pid ++ ); ++ return Err(Error::new(EALREADY)); ++ } ++ let caller_pid = u32::try_from(ctx.pid).map_err(|_| Error::new(EINVAL))?; ++ self.binds.insert(addr_str.clone(), caller_pid); ++ log::info!("pcid: device {} bound by pid {}", addr_str, caller_pid); ++ Handle::Bind { addr } + } +- } +- "bind" => { +- let addr_str = format!("{}", addr); +- if let Some(&owner_pid) = self.binds.get(&addr_str) { +- log::info!("pcid: device {} already bound by pid {}", addr_str, owner_pid); +- return Err(Error::new(EALREADY)); +- } +- let caller_pid = ctx.pid; +- self.binds.insert(addr_str.clone(), caller_pid); +- log::info!("pcid: device {} bound by pid {}", addr_str, caller_pid); +- Handle::Bind { addr } +- } +- _ => return Err(Error::new(ENOENT)), ++ _ => return Err(Error::new(ENOENT)), ++ } + } + }) + } diff --git a/local/patches/base/P3-pcid-bind-scheme.patch b/local/patches/base/absorbed/P3-pcid-bind-scheme.patch similarity index 100% rename from local/patches/base/P3-pcid-bind-scheme.patch rename to local/patches/base/absorbed/P3-pcid-bind-scheme.patch diff --git a/local/patches/base/P3-pcid-uevent-format-fix.patch b/local/patches/base/absorbed/P3-pcid-uevent-format-fix.patch similarity index 100% rename from local/patches/base/P3-pcid-uevent-format-fix.patch rename to 
local/patches/base/absorbed/P3-pcid-uevent-format-fix.patch diff --git a/local/patches/base/P3-xhci-device-hardening.patch b/local/patches/base/absorbed/P3-xhci-device-hardening.patch similarity index 100% rename from local/patches/base/P3-xhci-device-hardening.patch rename to local/patches/base/absorbed/P3-xhci-device-hardening.patch diff --git a/local/patches/base/P5-init-daemon-panic-hardening.patch b/local/patches/base/absorbed/P5-init-daemon-panic-hardening.patch similarity index 100% rename from local/patches/base/P5-init-daemon-panic-hardening.patch rename to local/patches/base/absorbed/P5-init-daemon-panic-hardening.patch diff --git a/local/patches/base/P5-init-supervisor-restart.patch b/local/patches/base/absorbed/P5-init-supervisor-restart.patch similarity index 100% rename from local/patches/base/P5-init-supervisor-restart.patch rename to local/patches/base/absorbed/P5-init-supervisor-restart.patch diff --git a/local/patches/base/redox.patch b/local/patches/base/absorbed/redox.patch similarity index 99% rename from local/patches/base/redox.patch rename to local/patches/base/absorbed/redox.patch index e237fc34..b8835f2f 100644 --- a/local/patches/base/redox.patch +++ b/local/patches/base/absorbed/redox.patch @@ -10625,7 +10625,7 @@ diff --git a/drivers/pcid-spawner/src/main.rs b/drivers/pcid-spawner/src/main.rs index a968f4d4..bfff05c3 100644 --- a/drivers/pcid-spawner/src/main.rs +++ b/drivers/pcid-spawner/src/main.rs -@@ -1,11 +1,40 @@ +@@ -1,11 +1,41 @@ +use std::env; use std::fs; use std::process::Command; @@ -10667,7 +10667,7 @@ index a968f4d4..bfff05c3 100644 fn main() -> Result<()> { let mut args = pico_args::Arguments::from_env(); let initfs = args.contains("--initfs"); -@@ -30,6 +59,12 @@ fn main() -> Result<()> { +@@ -30,12 +59,33 @@ fn main() -> Result<()> { } let config: Config = toml::from_str(&config_data)?; diff --git a/local/patches/base/redox.patch.bak b/local/patches/base/redox.patch.bak index e70d3718..e237fc34 100644 --- 
a/local/patches/base/redox.patch.bak +++ b/local/patches/base/redox.patch.bak @@ -1,5 +1,5 @@ diff --git a/Cargo.lock b/Cargo.lock -index 9fcbd662..b4ea6b1d 100644 +index 9fcbd662..6aa362f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,11 +31,20 @@ dependencies = [ @@ -109,7 +109,13 @@ index 9fcbd662..b4ea6b1d 100644 [[package]] name = "gpt" version = "3.1.0" -@@ -1004,6 +1064,68 @@ dependencies = [ +@@ -999,11 +1059,74 @@ dependencies = [ + "common", + "daemon", + "fdt 0.1.5", ++ "libc", + "libredox", + "log", "ron", ] @@ -178,7 +184,7 @@ index 9fcbd662..b4ea6b1d 100644 [[package]] name = "iana-time-zone" version = "0.1.65" -@@ -1128,6 +1250,58 @@ dependencies = [ +@@ -1128,6 +1251,58 @@ dependencies = [ "scheme-utils", ] @@ -237,7 +243,15 @@ index 9fcbd662..b4ea6b1d 100644 [[package]] name = "ioslice" version = "0.6.0" -@@ -2390,6 +2564,24 @@ version = "1.19.0" +@@ -1174,6 +1349,7 @@ dependencies = [ + "daemon", + "driver-network", + "libredox", ++ "log", + "pcid", + "redox_event", + "redox_syscall 0.7.4", +@@ -2390,6 +2566,24 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" @@ -309,8 +323,28 @@ index 9e776232..36d87870 100644 libc = "0.2.181" log = "0.4" libredox = "0.1.16" +diff --git a/audiod/src/main.rs b/audiod/src/main.rs +index 51b103af..2354cf5f 100644 +--- a/audiod/src/main.rs ++++ b/audiod/src/main.rs +@@ -48,7 +48,14 @@ fn daemon(daemon: SchemeDaemon) -> anyhow::Result<()> { + + let pid = libredox::call::getpid()?; + +- let hw_file = Fd::open("/scheme/audiohw", flag::O_WRONLY | flag::O_CLOEXEC, 0)?; ++ let hw_file = match Fd::open("/scheme/audiohw", flag::O_WRONLY | flag::O_CLOEXEC, 0) { ++ Ok(fd) => fd, ++ Err(err) if err.errno() == syscall::ENODEV => { ++ eprintln!("audiod: no audio hardware detected"); ++ return Ok(()); ++ } ++ Err(err) => return Err(err).context("failed to open /scheme/audiohw"), ++ }; + + let socket = 
Socket::create().context("failed to create scheme")?; + diff --git a/daemon/src/lib.rs b/daemon/src/lib.rs -index 9f507221..a0ba9d88 100644 +index 9f507221..4e434082 100644 --- a/daemon/src/lib.rs +++ b/daemon/src/lib.rs @@ -11,12 +11,23 @@ use redox_scheme::Socket; @@ -392,7 +426,7 @@ index 9f507221..a0ba9d88 100644 } } } -@@ -94,12 +105,22 @@ impl SchemeDaemon { +@@ -96,13 +116,16 @@ impl SchemeDaemon { /// Notify the process that the scheme daemon is ready to accept requests. pub fn ready_with_fd(self, cap_fd: Fd) -> syscall::Result<()> { @@ -412,26 +446,6 @@ index 9f507221..a0ba9d88 100644 } /// Notify the process that the synchronous scheme daemon is ready to accept requests. -diff --git a/audiod/src/main.rs b/audiod/src/main.rs -index 5a8c8d06..c3a1d4f0 100644 ---- a/audiod/src/main.rs -+++ b/audiod/src/main.rs -@@ -48,7 +48,14 @@ fn daemon(daemon: SchemeDaemon) -> anyhow::Result<()> { - - let pid = libredox::call::getpid()?; - -- let hw_file = Fd::open("/scheme/audiohw", flag::O_WRONLY | flag::O_CLOEXEC, 0)?; -+ let hw_file = match Fd::open("/scheme/audiohw", flag::O_WRONLY | flag::O_CLOEXEC, 0) { -+ Ok(fd) => fd, -+ Err(err) if err.errno() == syscall::ENODEV => { -+ eprintln!("audiod: no audio hardware detected"); -+ return Ok(()); -+ } -+ Err(err) => return Err(err).context("failed to open /scheme/audiohw"), -+ }; - - let socket = Socket::create().context("failed to create scheme")?; - diff --git a/drivers/acpid/Cargo.toml b/drivers/acpid/Cargo.toml index 2d22a8f9..712b6d6e 100644 --- a/drivers/acpid/Cargo.toml @@ -2130,7 +2144,7 @@ index 94a1eb17..a7cde5d6 100644 Ok(dsdt) => dsdt, Err(error) => { log::error!("Failed to load DSDT: {}", error); -@@ -805,8 +2001,20 @@ impl Fadt { +@@ -805,8 +2001,12 @@ impl Fadt { context.fadt = Some(fadt.clone()); context.dsdt = Some(Dsdt(dsdt_sdt.clone())); @@ -2140,14 +2154,6 @@ index 94a1eb17..a7cde5d6 100644 + context.reset_value = reset_value; context.tables.push(dsdt_sdt); -+ -+ if context.pci_ready() { -+ if let 
Err(error) = context.refresh_s5_values() { -+ log::warn!("Failed to evaluate \\_S5 during FADT init: {error}"); -+ } -+ } else { -+ log::debug!("Deferring \\_S5 evaluation until PCI registration"); -+ } } } @@ -4194,6 +4200,1156 @@ index ffa8a94b..e4dbf930 100644 } #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +diff --git a/drivers/audio/ihdad/src/hda/device.rs b/drivers/audio/ihdad/src/hda/device.rs +index 78e8f0a2..e5742f80 100755 +--- a/drivers/audio/ihdad/src/hda/device.rs ++++ b/drivers/audio/ihdad/src/hda/device.rs +@@ -1,6 +1,6 @@ + #![allow(dead_code)] + +-use std::collections::HashMap; ++use std::collections::{HashMap, HashSet}; + use std::fmt::Write; + use std::str; + use std::task::Poll; +@@ -14,20 +14,55 @@ use redox_scheme::scheme::SchemeSync; + use redox_scheme::CallerCtx; + use redox_scheme::OpenResult; + use scheme_utils::{FpathWriter, HandleMap}; +-use syscall::error::{Error, Result, EACCES, EBADF, EIO, ENODEV, EWOULDBLOCK}; ++use syscall::error::{Error, Result, EACCES, EBADF, EIO, ENODEV, ENOENT, EWOULDBLOCK}; + + use spin::Mutex; + use syscall::schemev2::NewFdFlags; + + use super::common::*; ++use super::parser::AutoPinConfig; + use super::BitsPerSample; + use super::BufferDescriptorListEntry; + use super::CommandBuffer; ++use super::digital::DigitalCodecInfo; ++use super::dispatch::RouteDecision; ++use super::FixupEngine; + use super::HDANode; ++use super::InputStream; + use super::OutputStream; + use super::StreamBuffer; + use super::StreamDescriptorRegs; + ++#[derive(Debug, Clone)] ++pub struct ControllerPolicy { ++ pub prefer_msi: bool, ++ pub single_cmd_fallback: bool, ++ pub probe_mask: u16, ++ pub position_fix: PositionFixPolicy, ++ pub poll_jack: bool, ++} ++ ++#[derive(Debug, Clone, Copy, PartialEq, Eq)] ++pub enum PositionFixPolicy { ++ Auto, ++ Lpib, ++ Posbuf, ++ Dpic, ++ None, ++} ++ ++impl Default for ControllerPolicy { ++ fn default() -> Self { ++ ControllerPolicy { ++ prefer_msi: true, ++ single_cmd_fallback: 
false, ++ probe_mask: 0xFFFF, ++ position_fix: PositionFixPolicy::Auto, ++ poll_jack: false, ++ } ++ } ++} ++ + // GCTL - Global Control + const CRST: u32 = 1 << 0; // 1 bit + const FNCTRL: u32 = 1 << 1; // 1 bit +@@ -55,6 +90,20 @@ const RIRBDMAEN: u8 = 1 << 1; // 1 bit + const ICB: u16 = 1 << 0; + const IRV: u16 = 1 << 1; + ++// INTCTL bits ++const INTCTL_GIE: u32 = 1 << 31; // Global Interrupt Enable ++const INTCTL_CIE: u32 = 1 << 30; // Controller Interrupt Enable ++ ++// RIRBSTS bits (write-1-to-clear) ++const RIRBSTS_RINTFL: u8 = 1 << 0; // Response Interrupt Flag ++const RIRBSTS_RIRBOIS: u8 = 1 << 2; // RIRB Overrun Interrupt Status ++ ++// CORBSTS bits (write-1-to-clear) ++const CORBSTS_CMEI: u8 = 1 << 0; // CORB Memory Error Interrupt ++ ++// STATESTS mask — one bit per codec slot (bits 0-14) ++const STATESTS_MASK: u16 = 0x7FFF; ++ + // CORB and RIRB offset + + const COMMAND_BUFFER_OFFSET: usize = 0x40; +@@ -63,9 +112,8 @@ const NUM_SUB_BUFFS: usize = 32; + const SUB_BUFF_SIZE: usize = 2048; + + enum Handle { +- Todo, +- Pcmout(usize, usize, usize), // Card, index, block_ptr +- Pcmin(usize, usize, usize), // Card, index, block_ptr ++ Pcmout { stream_index: usize }, ++ Pcmin { stream_index: usize }, + StrBuf(Vec), + SchemeRoot, + } +@@ -121,6 +169,34 @@ struct Regs { + dpubase: Mmio, // 0x74 + } + ++struct CodecTopology { ++ codec_addr: CodecAddr, ++ afgs: Vec, ++ widget_map: HashMap, ++ outputs: Vec, ++ inputs: Vec, ++ output_pins: Vec, ++ input_pins: Vec, ++ beep_addr: Option, ++ pin_config: AutoPinConfig, ++} ++ ++impl CodecTopology { ++ fn new(codec_addr: CodecAddr) -> Self { ++ CodecTopology { ++ codec_addr, ++ afgs: Vec::new(), ++ widget_map: HashMap::new(), ++ outputs: Vec::new(), ++ inputs: Vec::new(), ++ output_pins: Vec::new(), ++ input_pins: Vec::new(), ++ beep_addr: None, ++ pin_config: AutoPinConfig::default(), ++ } ++ } ++} ++ + pub struct IntelHDA { + vend_prod: u32, + +@@ -131,6 +207,7 @@ pub struct IntelHDA { + cmd: CommandBuffer, + + 
codecs: Vec, ++ codecs_topology: HashMap, + + outputs: Vec, + inputs: Vec, +@@ -140,15 +217,20 @@ pub struct IntelHDA { + output_pins: Vec, + input_pins: Vec, + +- beep_addr: WidgetAddr, ++ beep_addr: Option, + + buff_desc: Dma<[BufferDescriptorListEntry; 256]>, ++ input_buff_desc: Dma<[BufferDescriptorListEntry; 256]>, + + output_streams: Vec, ++ input_streams: Vec, + + buffs: Vec>, + + int_counter: usize, ++ policy: ControllerPolicy, ++ fixup_engine: FixupEngine, ++ digital_codecs: Vec, + handles: Mutex>, + } + +@@ -160,6 +242,10 @@ impl IntelHDA { + .expect("Could not allocate physical memory for buffer descriptor list.") + .assume_init(); + ++ let input_buff_desc = Dma::<[BufferDescriptorListEntry; 256]>::zeroed() ++ .expect("Could not allocate physical memory for input buffer descriptor list.") ++ .assume_init(); ++ + log::debug!( + "Virt: {:016X}, Phys: {:016X}", + buff_desc.as_ptr() as usize, +@@ -182,11 +268,12 @@ impl IntelHDA { + + cmd: CommandBuffer::new(base + COMMAND_BUFFER_OFFSET, cmd_buff), + +- beep_addr: (0, 0), ++ beep_addr: None, + + widget_map: HashMap::::new(), + + codecs: Vec::::new(), ++ codecs_topology: HashMap::::new(), + + outputs: Vec::::new(), + inputs: Vec::::new(), +@@ -195,21 +282,34 @@ impl IntelHDA { + input_pins: Vec::::new(), + + buff_desc, ++ input_buff_desc, + + output_streams: Vec::::new(), ++ input_streams: Vec::::new(), + + buffs: Vec::>::new(), + + int_counter: 0, ++ policy: ControllerPolicy::default(), ++ fixup_engine: FixupEngine::new(), ++ digital_codecs: Vec::new(), + handles: Mutex::new(HandleMap::new()), + }; + + module.init()?; + ++ let vendor_id = ((module.vend_prod >> 16) & 0xFFFF) as u16; ++ let device_id = (module.vend_prod & 0xFFFF) as u16; ++ let route = RouteDecision::decide(vendor_id, device_id, 0x04, 0x03); ++ log::info!("IHDA: audio route decision: {:?} ({})", route.route, route.reason); ++ + module.info(); + module.enumerate()?; + + module.configure()?; ++ if let Err(err) = module.configure_input() { ++ 
log::debug!("IHDA: input configuration skipped: {:?}", err); ++ } + log::debug!("IHDA: Initialization finished."); + Ok(module) + } +@@ -219,23 +319,28 @@ impl IntelHDA { + + let use_immediate_command_interface = match self.vend_prod { + 0x8086_2668 => false, +- _ => true, ++ _ => !self.policy.single_cmd_fallback, + }; + + self.cmd.init(use_immediate_command_interface)?; ++ ++ self.regs.gctl.writef(UNSOL, true); ++ + self.init_interrupts(); + + Ok(()) + } + + pub fn init_interrupts(&mut self) { +- // TODO: provide a function to enable certain interrupts +- // This just enables the first output stream interupt and the global interrupt +- + let iss = self.num_input_streams(); +- self.regs +- .intctl +- .write((1 << 31) | /* (1 << 30) |*/ (1 << iss)); ++ ++ let mut wakeen: u16 = 0; ++ for &codec in &self.codecs { ++ wakeen |= 1 << codec; ++ } ++ self.regs.wakeen.write(wakeen); ++ ++ self.regs.intctl.write(INTCTL_GIE | INTCTL_CIE | (1 << iss) | (1 << 0)); + } + + pub fn irq(&mut self) -> bool { +@@ -248,6 +353,19 @@ impl IntelHDA { + self.int_counter + } + ++ pub fn policy(&self) -> &ControllerPolicy { ++ &self.policy ++ } ++ ++ pub fn set_policy(&mut self, policy: ControllerPolicy) { ++ log::info!( ++ "IHDA: policy updated msi={} single_cmd={} probe_mask={:04X} pos_fix={:?} poll_jack={}", ++ policy.prefer_msi, policy.single_cmd_fallback, policy.probe_mask, ++ policy.position_fix, policy.poll_jack ++ ); ++ self.policy = policy; ++ } ++ + pub fn read_node(&mut self, addr: WidgetAddr) -> Result { + let mut node = HDANode::new(); + let mut temp: u64; +@@ -341,70 +459,156 @@ impl IntelHDA { + } + + pub fn enumerate(&mut self) -> Result<()> { ++ // Clear old global state (kept for migration safety) + self.output_pins.clear(); + self.input_pins.clear(); ++ self.outputs.clear(); ++ self.inputs.clear(); ++ self.widget_map.clear(); ++ self.codecs_topology.clear(); ++ ++ let codec_addrs = self.codecs.clone(); ++ for codec in codec_addrs { ++ let mut topo = 
CodecTopology::new(codec); ++ ++ let root = self.read_node((codec, 0))?; ++ log::debug!("{}", root); ++ ++ let root_count = root.subnode_count; ++ let root_start = root.subnode_start; ++ ++ for i in 0..root_count { ++ let afg = self.read_node((codec, root_start + i))?; ++ log::debug!("{}", afg); ++ ++ // Only process audio function groups (type 0x01) ++ if afg.function_group_type != 0x01 { ++ log::debug!( ++ "Codec {}: function group {} is type {}, not audio \u{2014} skipping", ++ codec, ++ afg.addr.1, ++ afg.function_group_type ++ ); ++ continue; ++ } ++ ++ topo.afgs.push(afg.addr.1); + +- let codec: u8 = 0; +- +- let root = self.read_node((codec, 0))?; +- +- log::debug!("{}", root); +- +- let root_count = root.subnode_count; +- let root_start = root.subnode_start; +- +- //FIXME: So basically the way this is set up is to only support one codec and hopes the first one is an audio +- for i in 0..root_count { +- let afg = self.read_node((codec, root_start + i))?; +- log::debug!("{}", afg); +- let afg_count = afg.subnode_count; +- let afg_start = afg.subnode_start; +- +- for j in 0..afg_count { +- let mut widget = self.read_node((codec, afg_start + j))?; +- widget.is_widget = true; +- match widget.widget_type() { +- HDAWidgetType::AudioOutput => self.outputs.push(widget.addr), +- HDAWidgetType::AudioInput => self.inputs.push(widget.addr), +- HDAWidgetType::BeepGenerator => self.beep_addr = widget.addr, +- HDAWidgetType::PinComplex => { +- let config = widget.configuration_default(); +- if config.is_output() { +- self.output_pins.push(widget.addr); +- } else if config.is_input() { +- self.input_pins.push(widget.addr); ++ let afg_count = afg.subnode_count; ++ let afg_start = afg.subnode_start; ++ ++ for j in 0..afg_count { ++ let mut widget = self.read_node((codec, afg_start + j))?; ++ widget.is_widget = true; ++ match widget.widget_type() { ++ HDAWidgetType::AudioOutput => { ++ self.outputs.push(widget.addr); ++ topo.outputs.push(widget.addr); ++ } ++ 
HDAWidgetType::AudioInput => { ++ self.inputs.push(widget.addr); ++ topo.inputs.push(widget.addr); + } ++ HDAWidgetType::BeepGenerator => { ++ self.beep_addr = Some(widget.addr); ++ topo.beep_addr = Some(widget.addr); ++ } ++ HDAWidgetType::PinComplex => { ++ let config = widget.configuration_default(); ++ if config.is_output() { ++ self.output_pins.push(widget.addr); ++ topo.output_pins.push(widget.addr); ++ } else if config.is_input() { ++ self.input_pins.push(widget.addr); ++ topo.input_pins.push(widget.addr); ++ } ++ } ++ _ => {} + } +- _ => {} ++ ++ log::debug!("{}", widget); ++ self.widget_map.insert(widget.addr(), widget.clone()); ++ topo.widget_map.insert(widget.addr(), widget); + } ++ } + +- log::debug!("{}", widget); +- self.widget_map.insert(widget.addr(), widget); ++ log::debug!( ++ "Codec {}: {} AFGs, {} outputs, {} inputs, {} output pins, {} input pins", ++ codec, ++ topo.afgs.len(), ++ topo.outputs.len(), ++ topo.inputs.len(), ++ topo.output_pins.len(), ++ topo.input_pins.len(), ++ ); ++ ++ let widget_list: Vec<(WidgetAddr, HDANode)> = topo.widget_map.iter().map(|(a, n)| (*a, n.clone())).collect(); ++ topo.pin_config = AutoPinConfig::parse(&widget_list); ++ ++ self.codecs_topology.insert(codec, topo); ++ } ++ ++ for (codec, topo) in &self.codecs_topology { ++ if let Some(digi) = DigitalCodecInfo::detect_from_topology(*codec, &topo.widget_map) { ++ log::info!( ++ "IHDA: digital codec detected at {} (hdmi={}, {} pins, {} converters)", ++ codec, digi.is_hdmi, digi.pin_widgets.len(), digi.converter_widgets.len() ++ ); ++ self.digital_codecs.push(digi); + } + } + + Ok(()) + } + +- pub fn find_best_output_pin(&mut self) -> Result { +- let outs = &self.output_pins; ++ fn pick_primary_codec_for_output(&self) -> Option { ++ let mut candidates: Vec = self ++ .codecs_topology ++ .values() ++ .filter(|topo| !topo.output_pins.is_empty() && !topo.outputs.is_empty()) ++ .map(|topo| topo.codec_addr) ++ .collect(); ++ candidates.sort(); ++ 
candidates.into_iter().next() ++ } ++ ++ pub fn find_best_output_pin(&mut self, codec: CodecAddr) -> Result { ++ let outs: Vec = self ++ .codecs_topology ++ .get(&codec) ++ .ok_or_else(|| { ++ log::error!("No topology for codec {}", codec); ++ Error::new(ENODEV) ++ })? ++ .output_pins ++ .clone(); ++ + if outs.len() == 1 { + return Ok(outs[0]); + } else if outs.len() > 1 { +- //TODO: change output based on "unsolicited response" interrupts +- // Check for devices in this order: Headphone, Speaker, Line Out + for supported_device in &[DefaultDevice::HPOut, DefaultDevice::Speaker] { +- for &out in outs { +- let widget = self.widget_map.get(&out).unwrap(); +- let cd = widget.configuration_default(); ++ for &out in &outs { ++ let (addr, config_default) = { ++ let widget = self ++ .codecs_topology ++ .get(&codec) ++ .and_then(|t| t.widget_map.get(&out)) ++ .ok_or_else(|| { ++ log::error!( ++ "Widget {:?} not found in codec {} topology", ++ out, ++ codec ++ ); ++ Error::new(ENODEV) ++ })?; ++ (widget.addr, widget.config_default) ++ }; ++ let cd = ConfigurationDefault::from_u32(config_default); + if cd.sequence() == 0 && &cd.default_device() == supported_device { +- // Check for jack detect bit +- let pin_caps = self.cmd.cmd12(widget.addr, 0xF00, 0x0C)?; ++ let pin_caps = self.cmd.cmd12(addr, 0xF00, 0x0C)?; + if pin_caps & (1 << 2) != 0 { +- // Check for presence +- let pin_sense = self.cmd.cmd12(widget.addr, 0xF09, 0)?; ++ let pin_sense = self.cmd.cmd12(addr, 0xF09, 0)?; + if pin_sense & (1 << 31) == 0 { +- // Skip if nothing is plugged in + continue; + } + } +@@ -416,13 +620,26 @@ impl IntelHDA { + Err(Error::new(ENODEV)) + } + +- pub fn find_path_to_dac(&self, addr: WidgetAddr) -> Option> { +- let widget = self.widget_map.get(&addr).unwrap(); ++ pub fn find_path_to_dac( ++ &self, ++ addr: WidgetAddr, ++ codec: CodecAddr, ++ visited: &mut HashSet, ++ ) -> Option> { ++ if visited.contains(&addr) { ++ log::warn!("Cycle detected in widget graph at {:?}", addr); ++ return 
None; ++ } ++ visited.insert(addr); ++ ++ let topo = self.codecs_topology.get(&codec)?; ++ let widget = topo.widget_map.get(&addr)?; ++ + if widget.widget_type() == HDAWidgetType::AudioOutput { + Some(vec![addr]) + } else { + let connection = widget.connections.get(widget.connection_default as usize)?; +- let mut path = self.find_path_to_dac(*connection)?; ++ let mut path = self.find_path_to_dac(*connection, codec, visited)?; + path.insert(0, addr); + Some(path) + } +@@ -466,72 +683,92 @@ impl IntelHDA { + } + + pub fn configure(&mut self) -> Result<()> { +- let outpin = self.find_best_output_pin()?; ++ let codec = self.pick_primary_codec_for_output().ok_or_else(|| { ++ log::error!("No suitable codec found for audio output"); ++ Error::new(ENODEV) ++ })?; ++ ++ log::debug!("Selected codec {} for output", codec); ++ ++ let topo = self.codecs_topology.get(&codec).ok_or_else(|| { ++ log::error!("No topology for codec {}", codec); ++ Error::new(ENODEV) ++ })?; ++ ++ let vendor_id = ((self.vend_prod >> 16) & 0xFFFF) as u16; ++ let device_id = (self.vend_prod & 0xFFFF) as u16; ++ self.fixup_engine.match_fixups(vendor_id, device_id, None, &topo.pin_config); ++ ++ let primary_pins = topo.pin_config.primary_output_pins(); ++ let outpin = primary_pins.first().map(|p| p.addr).ok_or_else(|| { ++ log::error!("No primary output pins found by parser on codec {}", codec); ++ Error::new(ENODEV) ++ })?; + + log::debug!("Best pin: {:01X}:{:02X}", outpin.0, outpin.1); + +- let path = self.find_path_to_dac(outpin).unwrap(); ++ let path = { ++ let mut visited = HashSet::new(); ++ self.find_path_to_dac(outpin, codec, &mut visited) ++ .ok_or_else(|| { ++ log::error!( ++ "No path to DAC from pin {:01X}:{:02X} on codec {}", ++ outpin.0, ++ outpin.1, ++ codec ++ ); ++ Error::new(ENODEV) ++ })? 
++ }; + +- let dac = *path.last().unwrap(); +- let pin = *path.first().unwrap(); ++ let dac = *path.last().ok_or_else(|| { ++ log::error!("Empty DAC path for pin {:01X}:{:02X}", outpin.0, outpin.1); ++ Error::new(ENODEV) ++ })?; ++ let pin = *path.first().ok_or_else(|| { ++ log::error!("Empty path (no pin) for pin {:01X}:{:02X}", outpin.0, outpin.1); ++ Error::new(ENODEV) ++ })?; + + log::debug!("Path to DAC: {:X?}", path); + +- // Set power state 0 (on) for all widgets in path + for &addr in &path { + self.set_power_state(addr, 0)?; + } + +- // Pin enable (0x80 = headphone amp enable, 0x40 = output enable) + self.cmd.cmd12(pin, 0x707, 0xC0)?; +- +- // EAPD enable + self.cmd.cmd12(pin, 0x70C, 2)?; +- +- // Set DAC stream and channel ++ self.cmd.cmd4(pin, 0x708, (1 << 8) | 1)?; + self.set_stream_channel(dac, 1, 0)?; + + self.update_sound_buffers(); + +- log::debug!( +- "Supported Formats: {:08X}", +- self.get_supported_formats((0, 0x1))? +- ); +- log::debug!("Capabilities: {:08X}", self.get_capabilities(path[0])?); ++ let (rate, bps, channels) = self.negotiate_stream_format(dac)?; ++ log::debug!("IHDA: negotiated stream format bps={:?} ch={}", bps, channels); + +- // Create output stream + let output = self.get_output_stream_descriptor(0).unwrap(); + output.set_address(self.buff_desc.physical()); +- output.set_pcm_format(&super::SR_44_1, BitsPerSample::Bits16, 2); +- output.set_cyclic_buffer_length((NUM_SUB_BUFFS * SUB_BUFF_SIZE) as u32); // number of bytes ++ output.set_pcm_format(rate, bps, channels); ++ output.set_cyclic_buffer_length((NUM_SUB_BUFFS * SUB_BUFF_SIZE) as u32); + output.set_stream_number(1); + output.set_last_valid_index((NUM_SUB_BUFFS - 1) as u16); + output.set_interrupt_on_completion(true); + +- // Set DAC converter format +- self.set_converter_format(dac, &super::SR_44_1, BitsPerSample::Bits16, 2)?; ++ self.set_converter_format(dac, rate, bps, channels)?; + +- // Get DAC converter format +- //TODO: should validate? 
+ self.cmd.cmd12(dac, 0xA00, 0)?; + +- // Unmute and set gain to 0db for input and output amplifiers on all widgets in path + for &addr in &path { +- // Read widget capabilities + let caps = self.cmd.cmd12(addr, 0xF00, 0x09)?; + +- //TODO: do we need to set any other indexes? + let left = true; + let right = true; + let index = 0; + let mute = false; + +- // Check for input amp + if (caps & (1 << 1)) != 0 { +- // Read input capabilities + let in_caps = self.cmd.cmd12(addr, 0xF00, 0x0D)?; + let in_gain = (in_caps & 0x7f) as u8; +- // Set input gain + let output = false; + let input = true; + self.set_amplifier_gain_mute( +@@ -540,12 +777,9 @@ impl IntelHDA { + log::debug!("Set {:X?} input gain to 0x{:X}", addr, in_gain); + } + +- // Check for output amp + if (caps & (1 << 2)) != 0 { +- // Read output capabilities + let out_caps = self.cmd.cmd12(addr, 0xF00, 0x12)?; + let out_gain = (out_caps & 0x7f) as u8; +- // Set output gain + let output = true; + let input = false; + self.set_amplifier_gain_mute( +@@ -555,8 +789,6 @@ impl IntelHDA { + } + } + +- //TODO: implement hda-verb? +- + output.run(); + { + log::debug!("Waiting for output 0 to start running..."); +@@ -632,20 +864,21 @@ impl IntelHDA { + + */ + +- pub fn dump_codec(&self, codec: u8) -> String { ++ pub fn dump_all_codecs(&self) -> String { + let mut string = String::new(); + +- for (_, widget) in self.widget_map.iter() { +- let _ = writeln!(string, "{}", widget); ++ for (&codec, topo) in &self.codecs_topology { ++ let _ = writeln!(string, "Codec {}:", codec); ++ for (_, widget) in topo.widget_map.iter() { ++ let _ = writeln!(string, " {}", widget); ++ } + } + + string + } + +- // BEEP!! 
+ pub fn beep(&mut self, div: u8) { +- let addr = self.beep_addr; +- if addr != (0, 0) { ++ if let Some(addr) = self.beep_addr { + let _ = self.cmd.cmd12(addr, 0xF0A, div); + } + } +@@ -700,7 +933,7 @@ impl IntelHDA { + log::debug!("Statests: {:04X}", statests); + + for i in 0..15 { +- if (statests >> i) & 0x1 == 1 { ++ if (statests >> i) & 0x1 == 1 && (self.policy.probe_mask >> i) & 0x1 == 1 { + self.codecs.push(i as CodecAddr); + } + } +@@ -812,6 +1045,54 @@ impl IntelHDA { + Ok(self.cmd.cmd12(addr, 0xF00, 0x0A)? as u32) + } + ++ fn negotiate_stream_format( ++ &mut self, ++ dac: WidgetAddr, ++ ) -> Result<(&'static super::SampleRate, BitsPerSample, u8)> { ++ let fmt = self.get_supported_formats(dac)?; ++ log::debug!("IHDA: DAC {:01X}:{:02X} supported formats: {:08X}", dac.0, dac.1, fmt); ++ ++ let rate = if fmt & (1 << 14) != 0 { ++ &super::SR_48 ++ } else if fmt & (1 << 13) != 0 { ++ &super::SR_44_1 ++ } else if fmt & (1 << 12) != 0 { ++ &super::SR_32 ++ } else if fmt & (1 << 11) != 0 { ++ &super::SR_22_05 ++ } else if fmt & (1 << 10) != 0 { ++ &super::SR_16 ++ } else if fmt & (1 << 9) != 0 { ++ &super::SR_11_025 ++ } else if fmt & (1 << 8) != 0 { ++ &super::SR_8 ++ } else { ++ log::error!("IHDA: no supported sample rate found in format {:08X}", fmt); ++ return Err(Error::new(ENODEV)); ++ }; ++ ++ let bps = if fmt & (1 << 21) != 0 { ++ BitsPerSample::Bits16 ++ } else if fmt & (1 << 23) != 0 { ++ BitsPerSample::Bits24 ++ } else if fmt & (1 << 24) != 0 { ++ BitsPerSample::Bits32 ++ } else if fmt & (1 << 22) != 0 { ++ BitsPerSample::Bits20 ++ } else if fmt & (1 << 20) != 0 { ++ BitsPerSample::Bits8 ++ } else { ++ log::error!("IHDA: no supported bit depth found in format {:08X}", fmt); ++ return Err(Error::new(ENODEV)); ++ }; ++ ++ let caps = self.get_capabilities(dac)?; ++ let max_channels = ((caps >> 0) & 0xFF) as u8 + 1; ++ let channels = if max_channels >= 2 { 2 } else { max_channels }; ++ ++ Ok((rate, bps, channels)) ++ } ++ + fn get_capabilities(&mut self, 
addr: WidgetAddr) -> Result { + Ok(self.cmd.cmd12(addr, 0xF00, 0x09)? as u32) + } +@@ -873,13 +1154,98 @@ impl IntelHDA { + //log::trace!("Status: {:02X} Pos: {:08X} Output CTL: {:06X}", output.status(), output.link_position(), output.control()); + + if os.current_block() == (open_block + 3) % NUM_SUB_BUFFS { +- // Block if we already are 3 buffers ahead + Poll::Pending + } else { + Poll::Ready(os.write_block(buf)) + } + } + ++ pub fn configure_input(&mut self) -> Result<()> { ++ let primary_codec = match self.pick_primary_codec_for_output() { ++ Some(c) => c, ++ None => return Err(Error::new(ENODEV)), ++ }; ++ ++ let topo = self.codecs_topology.get(&primary_codec).ok_or_else(|| { ++ log::error!("IHDA: no topology for codec {}", primary_codec); ++ Error::new(ENODEV) ++ })?; ++ ++ let input_pin = topo.input_pins.first().cloned().ok_or_else(|| { ++ log::debug!("IHDA: no input pins found on codec {}", primary_codec); ++ Error::new(ENODEV) ++ })?; ++ ++ let adc = topo.inputs.first().cloned().ok_or_else(|| { ++ log::debug!("IHDA: no ADC widgets found on codec {}", primary_codec); ++ Error::new(ENODEV) ++ })?; ++ ++ log::debug!( ++ "IHDA: configuring input: pin={:01X}:{:02X} adc={:01X}:{:02X}", ++ input_pin.0, input_pin.1, adc.0, adc.1 ++ ); ++ ++ self.cmd.cmd12(input_pin, 0x707, 0xC0)?; ++ self.set_power_state(input_pin, 0)?; ++ self.set_power_state(adc, 0)?; ++ self.set_stream_channel(adc, 2, 0)?; ++ ++ let iss = self.num_input_streams(); ++ if iss == 0 { ++ log::warn!("IHDA: no input streams available"); ++ return Err(Error::new(ENODEV)); ++ } ++ ++ let input_regs = self.get_input_stream_descriptor(0).ok_or_else(|| { ++ log::error!("IHDA: failed to get input stream descriptor 0"); ++ Error::new(ENODEV) ++ })?; ++ ++ let mut input_stream = InputStream::new(NUM_SUB_BUFFS, SUB_BUFF_SIZE, input_regs); ++ ++ for i in 0..NUM_SUB_BUFFS { ++ self.input_buff_desc[i].set_address(input_stream.phys() as u64 + (i * SUB_BUFF_SIZE) as u64); ++ 
self.input_buff_desc[i].set_length(SUB_BUFF_SIZE as u32); ++ self.input_buff_desc[i].set_interrupt_on_complete(true); ++ } ++ ++ let (rate, bps, channels) = self.negotiate_stream_format(adc)?; ++ ++ input_stream.regs().set_address(self.input_buff_desc.physical()); ++ input_stream.regs().set_pcm_format(rate, bps, channels); ++ input_stream.regs().set_cyclic_buffer_length((NUM_SUB_BUFFS * SUB_BUFF_SIZE) as u32); ++ input_stream.regs().set_stream_number(2); ++ input_stream.regs().set_last_valid_index((NUM_SUB_BUFFS - 1) as u16); ++ input_stream.regs().set_interrupt_on_completion(true); ++ ++ self.set_converter_format(adc, rate, bps, channels)?; ++ ++ self.input_streams.push(input_stream); ++ ++ let input_ref = self.input_streams.last_mut().unwrap(); ++ input_ref.regs().run(); ++ ++ log::debug!("IHDA: input stream 0 configured and running"); ++ Ok(()) ++ } ++ ++ pub fn read_from_input(&mut self, index: usize, buf: &mut [u8]) -> Poll> { ++ let input_stream = match self.input_streams.get_mut(index) { ++ Some(s) => s, ++ None => return Poll::Ready(Err(Error::new(EBADF))), ++ }; ++ ++ let pos = input_stream.regs().link_position() as usize; ++ let hw_block = pos / input_stream.block_size(); ++ ++ if input_stream.current_block() == hw_block { ++ Poll::Pending ++ } else { ++ Poll::Ready(input_stream.read_block(buf)) ++ } ++ } ++ + pub fn handle_interrupts(&mut self) -> bool { + let intsts = self.regs.intsts.read(); + if ((intsts >> 31) & 1) == 1 { +@@ -897,7 +1263,56 @@ impl IntelHDA { + intsts != 0 + } + +- pub fn handle_controller_interrupt(&mut self) {} ++ pub fn handle_controller_interrupt(&mut self) { ++ let statests = self.regs.statests.read(); ++ if statests & STATESTS_MASK != 0 { ++ for i in 0..15 { ++ if (statests >> i) & 1 != 0 { ++ log::info!("IHDA: state change on codec {}", i); ++ } ++ } ++ self.regs.statests.write(statests); ++ } ++ ++ let rirbsts = self.regs.rirbsts.read(); ++ if rirbsts & (RIRBSTS_RINTFL | RIRBSTS_RIRBOIS) != 0 { ++ let rirbwp = 
self.regs.rirbwp.read(); ++ let wp = rirbwp & 0xFF; ++ if wp != 0 { ++ log::debug!("IHDA: RIRB response available, wp={}", wp); ++ } ++ if rirbsts & RIRBSTS_RIRBOIS != 0 { ++ log::warn!("IHDA: RIRB overrun, clearing"); ++ } ++ self.regs.rirbsts.write(rirbsts & (RIRBSTS_RINTFL | RIRBSTS_RIRBOIS)); ++ } ++ ++ let corbsts = self.regs.corbsts.read(); ++ if corbsts & CORBSTS_CMEI != 0 { ++ log::error!("IHDA: CORB memory error, clearing"); ++ self.regs.corbsts.write(CORBSTS_CMEI); ++ } ++ } ++ ++ fn handle_unsolicited_response(&mut self, codec_addr: CodecAddr, response: u32) { ++ let tag = (response >> 26) & 0xF; ++ let payload = response & 0x03FFFFFF; ++ ++ log::info!( ++ "IHDA: unsolicited response codec {} tag={} payload={:06X}", ++ codec_addr, tag, payload ++ ); ++ ++ if tag == 1 { ++ let pin_widget = payload & 0x7F; ++ let plugged = (payload >> 31) & 1; ++ log::info!( ++ "IHDA: jack sense codec {} pin {} {}", ++ codec_addr, pin_widget, ++ if plugged != 0 { "plugged" } else { "unplugged" } ++ ); ++ } ++ } + + pub fn handle_stream_interrupts(&mut self, sis: u32) { + let iss = self.num_input_streams(); +@@ -1017,9 +1432,10 @@ impl SchemeSync for IntelHDA { + return Err(Error::new(EACCES)); + } + let handle = match path.trim_matches('/') { +- //TODO: allow multiple codecs +- "codec" => Handle::StrBuf(self.dump_codec(0).into_bytes()), +- _ => Handle::Todo, ++ "codec" => Handle::StrBuf(self.dump_all_codecs().into_bytes()), ++ "" | "pcmout" | "pcmout0" => Handle::Pcmout { stream_index: 0 }, ++ "pcmin" | "pcmin0" => Handle::Pcmin { stream_index: 0 }, ++ _ => return Err(Error::new(ENOENT)), + }; + let id = self.handles.lock().insert(handle); + +@@ -1038,18 +1454,44 @@ impl SchemeSync for IntelHDA { + _flags: u32, + _ctx: &CallerCtx, + ) -> Result { +- let handles = self.handles.lock(); +- let Handle::StrBuf(strbuf) = handles.get(id)? else { +- return Err(Error::new(EBADF)); ++ let (is_strbuf, is_pcmin) = { ++ let handles = self.handles.lock(); ++ match handles.get(id)? 
{ ++ Handle::StrBuf(_) => (true, false), ++ Handle::Pcmin { .. } => (false, true), ++ _ => return Err(Error::new(EBADF)), ++ } + }; + +- let src = usize::try_from(offset) +- .ok() +- .and_then(|o| strbuf.get(o..)) +- .unwrap_or(&[]); +- let len = src.len().min(buf.len()); +- buf[..len].copy_from_slice(&src[..len]); +- Ok(len) ++ if is_strbuf { ++ let handles = self.handles.lock(); ++ let Handle::StrBuf(strbuf) = handles.get(id)? else { ++ return Err(Error::new(EBADF)); ++ }; ++ let src = usize::try_from(offset) ++ .ok() ++ .and_then(|o| strbuf.get(o..)) ++ .unwrap_or(&[]); ++ let len = src.len().min(buf.len()); ++ buf[..len].copy_from_slice(&src[..len]); ++ return Ok(len); ++ } ++ ++ if is_pcmin { ++ let index = { ++ let handles = self.handles.lock(); ++ match handles.get(id)? { ++ Handle::Pcmin { stream_index, .. } => *stream_index, ++ _ => return Err(Error::new(EBADF)), ++ } ++ }; ++ return match self.read_from_input(index, buf) { ++ Poll::Ready(r) => r, ++ Poll::Pending => Err(Error::new(EWOULDBLOCK)), ++ }; ++ } ++ ++ Err(Error::new(EBADF)) + } + + fn write( +@@ -1061,23 +1503,29 @@ impl SchemeSync for IntelHDA { + _ctx: &CallerCtx, + ) -> Result { + let index = { +- let mut handles = self.handles.lock(); +- match handles.get_mut(id)? { +- Handle::Todo => 0, ++ let handles = self.handles.lock(); ++ match handles.get(id)? { ++ Handle::Pcmout { stream_index, .. } => *stream_index as u8, + _ => return Err(Error::new(EBADF)), + } + }; + +- //log::debug!("Int count: {}", self.int_counter); +- + match self.write_to_output(index, buf) { + Poll::Ready(r) => r, + Poll::Pending => Err(Error::new(EWOULDBLOCK)), + } + } + +- fn fpath(&mut self, _id: usize, buf: &mut [u8], _ctx: &CallerCtx) -> Result { +- FpathWriter::with(buf, "audiohw", |_| Ok(())) ++ fn fpath(&mut self, id: usize, buf: &mut [u8], _ctx: &CallerCtx) -> Result { ++ let handles = self.handles.lock(); ++ let handle = handles.get(id)?; ++ let path = match handle { ++ Handle::Pcmout { .. 
} => "audiohw:pcmout0", ++ Handle::Pcmin { .. } => "audiohw:pcmin0", ++ Handle::StrBuf(_) => "audiohw:codec", ++ Handle::SchemeRoot => "audiohw:", ++ }; ++ FpathWriter::with(buf, path, |_| Ok(())) + } + + fn on_close(&mut self, id: usize) { +diff --git a/drivers/audio/ihdad/src/hda/mod.rs b/drivers/audio/ihdad/src/hda/mod.rs +index 7f01daf8..82ba89ae 100644 +--- a/drivers/audio/ihdad/src/hda/mod.rs ++++ b/drivers/audio/ihdad/src/hda/mod.rs +@@ -2,7 +2,11 @@ + pub mod cmdbuff; + pub mod common; + pub mod device; ++pub mod digital; ++pub mod dispatch; ++pub mod fixup; + pub mod node; ++pub mod parser; + pub mod stream; + + pub use self::node::*; +@@ -10,6 +14,8 @@ pub use self::stream::*; + + pub use self::cmdbuff::*; + pub use self::device::IntelHDA; ++pub use self::fixup::FixupEngine; ++pub use self::parser::AutoPinConfig; + pub use self::stream::BitsPerSample; + pub use self::stream::BufferDescriptorListEntry; + pub use self::stream::StreamBuffer; +diff --git a/drivers/audio/ihdad/src/hda/node.rs b/drivers/audio/ihdad/src/hda/node.rs +index 06c5121f..c1f9c31f 100644 +--- a/drivers/audio/ihdad/src/hda/node.rs ++++ b/drivers/audio/ihdad/src/hda/node.rs +@@ -1,7 +1,7 @@ + use super::common::*; + use std::{fmt, mem}; + +-#[derive(Clone)] ++#[derive(Clone, Debug)] + pub struct HDANode { + pub addr: WidgetAddr, + +diff --git a/drivers/audio/ihdad/src/hda/stream.rs b/drivers/audio/ihdad/src/hda/stream.rs +index caa3c364..a3f5ed73 100644 +--- a/drivers/audio/ihdad/src/hda/stream.rs ++++ b/drivers/audio/ihdad/src/hda/stream.rs +@@ -14,9 +14,9 @@ pub enum BaseRate { + } + + pub struct SampleRate { +- base: BaseRate, +- mult: u16, +- div: u16, ++ pub base: BaseRate, ++ pub mult: u16, ++ pub div: u16, + } + + use self::BaseRate::{BR44_1, BR48}; +@@ -78,6 +78,7 @@ pub const SR_192: SampleRate = SampleRate { + div: 1, + }; + ++#[derive(Debug, Clone, Copy)] + #[repr(u8)] + pub enum BitsPerSample { + Bits8 = 0, +@@ -271,6 +272,52 @@ impl OutputStream { + } + } + ++pub struct 
InputStream { ++ buff: StreamBuffer, ++ desc_regs: &'static mut StreamDescriptorRegs, ++} ++ ++impl InputStream { ++ pub fn new( ++ block_count: usize, ++ block_length: usize, ++ regs: &'static mut StreamDescriptorRegs, ++ ) -> InputStream { ++ InputStream { ++ buff: StreamBuffer::new(block_length, block_count).unwrap(), ++ desc_regs: regs, ++ } ++ } ++ ++ pub fn read_block(&mut self, buf: &mut [u8]) -> Result { ++ self.buff.read_block(buf) ++ } ++ ++ pub fn block_size(&self) -> usize { ++ self.buff.block_size() ++ } ++ ++ pub fn block_count(&self) -> usize { ++ self.buff.block_count() ++ } ++ ++ pub fn current_block(&self) -> usize { ++ self.buff.current_block() ++ } ++ ++ pub fn addr(&self) -> usize { ++ self.buff.addr() ++ } ++ ++ pub fn phys(&self) -> usize { ++ self.buff.phys() ++ } ++ ++ pub fn regs(&mut self) -> &mut StreamDescriptorRegs { ++ self.desc_regs ++ } ++} ++ + #[repr(C, packed)] + pub struct BufferDescriptorListEntry { + addr_low: Mmio, +@@ -379,6 +426,20 @@ impl StreamBuffer { + + Ok(len) + } ++ ++ pub fn read_block(&mut self, buf: &mut [u8]) -> Result { ++ let len = min(self.block_size(), buf.len()); ++ unsafe { ++ copy_nonoverlapping( ++ (self.addr() + self.current_block() * self.block_size()) as *const u8, ++ buf.as_mut_ptr(), ++ len, ++ ); ++ } ++ self.cur_pos += 1; ++ self.cur_pos %= self.block_count(); ++ Ok(len) ++ } + } + impl Drop for StreamBuffer { + fn drop(&mut self) { diff --git a/drivers/audio/ihdad/src/main.rs b/drivers/audio/ihdad/src/main.rs index 31a2add7..11d80133 100755 --- a/drivers/audio/ihdad/src/main.rs @@ -6478,52 +7634,56 @@ index 5bf2be91..20d755d2 100644 let offscreen_ptr = self.ptr.as_ptr() as *mut u32; let onscreen_ptr = framebuffer.onscreen as *mut u32; // FIXME use as_mut_ptr once stable diff --git a/drivers/graphics/virtio-gpud/src/main.rs b/drivers/graphics/virtio-gpud/src/main.rs -index b27f4c56..0f1a9e4d 100644 +index b27f4c56..f3514c8e 100644 --- a/drivers/graphics/virtio-gpud/src/main.rs +++ 
b/drivers/graphics/virtio-gpud/src/main.rs -@@ -482,7 +482,10 @@ fn main() { +@@ -482,8 +482,11 @@ fn main() { } fn daemon_runner(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { - deamon(daemon, pcid_handle).unwrap(); -+ if let Err(err) = deamon(daemon, pcid_handle) { -+ log::error!("virtio-gpud: startup failed: {err}"); +- unreachable!(); ++ deamon(daemon, pcid_handle).unwrap_or_else(|err| { ++ log::error!("virtio-gpud: daemon failed: {err}"); + std::process::exit(1); -+ } - unreachable!(); ++ }); ++ std::process::exit(0); } -@@ -500,7 +503,12 @@ fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: + fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow::Result<()> { +@@ -500,7 +503,10 @@ fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: // 0x1050 - virtio-gpu let pci_config = pcid_handle.config(); - assert_eq!(pci_config.func.full_device_id.device_id, 0x1050); + if pci_config.func.full_device_id.device_id != 0x1050 { -+ return Err(anyhow::anyhow!( -+ "unexpected virtio-gpu device id: {:04x}", -+ pci_config.func.full_device_id.device_id -+ )); ++ log::error!("virtio-gpud: unexpected device ID {:#06x}, expected 0x1050", pci_config.func.full_device_id.device_id); ++ std::process::exit(1); + } log::info!("virtio-gpu: initiating startup sequence :^)"); let device = DEVICE.try_call_once(|| virtio_core::probe_device(&mut pcid_handle))?; -@@ -530,8 +538,8 @@ fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: - // Needs to be before GpuScheme::new to avoid a deadlock due to initnsmgr blocking on +@@ -531,7 +537,10 @@ fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: // /scheme/event as it is already blocked on opening /scheme/display.virtio-gpu. // FIXME change the initnsmgr to not block on openat for the target scheme. 
-- let event_queue: EventQueue = + let event_queue: EventQueue = - EventQueue::new().expect("virtio-gpud: failed to create event queue"); -+ let event_queue: EventQueue = EventQueue::new() -+ .map_err(|err| anyhow::anyhow!("failed to create event queue: {err}"))?; ++ EventQueue::new().unwrap_or_else(|err| { ++ log::error!("virtio-gpud: failed to create event queue: {err}"); ++ std::process::exit(1); ++ }); let mut scheme = scheme::GpuScheme::new( config, -@@ -556,33 +564,40 @@ fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: +@@ -556,33 +565,48 @@ fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: Source::Input, event::EventFlags::READ, ) - .unwrap(); -+ .map_err(|err| anyhow::anyhow!("virtio-gpud: failed to subscribe to input events: {err}"))?; ++ .unwrap_or_else(|err| { ++ log::error!("virtio-gpud: failed to subscribe to input events: {err}"); ++ std::process::exit(1); ++ }); event_queue .subscribe( scheme.event_handle().raw(), @@ -6531,9 +7691,10 @@ index b27f4c56..0f1a9e4d 100644 event::EventFlags::READ, ) - .unwrap(); -+ .map_err(|err| { -+ anyhow::anyhow!("virtio-gpud: failed to subscribe to scheme events: {err}") -+ })?; ++ .unwrap_or_else(|err| { ++ log::error!("virtio-gpud: failed to subscribe to scheme events: {err}"); ++ std::process::exit(1); ++ }); event_queue .subscribe( device.irq_handle.as_raw_fd() as usize, @@ -6541,22 +7702,23 @@ index b27f4c56..0f1a9e4d 100644 event::EventFlags::READ, ) - .unwrap(); -+ .map_err(|err| { -+ anyhow::anyhow!("virtio-gpud: failed to subscribe to interrupt events: {err}") -+ })?; ++ .unwrap_or_else(|err| { ++ log::error!("virtio-gpud: failed to subscribe to interrupt events: {err}"); ++ std::process::exit(1); ++ }); let all = [Source::Input, Source::Scheme, Source::Interrupt]; -- for event in all -- .into_iter() + for event in all + .into_iter() - .chain(event_queue.map(|e| e.expect("virtio-gpud: failed to get next event").user_data)) -- { -+ for event in 
all.into_iter().chain(event_queue.filter_map(|e| match e { -+ Ok(ev) => Some(ev.user_data), -+ Err(err) => { -+ log::error!("virtio-gpud: failed to get next event: {err}"); -+ None -+ } -+ })) { ++ .chain(event_queue.filter_map(|e| match e { ++ Ok(ev) => Some(ev.user_data), ++ Err(err) => { ++ log::error!("virtio-gpud: failed to get next event: {err}"); ++ None ++ } ++ })) + { match event { Source::Input => scheme.handle_vt_events(), Source::Scheme => { @@ -6569,7 +7731,7 @@ index b27f4c56..0f1a9e4d 100644 } Source::Interrupt => loop { let before_gen = device.transport.config_generation(); -@@ -591,7 +606,11 @@ fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: +@@ -591,7 +615,11 @@ fn deamon(deamon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: if events & VIRTIO_GPU_EVENT_DISPLAY != 0 { let (adapter, objects) = scheme.adapter_and_kms_objects_mut(); @@ -6583,65 +7745,59 @@ index b27f4c56..0f1a9e4d 100644 adapter.probe_connector(objects, connector_id); } diff --git a/drivers/graphics/virtio-gpud/src/scheme.rs b/drivers/graphics/virtio-gpud/src/scheme.rs -index 22a985ee..075502a2 100644 +index 22a985ee..c60facfd 100644 --- a/drivers/graphics/virtio-gpud/src/scheme.rs +++ b/drivers/graphics/virtio-gpud/src/scheme.rs -@@ -10,7 +10,7 @@ use drm_sys::{ - DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, - }; +@@ -64,10 +64,23 @@ impl DrmBuffer for VirtGpuFramebuffer<'_> { --use syscall::{EINVAL, PAGE_SIZE}; -+use syscall::{EIO, EINVAL, PAGE_SIZE}; - - use virtio_core::spec::{Buffer, ChainBuilder, DescriptorFlags}; - use virtio_core::transport::{Error, Queue, Transport}; -@@ -65,9 +65,21 @@ impl DrmBuffer for VirtGpuFramebuffer<'_> { impl Drop for VirtGpuFramebuffer<'_> { fn drop(&mut self) { - futures::executor::block_on(async { +- futures::executor::block_on(async { - let request = Dma::new(ResourceUnref::new(self.id)).unwrap(); -+ let request = match Dma::new(ResourceUnref::new(self.id)) { -+ Ok(r) => r, -+ Err(err) => { -+ 
log::error!("virtio-gpud: failed to allocate DMA for resource unref: {err}"); -+ return; -+ } -+ }; ++ let request = match Dma::new(ResourceUnref::new(self.id)) { ++ Ok(r) => r, ++ Err(err) => { ++ log::error!("virtio-gpud: failed to allocate unref request DMA: {err}"); ++ return; ++ } ++ }; ++ ++ let header = match Dma::new(ControlHeader::default()) { ++ Ok(h) => h, ++ Err(err) => { ++ log::error!("virtio-gpud: failed to allocate unref header DMA: {err}"); ++ return; ++ } ++ }; - let header = Dma::new(ControlHeader::default()).unwrap(); -+ let header = match Dma::new(ControlHeader::default()) { -+ Ok(h) => h, -+ Err(err) => { -+ log::error!("virtio-gpud: failed to allocate DMA for unref header: {err}"); -+ return; -+ } -+ }; ++ futures::executor::block_on(async { let command = ChainBuilder::new() .chain(Buffer::new(&request)) .chain(Buffer::new(&header).flags(DescriptorFlags::WRITE_ONLY)) -@@ -182,7 +194,9 @@ impl VirtGpuAdapter<'_> { +@@ -182,7 +195,9 @@ impl VirtGpuAdapter<'_> { .build(); self.control_queue.send(command).await; - assert!(response.header.ty == CommandTy::RespOkDisplayInfo); + if response.header.ty != CommandTy::RespOkDisplayInfo { -+ return Err(Error::QueueSetup("unexpected response type for display info")); ++ return Err(Error::Probe("unexpected display info response type")); + } Ok(response) } -@@ -197,7 +211,9 @@ impl VirtGpuAdapter<'_> { +@@ -197,7 +212,9 @@ impl VirtGpuAdapter<'_> { .build(); self.control_queue.send(command).await; - assert!(response.header.ty == CommandTy::RespOkEdid); + if response.header.ty != CommandTy::RespOkEdid { -+ return Err(Error::QueueSetup("unexpected response type for EDID")); ++ return Err(Error::Probe("unexpected EDID response type")); + } Ok(response) } -@@ -212,7 +228,7 @@ impl VirtGpuAdapter<'_> { +@@ -212,7 +229,7 @@ impl VirtGpuAdapter<'_> { ) { //Transfering cursor resource to host futures::executor::block_on(async { @@ -6650,7 +7806,7 @@ index 22a985ee..075502a2 100644 cursor.id, GpuRect { x: 0, -@@ 
-221,14 +237,38 @@ impl VirtGpuAdapter<'_> { +@@ -221,14 +238,33 @@ impl VirtGpuAdapter<'_> { height: 64, }, 0, @@ -6661,24 +7817,19 @@ index 22a985ee..075502a2 100644 + )) { + Ok(r) => r, + Err(err) => { -+ log::error!( -+ "virtio-gpud: failed to allocate DMA for cursor transfer: {err}" -+ ); ++ log::error!("virtio-gpud: failed to allocate cursor transfer DMA: {err}"); + return; + } + }; + let header = match self.send_request_fenced(transfer_request).await { + Ok(h) => h, + Err(err) => { -+ log::error!("virtio-gpud: failed to send cursor transfer request: {err}"); ++ log::error!("virtio-gpud: failed to send cursor transfer: {err}"); + return; + } + }; + if header.ty != CommandTy::RespOkNodata { -+ log::error!( -+ "virtio-gpud: unexpected response for cursor transfer: {:?}", -+ header.ty -+ ); ++ log::error!("virtio-gpud: cursor transfer returned {:?}", header.ty); + } }); @@ -6687,14 +7838,14 @@ index 22a985ee..075502a2 100644 + let request = match Dma::new(UpdateCursor::update_cursor(x, y, hot_x, hot_y, cursor.id)) { + Ok(r) => r, + Err(err) => { -+ log::error!("virtio-gpud: failed to allocate DMA for cursor update: {err}"); ++ log::error!("virtio-gpud: failed to allocate cursor update DMA: {err}"); + return; + } + }; futures::executor::block_on(async { let command = ChainBuilder::new().chain(Buffer::new(&request)).build(); self.cursor_queue.send(command).await; -@@ -236,7 +276,13 @@ impl VirtGpuAdapter<'_> { +@@ -236,7 +272,13 @@ impl VirtGpuAdapter<'_> { } fn move_cursor(&mut self, x: i32, y: i32) { @@ -6702,38 +7853,35 @@ index 22a985ee..075502a2 100644 + let request = match Dma::new(MoveCursor::move_cursor(x, y)) { + Ok(r) => r, + Err(err) => { -+ log::error!("virtio-gpud: failed to allocate DMA for cursor move: {err}"); ++ log::error!("virtio-gpud: failed to allocate move cursor DMA: {err}"); + return; + } + }; futures::executor::block_on(async { let command = ChainBuilder::new().chain(Buffer::new(&request)).build(); -@@ -246,7 +292,10 @@ impl 
VirtGpuAdapter<'_> { +@@ -246,7 +288,7 @@ impl VirtGpuAdapter<'_> { fn disable_cursor(&mut self) { if self.hidden_cursor.is_none() { - let (width, height) = self.hw_cursor_size().unwrap(); -+ let Some((width, height)) = self.hw_cursor_size() else { -+ log::error!("virtio-gpud: failed to get hardware cursor size"); -+ return; -+ }; ++ let (width, height) = self.hw_cursor_size().unwrap_or((64, 64)); let (cursor, stride) = self.create_dumb_buffer(width, height); unsafe { core::ptr::write_bytes( -@@ -257,8 +306,9 @@ impl VirtGpuAdapter<'_> { +@@ -257,7 +299,10 @@ impl VirtGpuAdapter<'_> { } self.hidden_cursor = Some(Arc::new(cursor)); } - let hidden_cursor = self.hidden_cursor.as_ref().unwrap().clone(); -- -+ let Some(hidden_cursor) = self.hidden_cursor.clone() else { -+ return; -+ }; ++ let hidden_cursor = self.hidden_cursor.as_ref().unwrap_or_else(|| { ++ log::error!("virtio-gpud: hidden_cursor missing after initialization"); ++ std::process::exit(1); ++ }).clone(); + self.update_cursor(&hidden_cursor, 0, 0, 0, 0); } - } -@@ -280,7 +330,9 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -280,7 +325,9 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { fn init(&mut self, objects: &mut KmsObjects) { futures::executor::block_on(async { @@ -6744,49 +7892,34 @@ index 22a985ee..075502a2 100644 }); for display_id in 0..self.config.num_scanouts.get() { -@@ -310,7 +362,19 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -310,7 +357,13 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { fn probe_connector(&mut self, objects: &mut KmsObjects, id: KmsObjectId) { futures::executor::block_on(async { - let mut connector = objects.get_connector(id).unwrap().lock().unwrap(); + let mut connector = match objects.get_connector(id) { -+ Ok(c) => match c.lock() { -+ Ok(guard) => guard, -+ Err(err) => { -+ log::error!("virtio-gpud: failed to lock connector: {err}"); -+ return; -+ } -+ }, -+ Err(e) => { -+ log::error!("virtio-gpud: connector not found: {e}"); ++ Ok(c) => 
c.lock().unwrap(), ++ Err(err) => { ++ log::error!("virtio-gpud: connector {:?} not found: {}", id, err); + return; + } + }; let display = &self.displays[connector.driver_data.display_id as usize]; connector.connection = if display.enabled { -@@ -325,7 +389,19 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -325,7 +378,10 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { drop(connector); let blob = objects.add_blob(display.edid.clone()); - objects.get_connector(id).unwrap().lock().unwrap().edid = blob; + match objects.get_connector(id) { -+ Ok(c) => match c.lock() { -+ Ok(mut guard) => guard.edid = blob, -+ Err(err) => { -+ log::error!( -+ "virtio-gpud: failed to lock connector for EDID update: {err}" -+ ); -+ } -+ }, -+ Err(e) => { -+ log::error!("virtio-gpud: connector not found for EDID update: {e}"); -+ } ++ Ok(c) => c.lock().unwrap().edid = blob, ++ Err(err) => log::error!("virtio-gpud: connector {:?} not found on second access: {}", id, err), + } } else { connector.update_from_size(display.width, display.height); } -@@ -336,7 +412,13 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -336,7 +392,13 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { futures::executor::block_on(async { let bpp = 32; let fb_size = width as usize * height as usize * bpp / 8; @@ -6794,14 +7927,14 @@ index 22a985ee..075502a2 100644 + let sgl = match sgl::Sgl::new(fb_size) { + Ok(s) => s, + Err(err) => { -+ log::error!("virtio-gpud: failed to allocate SGL for dumb buffer: {err}"); ++ log::error!("virtio-gpud: failed to allocate SGL: {err}"); + std::process::exit(1); + } + }; unsafe { core::ptr::write_bytes(sgl.as_ptr() as *mut u8, 255, fb_size); -@@ -345,22 +427,61 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -345,22 +407,43 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { let res_id = ResourceId::alloc(); // Create a host resource using `VIRTIO_GPU_CMD_RESOURCE_CREATE_2D`. 
@@ -6816,34 +7949,23 @@ index 22a985ee..075502a2 100644 + )) { + Ok(r) => r, + Err(err) => { -+ log::error!("virtio-gpud: failed to allocate DMA for resource create: {err}"); -+ return ( -+ VirtGpuFramebuffer { -+ queue: self.control_queue.clone(), -+ id: res_id, -+ sgl, -+ width, -+ height, -+ }, -+ width * 4, -+ ); ++ log::error!("virtio-gpud: failed to allocate create 2d DMA: {err}"); ++ std::process::exit(1); + } + }; - let header = self.send_request(request).await.unwrap(); - assert_eq!(header.ty, CommandTy::RespOkNodata); -+ match self.send_request(request).await { -+ Ok(header) => { -+ if header.ty != CommandTy::RespOkNodata { -+ log::error!( -+ "virtio-gpud: unexpected response for resource create: {:?}", -+ header.ty -+ ); -+ } -+ } ++ let header = match self.send_request(request).await { ++ Ok(h) => h, + Err(err) => { -+ log::error!("virtio-gpud: failed to send resource create request: {err}"); ++ log::error!("virtio-gpud: failed to send create 2d: {err}"); ++ std::process::exit(1); + } ++ }; ++ if header.ty != CommandTy::RespOkNodata { ++ log::error!("virtio-gpud: create 2d returned {:?}", header.ty); ++ std::process::exit(1); + } // Use the allocated framebuffer from the guest ram, and attach it as backing @@ -6851,117 +7973,54 @@ index 22a985ee..075502a2 100644 - let mut mem_entries = - unsafe { Dma::zeroed_slice(sgl.chunks().len()).unwrap().assume_init() }; -+ let mut mem_entries = match unsafe { Dma::zeroed_slice(sgl.chunks().len()) } { -+ Ok(entries) => unsafe { entries.assume_init() }, -+ Err(err) => { -+ log::error!("virtio-gpud: failed to allocate DMA for memory entries: {err}"); -+ return ( -+ VirtGpuFramebuffer { -+ queue: self.control_queue.clone(), -+ id: res_id, -+ sgl, -+ width, -+ height, -+ }, -+ width * 4, -+ ); ++ let mut mem_entries = unsafe { ++ match Dma::zeroed_slice(sgl.chunks().len()) { ++ Ok(dma) => dma.assume_init(), ++ Err(err) => { ++ log::error!("virtio-gpud: failed to allocate mem entries DMA: {err}"); ++ 
std::process::exit(1); ++ } + } + }; for (entry, chunk) in mem_entries.iter_mut().zip(sgl.chunks().iter()) { *entry = MemEntry { address: chunk.phys as u64, -@@ -369,9 +490,43 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -369,9 +452,20 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { }; } - let attach_request = - Dma::new(AttachBacking::new(res_id, mem_entries.len() as u32)).unwrap(); - let header = Dma::new(ControlHeader::default()).unwrap(); -+ let attach_request = match Dma::new(AttachBacking::new( -+ res_id, -+ mem_entries.len() as u32, -+ )) { ++ let attach_request = match Dma::new(AttachBacking::new(res_id, mem_entries.len() as u32)) { + Ok(r) => r, + Err(err) => { -+ log::error!( -+ "virtio-gpud: failed to allocate DMA for attach request: {err}" -+ ); -+ return ( -+ VirtGpuFramebuffer { -+ queue: self.control_queue.clone(), -+ id: res_id, -+ sgl, -+ width, -+ height, -+ }, -+ width * 4, -+ ); ++ log::error!("virtio-gpud: failed to allocate attach backing DMA: {err}"); ++ std::process::exit(1); + } + }; + let header = match Dma::new(ControlHeader::default()) { + Ok(h) => h, + Err(err) => { -+ log::error!("virtio-gpud: failed to allocate DMA for attach header: {err}"); -+ return ( -+ VirtGpuFramebuffer { -+ queue: self.control_queue.clone(), -+ id: res_id, -+ sgl, -+ width, -+ height, -+ }, -+ width * 4, -+ ); ++ log::error!("virtio-gpud: failed to allocate attach header DMA: {err}"); ++ std::process::exit(1); + } + }; let command = ChainBuilder::new() .chain(Buffer::new(&attach_request)) .chain(Buffer::new_unsized(&mem_entries)) -@@ -379,7 +534,12 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -379,7 +473,9 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { .build(); self.control_queue.send(command).await; - assert_eq!(header.ty, CommandTy::RespOkNodata); + if header.ty != CommandTy::RespOkNodata { -+ log::error!( -+ "virtio-gpud: unexpected response for attach backing: {:?}", -+ header.ty -+ ); ++ log::error!("virtio-gpud: attach 
backing returned {:?}", header.ty); + } ( VirtGpuFramebuffer { -@@ -410,7 +570,13 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { - damage: Damage, - ) -> syscall::Result<()> { - futures::executor::block_on(async { -- let mut crtc = crtc.lock().unwrap(); -+ let mut crtc = match crtc.lock() { -+ Ok(guard) => guard, -+ Err(err) => { -+ log::error!("virtio-gpud: crtc mutex poisoned: {err}"); -+ return Err(syscall::Error::new(EIO)); -+ } -+ }; - let framebuffer = state - .fb_id - .map(|fb_id| objects.get_framebuffer(fb_id)) -@@ -418,7 +584,13 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { - crtc.state = state; - - for connector in objects.connectors() { -- let connector = connector.lock().unwrap(); -+ let connector = match connector.lock() { -+ Ok(guard) => guard, -+ Err(err) => { -+ log::error!("virtio-gpud: connector mutex poisoned: {err}"); -+ continue; -+ } -+ }; - - if connector.state.crtc_id != objects.crtc_ids()[crtc.crtc_index as usize] { - continue; -@@ -427,19 +599,40 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -427,19 +523,27 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { let display_id = connector.driver_data.display_id; let Some(framebuffer) = framebuffer else { @@ -6977,27 +8036,14 @@ index 22a985ee..075502a2 100644 + )) { + Ok(r) => r, + Err(err) => { -+ log::error!( -+ "virtio-gpud: failed to allocate DMA for scanout: {err}" -+ ); -+ return Err(syscall::Error::new(EIO)); ++ log::error!("virtio-gpud: failed to allocate scanout clear DMA: {err}"); ++ return Ok(()); + } + }; -+ let header = match self.send_request(scanout_request).await { -+ Ok(h) => h, -+ Err(err) => { -+ log::error!( -+ "virtio-gpud: failed to send scanout request: {err}" -+ ); -+ return Err(syscall::Error::new(EIO)); -+ } -+ }; -+ if header.ty != CommandTy::RespOkNodata { -+ log::error!( -+ "virtio-gpud: unexpected response for scanout: {:?}", -+ header.ty -+ ); -+ return Err(syscall::Error::new(EIO)); ++ match self.send_request(scanout_request).await { ++ 
Ok(header) if header.ty == CommandTy::RespOkNodata => {} ++ Ok(header) => log::error!("virtio-gpud: scanout clear returned {:?}", header.ty), ++ Err(err) => log::error!("virtio-gpud: failed to send scanout clear: {err}"), + } self.displays[display_id as usize].active_resource = None; return Ok(()); @@ -7008,7 +8054,7 @@ index 22a985ee..075502a2 100644 framebuffer.buffer.id, GpuRect { x: 0, -@@ -448,22 +641,61 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -448,22 +552,38 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { height: framebuffer.height, }, 0, @@ -7019,29 +8065,18 @@ index 22a985ee..075502a2 100644 + )) { + Ok(r) => r, + Err(err) => { -+ log::error!("virtio-gpud: failed to allocate DMA for transfer: {err}"); -+ return Err(syscall::Error::new(EIO)); ++ log::error!("virtio-gpud: failed to allocate xfer DMA: {err}"); ++ return Ok(()); + } + }; -+ let header = match self.send_request(req).await { -+ Ok(h) => h, -+ Err(err) => { -+ log::error!("virtio-gpud: failed to send transfer request: {err}"); -+ return Err(syscall::Error::new(EIO)); -+ } -+ }; -+ if header.ty != CommandTy::RespOkNodata { -+ log::error!( -+ "virtio-gpud: unexpected response for transfer: {:?}", -+ header.ty -+ ); -+ return Err(syscall::Error::new(EIO)); ++ match self.send_request(req).await { ++ Ok(header) if header.ty == CommandTy::RespOkNodata => {} ++ Ok(header) => log::error!("virtio-gpud: xfer returned {:?}", header.ty), ++ Err(err) => log::error!("virtio-gpud: failed to send xfer: {err}"), + } // FIXME once we support resizing we also need to check that the current and target size match -- if self.displays[display_id as usize].active_resource != Some(framebuffer.buffer.id) -+ if self.displays[display_id as usize].active_resource -+ != Some(framebuffer.buffer.id) + if self.displays[display_id as usize].active_resource != Some(framebuffer.buffer.id) { - let scanout_request = Dma::new(SetScanout::new( + let scanout_request = match Dma::new(SetScanout::new( @@ -7055,32 
+8090,19 @@ index 22a985ee..075502a2 100644 + )) { + Ok(r) => r, + Err(err) => { -+ log::error!( -+ "virtio-gpud: failed to allocate DMA for scanout: {err}" -+ ); -+ return Err(syscall::Error::new(EIO)); ++ log::error!("virtio-gpud: failed to allocate scanout DMA: {err}"); ++ return Ok(()); + } + }; -+ let header = match self.send_request(scanout_request).await { -+ Ok(h) => h, -+ Err(err) => { -+ log::error!( -+ "virtio-gpud: failed to send scanout request: {err}" -+ ); -+ return Err(syscall::Error::new(EIO)); -+ } -+ }; -+ if header.ty != CommandTy::RespOkNodata { -+ log::error!( -+ "virtio-gpud: unexpected response for scanout: {:?}", -+ header.ty -+ ); -+ return Err(syscall::Error::new(EIO)); ++ match self.send_request(scanout_request).await { ++ Ok(header) if header.ty == CommandTy::RespOkNodata => {} ++ Ok(header) => log::error!("virtio-gpud: scanout returned {:?}", header.ty), ++ Err(err) => log::error!("virtio-gpud: failed to send scanout: {err}"), + } self.displays[display_id as usize].active_resource = Some(framebuffer.buffer.id); } -@@ -472,8 +704,27 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { +@@ -472,8 +592,18 @@ impl<'a> GraphicsAdapter for VirtGpuAdapter<'a> { framebuffer.buffer.id, damage.clip(framebuffer.width, framebuffer.height).into(), ); @@ -7089,27 +8111,30 @@ index 22a985ee..075502a2 100644 + let flush_dma = match Dma::new(flush) { + Ok(d) => d, + Err(err) => { -+ log::error!("virtio-gpud: failed to allocate DMA for flush: {err}"); -+ return Err(syscall::Error::new(EIO)); ++ log::error!("virtio-gpud: failed to allocate flush DMA: {err}"); ++ return Ok(()); + } + }; -+ let header = match self.send_request(flush_dma).await { -+ Ok(h) => h, -+ Err(err) => { -+ log::error!("virtio-gpud: failed to send flush request: {err}"); -+ return Err(syscall::Error::new(EIO)); -+ } -+ }; -+ if header.ty != CommandTy::RespOkNodata { -+ log::error!( -+ "virtio-gpud: unexpected response for flush: {:?}", -+ header.ty -+ ); -+ return 
Err(syscall::Error::new(EIO)); ++ match self.send_request(flush_dma).await { ++ Ok(header) if header.ty == CommandTy::RespOkNodata => {} ++ Ok(header) => log::error!("virtio-gpud: flush returned {:?}", header.ty), ++ Err(err) => log::error!("virtio-gpud: failed to send flush: {err}"), + } } Ok(()) +diff --git a/drivers/hwd/Cargo.toml b/drivers/hwd/Cargo.toml +index 3d37cfb3..40b51a1b 100644 +--- a/drivers/hwd/Cargo.toml ++++ b/drivers/hwd/Cargo.toml +@@ -6,6 +6,7 @@ edition = "2018" + + [dependencies] + fdt.workspace = true ++libc.workspace = true + log.workspace = true + ron.workspace = true + libredox = { workspace = true, default-features = false, features = ["std", "call"] } diff --git a/drivers/hwd/src/backend/acpi.rs b/drivers/hwd/src/backend/acpi.rs index 3da41d63..12d26261 100644 --- a/drivers/hwd/src/backend/acpi.rs @@ -7265,20 +8290,592 @@ index 3da41d63..12d26261 100644 + is_elan_touchpad_id(id) || is_cypress_touchpad_id(id) || is_synaptics_rmi_id(id) +} diff --git a/drivers/hwd/src/main.rs b/drivers/hwd/src/main.rs -index 79360e34..4a2b9469 100644 +index 79360e34..a0462f51 100644 --- a/drivers/hwd/src/main.rs +++ b/drivers/hwd/src/main.rs -@@ -38,7 +38,7 @@ fn daemon(daemon: daemon::Daemon) -> ! { +@@ -1,3 +1,5 @@ ++use std::os::fd::AsRawFd; ++use std::os::unix::process::CommandExt; + use std::process; + + mod backend; +@@ -37,8 +39,34 @@ fn daemon(daemon: daemon::Daemon) -> ! { + //TODO: launch pcid based on backend information? // Must launch after acpid but before probe calls /scheme/acpi/symbols - #[allow(deprecated, reason = "we can't yet move this to init")] +- #[allow(deprecated, reason = "we can't yet move this to init")] - daemon::Daemon::spawn(process::Command::new("pcid")); -+ let _ = daemon::Daemon::spawn(process::Command::new("pcid")); ++ // Fire-and-forget: daemon::Daemon::spawn blocks until pcid signals readiness, ++ // but pcid only signals after full PCI enumeration. 
If enumeration hangs on ++ // real hardware (unresponsive device, complex AML), hwd deadlocks initfs. ++ { ++ match std::io::pipe() { ++ Ok((_read_end, write_end)) => { ++ let write_fd: std::os::fd::OwnedFd = write_end.into(); ++ let raw_fd = write_fd.as_raw_fd(); ++ let mut cmd = std::process::Command::new("pcid"); ++ cmd.env("INIT_NOTIFY", raw_fd.to_string()); ++ unsafe { ++ cmd.pre_exec(move || { ++ if libc::fcntl(raw_fd, libc::F_SETFD, 0) == -1 { ++ return Err(std::io::Error::last_os_error()); ++ } ++ Ok(()) ++ }); ++ } ++ match cmd.spawn() { ++ Ok(_) => {} ++ Err(err) => log::error!("hwd: failed to spawn pcid: {}", err), ++ } ++ } ++ Err(err) => { ++ log::error!("hwd: failed to create pcid notification pipe: {}", err); ++ } ++ } ++ } daemon.ready(); +diff --git a/drivers/input/ps2d/src/controller.rs b/drivers/input/ps2d/src/controller.rs +index d7af4cba..638b7cc1 100644 +--- a/drivers/input/ps2d/src/controller.rs ++++ b/drivers/input/ps2d/src/controller.rs +@@ -97,6 +97,14 @@ enum KeyboardCommandData { + const DEFAULT_TIMEOUT: u64 = 50_000; + // Reset timeout in microseconds + const RESET_TIMEOUT: u64 = 1_000_000; ++// Maximum bytes to drain during flush (Linux: I8042_BUFFER_SIZE) ++const FLUSH_LIMIT: usize = 4096; ++// Controller self-test pass value (Linux: I8042_RET_CTL_TEST) ++const SELFTEST_PASS: u8 = 0x55; ++// Controller self-test retries (Linux: 5 attempts) ++const SELFTEST_RETRIES: usize = 5; ++// AUX port test pass value (Linux returns 0x00 on success) ++const AUX_TEST_PASS: u8 = 0x00; + + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + pub struct Ps2 { +@@ -271,6 +279,50 @@ impl Ps2 { + } + } + ++ /// Drain all pending bytes from the controller output buffer. ++ /// Borrowed from Linux i8042_flush(): stale firmware/BIOS bytes can be ++ /// misinterpreted as device responses during initialization. 
++ fn flush(&mut self) -> usize { ++ let mut count = 0; ++ while self.status().contains(StatusFlags::OUTPUT_FULL) { ++ if count >= FLUSH_LIMIT { ++ warn!("flush: exceeded limit, controller may be stuck"); ++ break; ++ } ++ let data = self.data.read(); ++ trace!("flush: discarded {:02X}", data); ++ count += 1; ++ } ++ if count > 0 { ++ debug!("flushed {} stale bytes from controller", count); ++ } ++ count ++ } ++ ++ /// Test the AUX (mouse) port via controller command 0xA9. ++ /// Borrowed from Linux: verifies electrical connectivity before ++ /// attempting to talk to the mouse. Returns true if the port passed. ++ fn test_aux_port(&mut self) -> bool { ++ if let Err(err) = self.command(Command::TestSecond) { ++ warn!("aux port test command failed: {:?}", err); ++ return false; ++ } ++ match self.read() { ++ Ok(AUX_TEST_PASS) => { ++ debug!("aux port test passed"); ++ true ++ } ++ Ok(val) => { ++ warn!("aux port test failed: {:02X}", val); ++ false ++ } ++ Err(err) => { ++ warn!("aux port test read timeout: {:?}", err); ++ false ++ } ++ } ++ } ++ + pub fn init_keyboard(&mut self) -> Result<(), Error> { + let mut b; + +@@ -308,66 +360,125 @@ impl Ps2 { + } + + pub fn init(&mut self) -> Result<(), Error> { ++ // Linux i8042_controller_check(): verify controller is present by ++ // flushing any stale data. A stuck output buffer means no controller. ++ self.flush(); ++ ++ // Bare-metal controllers may be slow after firmware handoff. ++ // Give the controller a moment to finish POST before sending commands. ++ std::thread::sleep(std::time::Duration::from_millis(50)); ++ + { +- // Disable devices +- self.command(Command::DisableFirst)?; +- self.command(Command::DisableSecond)?; ++ // Disable both ports first — use retry because the controller ++ // may still be settling or temporarily unresponsive. ++ // Failure here is non-fatal: we continue and attempt the rest ++ // of initialization. A truly absent controller will fail later ++ // at self-test or keyboard reset. 
++ if let Err(err) = self.retry( ++ format_args!("disable first port"), ++ 3, ++ |x| x.command(Command::DisableFirst), ++ ) { ++ warn!("disable first port failed: {:?}", err); ++ } ++ if let Err(err) = self.retry( ++ format_args!("disable second port"), ++ 3, ++ |x| x.command(Command::DisableSecond), ++ ) { ++ warn!("disable second port failed: {:?}", err); ++ } + } + +- // Disable clocks, disable interrupts, and disable translate ++ // Flush again after disabling — firmware may have queued more bytes ++ self.flush(); ++ ++ // Linux i8042_controller_init() step 1: write a known-safe config ++ // (interrupts off, both ports disabled) so stale config can't cause ++ // spurious interrupts during the rest of init. + { +- // Since the default config may have interrupts enabled, and the kernel may eat up +- // our data in that case, we will write a config without reading the current one + let config = ConfigFlags::POST_PASSED + | ConfigFlags::FIRST_DISABLED + | ConfigFlags::SECOND_DISABLED; + self.set_config(config)?; + } + +- // The keyboard seems to still collect bytes even when we disable +- // the port, so we must disable the keyboard too ++ // Linux i8042_controller_selftest(): retry up to 5 times with delay. ++ // "On some really fragile systems this does not take the first time." 
++ { ++ let mut passed = false; ++ for attempt in 0..SELFTEST_RETRIES { ++ if let Err(err) = self.command(Command::TestController) { ++ warn!("self-test command failed (attempt {}): {:?}", attempt + 1, err); ++ continue; ++ } ++ match self.read() { ++ Ok(SELFTEST_PASS) => { ++ passed = true; ++ break; ++ } ++ Ok(val) => { ++ warn!( ++ "self-test unexpected value {:02X} (attempt {}/{})", ++ val, ++ attempt + 1, ++ SELFTEST_RETRIES ++ ); ++ } ++ Err(err) => { ++ warn!("self-test read timeout (attempt {}): {:?}", attempt + 1, err); ++ } ++ } ++ // Linux: msleep(50) between retries ++ std::thread::sleep(std::time::Duration::from_millis(50)); ++ } ++ if !passed { ++ // Linux on x86: "giving up on controller selftest, continuing anyway" ++ warn!("controller self-test did not pass after {} attempts, continuing", SELFTEST_RETRIES); ++ } ++ } ++ ++ // Flush any bytes the self-test may have left behind ++ self.flush(); ++ ++ // Linux i8042_controller_init() step 2: set keyboard defaults ++ // (disable scanning so keyboard doesn't send scancodes during init) + self.retry(format_args!("keyboard defaults"), 4, |x| { +- // Set defaults and disable scanning + let b = x.keyboard_command(KeyboardCommand::SetDefaultsDisable)?; + if b != 0xFA { + error!("keyboard failed to set defaults: {:02X}", b); + return Err(Error::CommandRetry); + } +- + Ok(b) + })?; + +- { +- // Perform the self test +- self.command(Command::TestController)?; +- let r = self.read()?; +- if r != 0x55 { +- warn!("self test unexpected value: {:02X}", r); +- } +- } +- + // Initialize keyboard + if let Err(err) = self.init_keyboard() { + error!("failed to initialize keyboard: {:?}", err); + return Err(err); + } + +- // Enable second device +- let enable_mouse = match self.command(Command::EnableSecond) { +- Ok(()) => true, +- Err(err) => { +- error!("failed to initialize mouse: {:?}", err); +- false ++ // Linux: test AUX port (command 0xA9) before enabling. 
++ // Skips mouse init entirely if the port is not electrically present. ++ let aux_ok = self.test_aux_port(); ++ ++ // Enable second device (mouse) only if AUX port tested OK ++ let enable_mouse = if aux_ok { ++ match self.command(Command::EnableSecond) { ++ Ok(()) => true, ++ Err(err) => { ++ warn!("failed to enable aux port after test passed: {:?}", err); ++ false ++ } + } ++ } else { ++ info!("skipping mouse init: aux port test did not pass"); ++ false + }; + + { +- // Enable keyboard data reporting +- // Use inner function to prevent retries +- // Response is ignored since scanning is now on + if let Err(err) = self.keyboard_command_inner(KeyboardCommand::EnableReporting as u8) { + error!("failed to initialize keyboard reporting: {:?}", err); +- //TODO: fix by using interrupts? + } + } + +diff --git a/drivers/input/ps2d/src/main.rs b/drivers/input/ps2d/src/main.rs +index db17de2a..1ae055e4 100644 +--- a/drivers/input/ps2d/src/main.rs ++++ b/drivers/input/ps2d/src/main.rs +@@ -11,7 +11,7 @@ use std::process; + + use common::acquire_port_io_rights; + use event::{user_data, EventQueue}; +-use inputd::ProducerHandle; ++use inputd::InputProducer; + + use crate::state::Ps2d; + +@@ -31,7 +31,10 @@ fn daemon(daemon: daemon::Daemon) -> ! { + + acquire_port_io_rights().expect("ps2d: failed to get I/O permission"); + +- let input = ProducerHandle::new().expect("ps2d: failed to open input producer"); ++ let keyboard_input = InputProducer::new_named_or_fallback("ps2-keyboard") ++ .expect("ps2d: failed to open input producer"); ++ let mouse_input = InputProducer::new_named_or_fallback("ps2-mouse") ++ .expect("ps2d: failed to open input producer"); + + user_data! { + enum Source { +@@ -93,7 +96,7 @@ fn daemon(daemon: daemon::Daemon) -> ! 
{ + + daemon.ready(); + +- let mut ps2d = Ps2d::new(input, time_file); ++ let mut ps2d = Ps2d::new(keyboard_input, mouse_input, time_file); + + let mut data = [0; 256]; + for event in event_queue.map(|e| e.expect("ps2d: failed to get next event").user_data) { +diff --git a/drivers/input/ps2d/src/mouse.rs b/drivers/input/ps2d/src/mouse.rs +index 9e95ab88..8087c8c4 100644 +--- a/drivers/input/ps2d/src/mouse.rs ++++ b/drivers/input/ps2d/src/mouse.rs +@@ -5,6 +5,11 @@ pub const RESET_RETRIES: usize = 10; + pub const RESET_TIMEOUT: Duration = Duration::from_millis(1000); + pub const COMMAND_TIMEOUT: Duration = Duration::from_millis(100); + ++const CMD_ACK: u8 = 0xFA; ++const CMD_RESEND: u8 = 0xFE; ++const BAT_COMPLETE: u8 = 0xAA; ++const BAT_FAIL: u8 = 0xFC; ++ + #[derive(Clone, Copy, Debug)] + #[repr(u8)] + #[allow(dead_code)] +@@ -58,9 +63,11 @@ impl MouseTx { + + fn handle(&mut self, data: u8, ps2: &mut Ps2) -> Result { + if self.write_i < self.write.len() { +- if data == 0xFA { ++ if data == CMD_ACK { + self.write_i += 1; + self.try_write(ps2)?; ++ } else if data == CMD_RESEND { ++ self.try_write(ps2)?; + } else { + log::error!("unknown mouse response {:02X}", data); + return Err(()); +@@ -251,25 +258,43 @@ impl MouseState { + MouseResult::None + } + MouseState::Reset => { +- if data == 0xFA { +- log::debug!("mouse reset ok"); ++ if data == CMD_ACK { ++ log::debug!("mouse reset ack"); + MouseResult::Timeout(RESET_TIMEOUT) +- } else if data == 0xAA { ++ } else if data == BAT_COMPLETE { + log::debug!("BAT completed"); + *self = MouseState::Bat; + MouseResult::Timeout(COMMAND_TIMEOUT) ++ } else if data == CMD_RESEND { ++ // Device asks us to resend the reset command (0xFF). ++ // Resend WITHOUT incrementing the retry counter — 0xFE is ++ // a normal protocol response, not a failure. 
++ log::debug!("mouse requests resend during reset, resending 0xFF"); ++ match ps2.mouse_command_async(MouseCommand::Reset as u8) { ++ Ok(()) => MouseResult::Timeout(RESET_TIMEOUT), ++ Err(err) => { ++ log::error!("failed to resend mouse reset: {:?}", err); ++ self.reset(ps2) ++ } ++ } ++ } else if data == BAT_FAIL { ++ log::warn!("mouse BAT failed (0xFC)"); ++ self.reset(ps2) + } else { + log::warn!("unknown mouse response {:02X} after reset", data); + self.reset(ps2) + } + } + MouseState::Bat => { +- if data == MouseId::Base as u8 { +- // Enable intellimouse features ++ if data == CMD_RESEND { ++ // 0xFE after BAT is unusual — the device may be re-issuing ++ // BAT. Wait for the next byte (device ID or another BAT). ++ log::debug!("mouse resend (0xFE) during BAT, waiting"); ++ MouseResult::Timeout(COMMAND_TIMEOUT) ++ } else if data == MouseId::Base as u8 { + log::debug!("BAT mouse id {:02X} (base)", data); + self.identify_touchpad(ps2) + } else if data == MouseId::Intellimouse1 as u8 { +- // Extra packet already enabled + log::debug!("BAT mouse id {:02X} (intellimouse)", data); + self.enable_reporting(data, ps2) + } else { +@@ -320,10 +345,17 @@ impl MouseState { + } + } + MouseState::DeviceId => { +- if data == 0xFA { +- // Command OK response +- //TODO: handle this separately? 
++ if data == CMD_ACK { + MouseResult::Timeout(COMMAND_TIMEOUT) ++ } else if data == CMD_RESEND { ++ log::debug!("mouse resend during DeviceId, resending GetDeviceId"); ++ match ps2.mouse_command_async(MouseCommand::GetDeviceId as u8) { ++ Ok(()) => MouseResult::Timeout(COMMAND_TIMEOUT), ++ Err(err) => { ++ log::error!("failed to resend GetDeviceId: {:?}", err); ++ self.reset(ps2) ++ } ++ } + } else if data == MouseId::Base as u8 || data == MouseId::Intellimouse1 as u8 { + log::debug!("mouse id {:02X}", data); + self.enable_reporting(data, ps2) +@@ -333,10 +365,28 @@ impl MouseState { + } + } + MouseState::EnableReporting { id } => { +- log::debug!("mouse id {:02X} enable reporting {:02X}", id, data); +- //TODO: handle response ok/error +- *self = MouseState::Streaming { id }; +- MouseResult::None ++ if data == CMD_ACK { ++ log::debug!("mouse id {:02X} reporting enabled", id); ++ *self = MouseState::Streaming { id }; ++ MouseResult::None ++ } else if data == CMD_RESEND { ++ log::debug!("mouse resend during EnableReporting, resending 0xF4"); ++ match ps2.mouse_command_async(MouseCommand::EnableReporting as u8) { ++ Ok(()) => MouseResult::Timeout(COMMAND_TIMEOUT), ++ Err(err) => { ++ log::error!("failed to resend EnableReporting: {:?}", err); ++ *self = MouseState::Streaming { id }; ++ MouseResult::None ++ } ++ } ++ } else { ++ log::warn!( ++ "unexpected mouse response {:02X} during enable reporting, streaming anyway", ++ data ++ ); ++ *self = MouseState::Streaming { id }; ++ MouseResult::None ++ } + } + MouseState::Streaming { id } => { + MouseResult::Packet(data, id == MouseId::Intellimouse1 as u8) +diff --git a/drivers/input/ps2d/src/state.rs b/drivers/input/ps2d/src/state.rs +index 9018dc6b..da304e05 100644 +--- a/drivers/input/ps2d/src/state.rs ++++ b/drivers/input/ps2d/src/state.rs +@@ -1,4 +1,4 @@ +-use inputd::ProducerHandle; ++use inputd::InputProducer; + use log::{error, warn}; + use orbclient::{ButtonEvent, KeyEvent, MouseEvent, MouseRelativeEvent, 
ScrollEvent}; + use std::{ +@@ -44,7 +44,8 @@ pub struct Ps2d { + ps2: Ps2, + vmmouse: bool, + vmmouse_relative: bool, +- input: ProducerHandle, ++ keyboard_input: InputProducer, ++ mouse_input: InputProducer, + time_file: File, + extended: bool, + mouse_x: i32, +@@ -59,9 +60,11 @@ pub struct Ps2d { + } + + impl Ps2d { +- pub fn new(input: ProducerHandle, time_file: File) -> Self { ++ pub fn new(keyboard_input: InputProducer, mouse_input: InputProducer, time_file: File) -> Self { + let mut ps2 = Ps2::new(); +- ps2.init().expect("failed to initialize"); ++ if let Err(err) = ps2.init() { ++ log::error!("ps2d: controller init failed: {:?}", err); ++ } + + // FIXME add an option for orbital to disable this when an app captures the mouse. + let vmmouse_relative = false; +@@ -77,7 +80,8 @@ impl Ps2d { + ps2, + vmmouse, + vmmouse_relative, +- input, ++ keyboard_input, ++ mouse_input, + time_file, + extended: false, + mouse_x: 0, +@@ -273,7 +277,7 @@ impl Ps2d { + }; + + if scancode != 0 { +- self.input ++ self.keyboard_input + .write_event( + KeyEvent { + character: '\0', +@@ -304,7 +308,7 @@ impl Ps2d { + + if self.vmmouse_relative { + if dx != 0 || dy != 0 { +- self.input ++ self.mouse_input + .write_event( + MouseRelativeEvent { + dx: dx as i32, +@@ -320,14 +324,14 @@ impl Ps2d { + if x != self.mouse_x || y != self.mouse_y { + self.mouse_x = x; + self.mouse_y = y; +- self.input ++ self.mouse_input + .write_event(MouseEvent { x, y }.to_event()) + .expect("ps2d: failed to write mouse event"); + } + }; + + if dz != 0 { +- self.input ++ self.mouse_input + .write_event( + ScrollEvent { + x: 0, +@@ -348,7 +352,7 @@ impl Ps2d { + self.mouse_left = left; + self.mouse_middle = middle; + self.mouse_right = right; +- self.input ++ self.mouse_input + .write_event( + ButtonEvent { + left, +@@ -441,13 +445,13 @@ impl Ps2d { + } + + if dx != 0 || dy != 0 { +- self.input ++ self.mouse_input + .write_event(MouseRelativeEvent { dx, dy }.to_event()) + .expect("ps2d: failed to write mouse 
event"); + } + + if dz != 0 { +- self.input ++ self.mouse_input + .write_event(ScrollEvent { x: 0, y: dz }.to_event()) + .expect("ps2d: failed to write scroll event"); + } +@@ -462,7 +466,7 @@ impl Ps2d { + self.mouse_left = left; + self.mouse_middle = middle; + self.mouse_right = right; +- self.input ++ self.mouse_input + .write_event( + ButtonEvent { + left, +diff --git a/drivers/input/usbhidd/src/main.rs b/drivers/input/usbhidd/src/main.rs +index 15c5b778..706c4008 100644 +--- a/drivers/input/usbhidd/src/main.rs ++++ b/drivers/input/usbhidd/src/main.rs +@@ -1,7 +1,7 @@ + use anyhow::{Context, Result}; + use std::{env, thread, time}; + +-use inputd::ProducerHandle; ++use inputd::InputProducer; + use orbclient::KeyEvent as OrbKeyEvent; + use rehid::{ + report_desc::{ReportTy, REPORT_DESC_TY}, +@@ -15,7 +15,7 @@ use xhcid_interface::{ + + mod reqs; + +-fn send_key_event(display: &mut ProducerHandle, usage_page: u16, usage: u16, pressed: bool) { ++fn send_key_event(display: &mut InputProducer, usage_page: u16, usage: u16, pressed: bool) { + let scancode = match usage_page { + 0x07 => match usage { + 0x04 => orbclient::K_A, +@@ -272,7 +272,9 @@ fn main() -> Result<()> { + let report_ty = ReportTy::Input; + let report_id = 0; + +- let mut display = ProducerHandle::new().context("Failed to open input socket")?; ++ let producer_name = format!("usb-{}-if{}", port, interface_num); ++ let mut display = InputProducer::new_named_or_fallback(&producer_name) ++ .context("Failed to open input socket")?; + let mut endpoint_opt = match endp_desc_opt { + Some((endp_num, _endp_desc)) => match handle.open_endpoint(endp_num as u8) { + Ok(ok) => Some(ok), diff --git a/drivers/inputd/src/lib.rs b/drivers/inputd/src/lib.rs -index b68e8211..0627f301 100644 +index b68e8211..f07a411d 100644 --- a/drivers/inputd/src/lib.rs +++ b/drivers/inputd/src/lib.rs @@ -64,25 +64,53 @@ impl ConsumerHandle { @@ -7365,11 +8962,452 @@ index b68e8211..0627f301 100644 Ok(Some(event)) } } +@@ -171,13 +203,11 
@@ impl ControlHandle { + Ok(Self(File::open(path)?)) + } + +- /// Sent to Handle::Display + pub fn activate_vt(&mut self, vt: usize) -> io::Result { + let cmd = ControlEvent::from(VtActivate { vt }); + self.0.write(unsafe { any_as_u8_slice(&cmd) }) + } + +- /// Sent to Handle::Producer + pub fn activate_keymap(&mut self, keymap: usize) -> io::Result { + let cmd = ControlEvent::from(KeymapActivate { keymap }); + self.0.write(unsafe { any_as_u8_slice(&cmd) }) +@@ -209,3 +239,195 @@ impl ProducerHandle { + Ok(()) + } + } ++ ++pub struct NamedProducerHandle(File); ++ ++impl NamedProducerHandle { ++ pub fn new(name: &str) -> io::Result { ++ let path = format!("/scheme/input/producer/{name}"); ++ Ok(Self(File::open(path)?)) ++ } ++ ++ pub fn write_event(&mut self, event: orbclient::Event) -> io::Result<()> { ++ self.0.write(&event)?; ++ Ok(()) ++ } ++} ++ ++/// Convenience wrapper that tries a named producer first, ++/// falling back to the legacy anonymous producer on failure. ++pub enum InputProducer { ++ Named(NamedProducerHandle), ++ Legacy(ProducerHandle), ++} ++ ++impl InputProducer { ++ /// Open a named producer (`/scheme/input/producer/{name}`). ++ /// If the named path is unavailable, fall back to the legacy ++ /// `/scheme/input/producer` path so the driver keeps working on ++ /// older inputd builds or degraded schemes. ++ pub fn new_named_or_fallback(name: &str) -> io::Result { ++ match NamedProducerHandle::new(name) { ++ Ok(named) => Ok(InputProducer::Named(named)), ++ Err(named_err) => { ++ log::debug!( ++ "inputd: named producer '{}' unavailable ({}), falling back to legacy", ++ name, ++ named_err ++ ); ++ ProducerHandle::new().map(InputProducer::Legacy) ++ } ++ } ++ } ++ ++ /// Open the legacy anonymous producer directly. 
++ pub fn new_legacy() -> io::Result { ++ ProducerHandle::new().map(InputProducer::Legacy) ++ } ++ ++ pub fn write_event(&mut self, event: orbclient::Event) -> io::Result<()> { ++ match self { ++ InputProducer::Named(h) => h.write_event(event), ++ InputProducer::Legacy(h) => h.write_event(event), ++ } ++ } ++} ++ ++pub struct DeviceConsumerHandle(File); ++ ++pub enum DeviceConsumerHandleEvent<'a> { ++ Events(&'a [Event]), ++} ++ ++impl DeviceConsumerHandle { ++ pub fn new(device_name: &str) -> io::Result { ++ let path = format!("/scheme/input/{device_name}"); ++ Ok(Self(File::open(path)?)) ++ } ++ ++ pub fn event_handle(&self) -> BorrowedFd<'_> { ++ self.0.as_fd() ++ } ++ ++ pub fn read_events<'a>( ++ &self, ++ events: &'a mut [Event], ++ ) -> io::Result> { ++ match read_to_slice(self.0.as_fd(), events) { ++ Ok(count) => Ok(DeviceConsumerHandleEvent::Events(&events[..count])), ++ Err(err) => Err(err.into()), ++ } ++ } ++} ++ ++#[derive(Debug, Clone)] ++#[repr(C)] ++pub struct HotplugEventHeader { ++ pub kind: u32, ++ pub device_id: u32, ++ pub name_len: u32, ++ pub reserved: u32, ++} ++ ++#[derive(Debug, Clone)] ++pub struct HotplugEvent { ++ pub kind: u32, ++ pub device_id: u32, ++ pub name: String, ++} ++ ++pub struct HotplugHandle { ++ file: File, ++ partial: Vec, ++} ++ ++impl HotplugHandle { ++ pub fn new() -> io::Result { ++ let file = File::open("/scheme/input/events")?; ++ Ok(Self { ++ file, ++ partial: Vec::new(), ++ }) ++ } ++ ++ pub fn event_handle(&self) -> BorrowedFd<'_> { ++ self.file.as_fd() ++ } ++ ++ pub fn read_event(&mut self) -> io::Result> { ++ let mut tmp = [0u8; 256]; ++ match self.file.read(&mut tmp) { ++ Ok(0) => {} ++ Ok(n) => self.partial.extend_from_slice(&tmp[..n]), ++ Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} ++ Err(e) => return Err(e), ++ } ++ ++ if self.partial.len() < 16 { ++ return Ok(None); ++ } ++ ++ let header = HotplugEventHeader { ++ kind: u32::from_ne_bytes(self.partial[0..4].try_into().map_err(|_| { ++ 
io::Error::new(io::ErrorKind::InvalidData, "header parse failed") ++ })?), ++ device_id: u32::from_ne_bytes(self.partial[4..8].try_into().map_err(|_| { ++ io::Error::new(io::ErrorKind::InvalidData, "header parse failed") ++ })?), ++ name_len: u32::from_ne_bytes(self.partial[8..12].try_into().map_err(|_| { ++ io::Error::new(io::ErrorKind::InvalidData, "header parse failed") ++ })?), ++ reserved: 0, ++ }; ++ ++ let total_len = 16 + header.name_len as usize; ++ if self.partial.len() < total_len { ++ return Ok(None); ++ } ++ ++ let name = String::from_utf8(self.partial[16..total_len].to_vec()).map_err(|e| { ++ io::Error::new(io::ErrorKind::InvalidData, format!("invalid UTF-8: {e}")) ++ })?; ++ ++ self.partial.drain(..total_len); ++ ++ Ok(Some(HotplugEvent { ++ kind: header.kind, ++ device_id: header.device_id, ++ name, ++ })) ++ } ++} ++ ++pub const RESERVED_DEVICE_NAMES: &[&str] = &[ ++ "producer", ++ "consumer", ++ "consumer_bootlog", ++ "events", ++ "handle", ++ "handle_early", ++ "control", ++]; ++ ++pub struct InputDeviceLister; ++ ++impl InputDeviceLister { ++ pub fn list() -> io::Result> { ++ let mut dir = std::fs::read_dir("/scheme/input/")?; ++ let mut devices = Vec::new(); ++ loop { ++ match dir.next() { ++ Some(Ok(entry)) => { ++ if let Some(name) = entry.file_name().to_str() { ++ if !RESERVED_DEVICE_NAMES.contains(&name) { ++ devices.push(name.to_owned()); ++ } ++ } ++ } ++ Some(Err(e)) => return Err(e), ++ None => break, ++ } ++ } ++ Ok(devices) ++ } ++} diff --git a/drivers/inputd/src/main.rs b/drivers/inputd/src/main.rs -index 07aa943e..61641b9f 100644 +index 07aa943e..89018568 100644 --- a/drivers/inputd/src/main.rs +++ b/drivers/inputd/src/main.rs -@@ -274,7 +274,7 @@ impl SchemeSync for InputScheme { +@@ -13,7 +13,7 @@ + + use core::mem::size_of; + use std::borrow::Cow; +-use std::collections::BTreeSet; ++use std::collections::{BTreeMap, BTreeSet}; + use std::mem::transmute; + use std::ops::ControlFlow; + use std::sync::atomic::{AtomicUsize, 
Ordering}; +@@ -26,8 +26,9 @@ use redox_scheme::{CallerCtx, OpenResult, Response, SignalBehavior, Socket}; + + use orbclient::{Event, EventOption}; + use scheme_utils::{Blocking, FpathWriter, HandleMap}; ++use syscall::dirent::{DirEntry, DirentBuf, DirentKind}; + use syscall::schemev2::NewFdFlags; +-use syscall::{Error as SysError, EventFlags, EACCES, EBADF, EEXIST, EINVAL}; ++use syscall::{Error as SysError, EventFlags, EACCES, EBADF, EEXIST, EINVAL, ENOENT, ENOTDIR}; + + pub mod keymap; + +@@ -35,8 +36,57 @@ use keymap::KeymapKind; + + use crate::keymap::KeymapData; + ++const DEVICE_ADD: u32 = 1; ++const DEVICE_REMOVE: u32 = 2; ++ ++fn validate_producer_name(name: &str) -> Result<(), SysError> { ++ if name.is_empty() || name.contains('/') { ++ return Err(SysError::new(EINVAL)); ++ } ++ if inputd::RESERVED_DEVICE_NAMES.contains(&name) { ++ return Err(SysError::new(EINVAL)); ++ } ++ Ok(()) ++} ++ ++fn serialize_hotplug(kind: u32, device_id: u32, name: &str) -> Vec { ++ let name_bytes = name.as_bytes(); ++ let header = HotplugHeader { ++ kind, ++ device_id, ++ name_len: name_bytes.len() as u32, ++ _reserved: 0, ++ }; ++ let mut out = Vec::with_capacity(16 + name_bytes.len()); ++ out.extend_from_slice(&header.to_bytes()); ++ out.extend_from_slice(name_bytes); ++ out ++} ++ ++#[repr(C)] ++struct HotplugHeader { ++ kind: u32, ++ device_id: u32, ++ name_len: u32, ++ _reserved: u32, ++} ++ ++impl HotplugHeader { ++ fn to_bytes(&self) -> [u8; 16] { ++ let mut buf = [0u8; 16]; ++ buf[0..4].copy_from_slice(&self.kind.to_ne_bytes()); ++ buf[4..8].copy_from_slice(&self.device_id.to_ne_bytes()); ++ buf[8..12].copy_from_slice(&self.name_len.to_ne_bytes()); ++ buf[12..16].copy_from_slice(&self._reserved.to_ne_bytes()); ++ buf ++ } ++} ++ + enum Handle { + Producer, ++ NamedProducer { ++ name: String, ++ }, + Consumer { + events: EventFlags, + pending: Vec, +@@ -46,6 +96,17 @@ enum Handle { + notified: bool, + vt: usize, + }, ++ DeviceConsumer { ++ device_name: String, ++ events: 
EventFlags, ++ pending: Vec, ++ notified: bool, ++ }, ++ HotplugEvents { ++ events: EventFlags, ++ pending: Vec, ++ notified: bool, ++ }, + Display { + events: EventFlags, + pending: Vec, +@@ -72,6 +133,9 @@ struct InputScheme { + rshift: bool, + + has_new_events: bool, ++ ++ devices: BTreeMap, ++ next_device_id: AtomicUsize, + } + + impl InputScheme { +@@ -90,9 +154,28 @@ impl InputScheme { + lshift: false, + rshift: false, + has_new_events: false, ++ ++ devices: BTreeMap::new(), ++ next_device_id: AtomicUsize::new(1), + } + } + ++ fn emit_hotplug(&mut self, kind: u32, device_id: u32, name: &str) { ++ let record = serialize_hotplug(kind, device_id, name); ++ for handle in self.handles.values_mut() { ++ if let Handle::HotplugEvents { ++ pending, ++ notified, ++ .. ++ } = handle ++ { ++ pending.extend_from_slice(&record); ++ *notified = false; ++ } ++ } ++ self.has_new_events = true; ++ } ++ + fn switch_vt(&mut self, new_active: usize) { + if let Some(active_vt) = self.active_vt { + if new_active == active_vt { +@@ -146,6 +229,43 @@ impl InputScheme { + + self.active_keymap = KeymapData::new(new_active.into()); + } ++ ++ fn deliver_to_legacy_consumers(&mut self, buf: &[u8]) { ++ if let Some(active_vt) = self.active_vt { ++ for handle in self.handles.values_mut() { ++ if let Handle::Consumer { ++ pending, ++ notified, ++ vt, ++ .. ++ } = handle ++ { ++ if *vt != active_vt { ++ continue; ++ } ++ pending.extend_from_slice(buf); ++ *notified = false; ++ } ++ } ++ } ++ } ++ ++ fn deliver_to_device_consumers(&mut self, name: &str, buf: &[u8]) { ++ for handle in self.handles.values_mut() { ++ if let Handle::DeviceConsumer { ++ device_name, ++ pending, ++ notified, ++ .. 
++ } = handle ++ { ++ if device_name == name { ++ pending.extend_from_slice(buf); ++ *notified = false; ++ } ++ } ++ } ++ } + } + + impl SchemeSync for InputScheme { +@@ -170,7 +290,23 @@ impl SchemeSync for InputScheme { + let command = path_parts.next().ok_or(SysError::new(EINVAL))?; + + let handle_ty = match command { +- "producer" => Handle::Producer, ++ "producer" => { ++ if let Some(name) = path_parts.next() { ++ validate_producer_name(name)?; ++ if self.devices.contains_key(name) { ++ return Err(SysError::new(EEXIST)); ++ } ++ let device_id = self.next_device_id.fetch_add(1, Ordering::SeqCst) as u32; ++ self.devices.insert(name.to_owned(), device_id); ++ let handle = Handle::NamedProducer { ++ name: name.to_owned(), ++ }; ++ self.emit_hotplug(DEVICE_ADD, device_id, name); ++ handle ++ } else { ++ Handle::Producer ++ } ++ } + "consumer" => { + let vt = self.next_vt_id.fetch_add(1, Ordering::Relaxed); + self.vts.insert(vt); +@@ -253,9 +389,23 @@ impl SchemeSync for InputScheme { + } + "control" => Handle::Control, + +- _ => { +- log::error!("invalid path '{path}'"); +- return Err(SysError::new(EINVAL)); ++ "events" => Handle::HotplugEvents { ++ events: EventFlags::empty(), ++ pending: Vec::new(), ++ notified: false, ++ }, ++ ++ // dynamic device consumer: must be a currently registered device ++ name => { ++ if !self.devices.contains_key(name) { ++ return Err(SysError::new(ENOENT)); ++ } ++ Handle::DeviceConsumer { ++ device_name: name.to_owned(), ++ events: EventFlags::empty(), ++ pending: Vec::new(), ++ notified: false, ++ } + } + }; + +@@ -274,7 +424,7 @@ impl SchemeSync for InputScheme { let handle = self.handles.get(id)?; if let Handle::Consumer { vt, .. 
} = handle { @@ -7378,18 +9416,205 @@ index 07aa943e..61641b9f 100644 Ok(()) } else { Err(SysError::new(EINVAL)) -@@ -438,7 +438,9 @@ impl SchemeSync for InputScheme { +@@ -282,6 +432,50 @@ impl SchemeSync for InputScheme { + }) + } + ++ fn getdents<'buf>( ++ &mut self, ++ id: usize, ++ mut buf: DirentBuf<&'buf mut [u8]>, ++ opaque_offset: u64, ++ ) -> syscall::Result> { ++ let handle = self.handles.get(id)?; ++ if !matches!(handle, Handle::SchemeRoot) { ++ return Err(SysError::new(ENOTDIR)); ++ } ++ ++ let static_entries: &[&str] = &[ ++ "producer", ++ "consumer", ++ "consumer_bootlog", ++ "events", ++ "handle", ++ "handle_early", ++ "control", ++ ]; ++ ++ let device_names: Vec<&str> = self.devices.keys().map(|s| s.as_str()).collect(); ++ ++ let all_entries: Vec<(&str, DirentKind)> = static_entries ++ .iter() ++ .map(|&name| (name, DirentKind::Directory)) ++ .chain(device_names.iter().map(|&name| (name, DirentKind::Unspecified))) ++ .collect(); ++ ++ for (idx, (name, kind)) in all_entries ++ .iter() ++ .enumerate() ++ .skip(opaque_offset as usize) ++ { ++ buf.entry(DirEntry { ++ inode: 0, ++ next_opaque_id: idx as u64 + 1, ++ name, ++ kind: *kind, ++ })?; ++ } ++ Ok(buf) ++ } ++ + fn read( + &mut self, + id: usize, +@@ -313,6 +507,22 @@ impl SchemeSync for InputScheme { + Ok(copy) + } + ++ Handle::DeviceConsumer { pending, .. } => { ++ let copy = core::cmp::min(pending.len(), buf.len()); ++ for (i, byte) in pending.drain(..copy).enumerate() { ++ buf[i] = byte; ++ } ++ Ok(copy) ++ } ++ ++ Handle::HotplugEvents { pending, .. } => { ++ let copy = core::cmp::min(pending.len(), buf.len()); ++ for (i, byte) in pending.drain(..copy).enumerate() { ++ buf[i] = byte; ++ } ++ Ok(copy) ++ } ++ + Handle::Display { pending, .. 
} => { + if buf.len() % size_of::() == 0 { + let copy = core::cmp::min(pending.len(), buf.len() / size_of::()); +@@ -334,6 +544,10 @@ impl SchemeSync for InputScheme { + log::error!("producer tried to read"); + return Err(SysError::new(EINVAL)); + } ++ Handle::NamedProducer { .. } => { ++ log::error!("named producer tried to read"); ++ return Err(SysError::new(EINVAL)); ++ } + Handle::Control => { + log::error!("control tried to read"); + return Err(SysError::new(EINVAL)); +@@ -379,11 +593,20 @@ impl SchemeSync for InputScheme { + log::error!("consumer tried to write"); + return Err(SysError::new(EINVAL)); + } ++ Handle::DeviceConsumer { .. } => { ++ log::error!("device consumer tried to write"); ++ return Err(SysError::new(EINVAL)); ++ } ++ Handle::HotplugEvents { .. } => { ++ log::error!("hotplug events tried to write"); ++ return Err(SysError::new(EINVAL)); ++ } + Handle::Display { .. } => { + log::error!("display tried to write"); + return Err(SysError::new(EINVAL)); + } + Handle::Producer => {} ++ Handle::NamedProducer { .. } => {} + Handle::SchemeRoot => return Err(SysError::new(EBADF)), } - let handle = self.handles.get_mut(id)?; -- assert!(matches!(handle, Handle::Producer)); -+ if !matches!(handle, Handle::Producer) { -+ return Err(SysError::new(EBADF)); -+ } +@@ -397,6 +620,11 @@ impl SchemeSync for InputScheme { + buf.len() / size_of::(), + ) + }); ++ let producer_name = match self.handles.get(id)? 
{ ++ Handle::NamedProducer { ref name } => Some(name.clone()), ++ Handle::Producer => None, ++ _ => return Err(SysError::new(EBADF)), ++ }; - let buf = unsafe { + for i in 0..events.len() { + let mut new_active_opt = None; +@@ -437,38 +665,21 @@ impl SchemeSync for InputScheme { + } + } + +- let handle = self.handles.get_mut(id)?; +- assert!(matches!(handle, Handle::Producer)); +- +- let buf = unsafe { ++ let serialized = unsafe { core::slice::from_raw_parts( -@@ -505,8 +507,8 @@ impl SchemeSync for InputScheme { + (events.as_ptr()) as *const u8, + events.len() * size_of::(), + ) + }; + +- if let Some(active_vt) = self.active_vt { +- for handle in self.handles.values_mut() { +- match handle { +- Handle::Consumer { +- pending, +- notified, +- vt, +- .. +- } => { +- if *vt != active_vt { +- continue; +- } +- +- pending.extend_from_slice(buf); +- *notified = false; +- } +- _ => continue, +- } +- } ++ if let Some(ref name) = producer_name { ++ self.deliver_to_device_consumers(name, serialized); + } + +- Ok(buf.len()) ++ // named producers also feed the legacy path; legacy producers only feed legacy ++ self.deliver_to_legacy_consumers(serialized); ++ ++ Ok(serialized.len()) + } + + fn fevent( +@@ -487,6 +698,24 @@ impl SchemeSync for InputScheme { + *notified = false; + Ok(EventFlags::empty()) + } ++ Handle::DeviceConsumer { ++ ref mut events, ++ ref mut notified, ++ .. ++ } => { ++ *events = flags; ++ *notified = false; ++ Ok(EventFlags::empty()) ++ } ++ Handle::HotplugEvents { ++ ref mut events, ++ ref mut notified, ++ .. ++ } => { ++ *events = flags; ++ *notified = false; ++ Ok(EventFlags::empty()) ++ } + Handle::Display { + ref mut events, + ref mut notified, +@@ -496,7 +725,7 @@ impl SchemeSync for InputScheme { + *notified = false; + Ok(EventFlags::empty()) + } +- Handle::Producer | Handle::Control => { ++ Handle::Producer | Handle::NamedProducer { .. 
} | Handle::Control => { + log::error!("producer or control tried to use an event queue"); + Err(SysError::new(EINVAL)) + } +@@ -505,8 +734,8 @@ impl SchemeSync for InputScheme { } fn on_close(&mut self, id: usize) { @@ -7400,11 +9625,16 @@ index 07aa943e..61641b9f 100644 self.vts.remove(&vt); if self.active_vt == Some(vt) { if let Some(&new_vt) = self.vts.last() { -@@ -516,7 +518,10 @@ impl SchemeSync for InputScheme { +@@ -516,7 +745,15 @@ impl SchemeSync for InputScheme { } } } - _ => {} ++ Some(Handle::NamedProducer { name, .. }) => { ++ if let Some(device_id) = self.devices.remove(&name) { ++ self.emit_hotplug(DEVICE_REMOVE, device_id, &name); ++ } ++ } + Some(_) => {} + None => { + log::warn!("inputd: on_close called with unknown handle id {id}"); @@ -7412,7 +9642,47 @@ index 07aa943e..61641b9f 100644 } } } -@@ -589,8 +594,11 @@ fn deamon(daemon: daemon::SchemeDaemon) -> anyhow::Result<()> { +@@ -564,6 +801,39 @@ fn deamon(daemon: daemon::SchemeDaemon) -> anyhow::Result<()> { + + *notified = true; + } ++ Handle::DeviceConsumer { ++ events, ++ pending, ++ ref mut notified, ++ .. ++ } => { ++ if pending.is_empty() || *notified || !events.contains(EventFlags::EVENT_READ) { ++ continue; ++ } ++ ++ socket_file.write_response( ++ Response::post_fevent(*id, EventFlags::EVENT_READ.bits()), ++ SignalBehavior::Restart, ++ )?; ++ ++ *notified = true; ++ } ++ Handle::HotplugEvents { ++ events, ++ pending, ++ ref mut notified, ++ } => { ++ if pending.is_empty() || *notified || !events.contains(EventFlags::EVENT_READ) { ++ continue; ++ } ++ ++ socket_file.write_response( ++ Response::post_fevent(*id, EventFlags::EVENT_READ.bits()), ++ SignalBehavior::Restart, ++ )?; ++ ++ *notified = true; ++ } + Handle::Display { + events, + pending, +@@ -589,8 +859,11 @@ fn deamon(daemon: daemon::SchemeDaemon) -> anyhow::Result<()> { } fn daemon_runner(daemon: daemon::SchemeDaemon) -> ! 
{ @@ -7426,7 +9696,7 @@ index 07aa943e..61641b9f 100644 } const HELP: &str = r#" -@@ -608,13 +616,26 @@ fn main() { +@@ -608,13 +881,26 @@ fn main() { match val.as_ref() { // Activates a VT. "-A" => { @@ -7459,7 +9729,7 @@ index 07aa943e..61641b9f 100644 } // Activates a keymap. "-K" => { -@@ -630,11 +651,17 @@ fn main() { +@@ -630,11 +916,17 @@ fn main() { std::process::exit(1); }); @@ -7482,7 +9752,7 @@ index 07aa943e..61641b9f 100644 } // List available keymaps "--keymaps" => { -@@ -647,7 +674,10 @@ fn main() { +@@ -647,7 +939,10 @@ fn main() { println!("{}", HELP); } @@ -7494,76 +9764,95 @@ index 07aa943e..61641b9f 100644 } } else { common::setup_logging( +diff --git a/drivers/net/e1000d/src/device.rs b/drivers/net/e1000d/src/device.rs +index 4c518f30..0e42d72b 100644 +--- a/drivers/net/e1000d/src/device.rs ++++ b/drivers/net/e1000d/src/device.rs +@@ -3,7 +3,7 @@ use std::{cmp, mem, ptr, slice, thread, time}; + + use driver_network::NetworkAdapter; + +-use syscall::error::Result; ++use syscall::error::{Error, Result, EIO}; + + use common::dma::Dma; + +@@ -207,11 +207,10 @@ impl NetworkAdapter for Intel8254x { + } + + fn dma_array() -> Result<[Dma; N]> { +- Ok((0..N) ++ let vec: Vec> = (0..N) + .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() })) +- .collect::>>()? +- .try_into() +- .unwrap_or_else(|_| unreachable!())) ++ .collect::>>()?; ++ vec.try_into().map_err(|_| Error::new(EIO)) + } + impl Intel8254x { + pub unsafe fn new(base: usize) -> Result { diff --git a/drivers/net/e1000d/src/main.rs b/drivers/net/e1000d/src/main.rs -index 373ea9b3..d971c0a1 100644 +index 373ea9b3..8ff57b33 100644 --- a/drivers/net/e1000d/src/main.rs +++ b/drivers/net/e1000d/src/main.rs -@@ -28,17 +28,38 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ - let irq = pci_config - .func - .legacy_interrupt_line +@@ -1,5 +1,6 @@ + use std::io::{Read, Write}; + use std::os::unix::io::AsRawFd; ++use std::process; + + use driver_network::NetworkScheme; + use event::{user_data, EventQueue}; +@@ -25,10 +26,13 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + common::file_level(), + ); + +- let irq = pci_config +- .func +- .legacy_interrupt_line - .expect("e1000d: no legacy interrupts supported"); -+ .unwrap_or_else(|| { ++ let irq = match pci_config.func.legacy_interrupt_line { ++ Some(irq) => irq, ++ None => { + log::error!("e1000d: no legacy interrupts supported"); -+ std::process::exit(1); -+ }); ++ process::exit(1); ++ } ++ }; log::info!("E1000 {}", pci_config.func.display()); -- let mut irq_file = irq.irq_handle("e1000d"); -+ let mut irq_file = match irq.try_irq_handle("e1000d") { -+ Ok(file) => file, -+ Err(err) => { -+ log::error!("e1000d: failed to open IRQ handle: {err}"); -+ std::process::exit(1); -+ } -+ }; - -- let address = unsafe { pcid_handle.map_bar(0) }.ptr.as_ptr() as usize; -+ let address = match unsafe { pcid_handle.try_map_bar(0) } { -+ Ok(bar) => bar.ptr.as_ptr() as usize, -+ Err(err) => { -+ log::error!("e1000d: failed to map BAR0: {err}"); -+ std::process::exit(1); -+ } -+ }; +@@ -38,7 +42,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { let mut scheme = NetworkScheme::new( move || unsafe { - device::Intel8254x::new(address).expect("e1000d: failed to allocate device") -+ match device::Intel8254x::new(address) { -+ Ok(device) => device, -+ Err(err) => { -+ log::error!("e1000d: failed to allocate device: {err}"); -+ std::process::exit(1); -+ } -+ } ++ device::Intel8254x::new(address).unwrap_or_else(|err| { ++ log::error!("e1000d: failed to allocate device: {err}"); ++ process::exit(1); ++ }) }, daemon, format!("network.{name}"), -@@ -51,7 +72,13 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ +@@ -51,7 +58,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { } } - let event_queue = EventQueue::::new().expect("e1000d: failed to create event queue"); -+ let event_queue = match EventQueue::::new() { -+ Ok(queue) => queue, -+ Err(err) => { -+ log::error!("e1000d: failed to create event queue: {err}"); -+ std::process::exit(1); -+ } -+ }; ++ let mut event_queue = EventQueue::::new().unwrap_or_else(|err| { ++ log::error!("e1000d: failed to create event queue: {err}"); ++ process::exit(1); ++ }); event_queue .subscribe( -@@ -59,32 +86,65 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { +@@ -59,32 +69,65 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { Source::Irq, event::EventFlags::READ, ) - .expect("e1000d: failed to subscribe to IRQ fd"); + .unwrap_or_else(|err| { + log::error!("e1000d: failed to subscribe to IRQ fd: {err}"); -+ std::process::exit(1); ++ process::exit(1); + }); event_queue .subscribe( @@ -7574,132 +9863,298 @@ index 373ea9b3..d971c0a1 100644 - .expect("e1000d: failed to subscribe to scheme fd"); - - libredox::call::setrens(0, 0).expect("e1000d: failed to enter null namespace"); +- +- scheme.tick().unwrap(); + .unwrap_or_else(|err| { + log::error!("e1000d: failed to subscribe to scheme fd: {err}"); -+ std::process::exit(1); ++ process::exit(1); + }); + -+ if let Err(err) = libredox::call::setrens(0, 0) { ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { + log::error!("e1000d: failed to enter null namespace: {err}"); -+ std::process::exit(1); -+ } - -- scheme.tick().unwrap(); ++ process::exit(1); ++ }); ++ + if let Err(err) = scheme.tick() { + log::error!("e1000d: failed initial scheme tick: {err}"); -+ std::process::exit(1); ++ process::exit(1); + } - for event in event_queue.map(|e| e.expect("e1000d: failed to get event")) { -+ for event in event_queue { -+ let event = match event { -+ Ok(event) => event, -+ Err(err) => { ++ loop { ++ let 
event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { + log::error!("e1000d: failed to get event: {err}"); -+ break; ++ continue; + } ++ None => break, + }; match event.user_data { Source::Irq => { let mut irq = [0; 8]; - irq_file.read(&mut irq).unwrap(); + if let Err(err) = irq_file.read(&mut irq) { -+ log::error!("e1000d: failed to read IRQ file: {err}"); -+ break; ++ log::error!("e1000d: failed to read IRQ: {err}"); ++ continue; + } if unsafe { scheme.adapter().irq() } { - irq_file.write(&mut irq).unwrap(); - - scheme.tick().expect("e1000d: failed to handle IRQ") + if let Err(err) = irq_file.write(&mut irq) { -+ log::error!("e1000d: failed to acknowledge IRQ: {err}"); -+ break; ++ log::error!("e1000d: failed to write IRQ: {err}"); ++ continue; + } + + if let Err(err) = scheme.tick() { + log::error!("e1000d: failed to handle IRQ: {err}"); -+ break; + } + } + } + Source::Scheme => { + if let Err(err) = scheme.tick() { + log::error!("e1000d: failed to handle scheme op: {err}"); -+ break; } } - Source::Scheme => scheme.tick().expect("e1000d: failed to handle scheme op"), } } - unreachable!() -+ std::process::exit(1) ++ ++ process::exit(0); } +diff --git a/drivers/net/ixgbed/Cargo.toml b/drivers/net/ixgbed/Cargo.toml +index d97ff398..fcaf4b19 100644 +--- a/drivers/net/ixgbed/Cargo.toml ++++ b/drivers/net/ixgbed/Cargo.toml +@@ -7,6 +7,7 @@ edition = "2021" + [dependencies] + bitflags.workspace = true + libredox.workspace = true ++log.workspace = true + redox_event.workspace = true + redox_syscall.workspace = true + +diff --git a/drivers/net/ixgbed/src/device.rs b/drivers/net/ixgbed/src/device.rs +index 0d59b46d..fc7c009f 100644 +--- a/drivers/net/ixgbed/src/device.rs ++++ b/drivers/net/ixgbed/src/device.rs +@@ -3,7 +3,7 @@ use std::time::{Duration, Instant}; + use std::{cmp, mem, ptr, slice, thread}; + + use driver_network::NetworkAdapter; +-use syscall::error::Result; ++use syscall::error::{Error, Result, EIO}; + + use 
common::dma::Dma; + +@@ -45,7 +45,12 @@ impl NetworkAdapter for Intel8259x { + + if (status & IXGBE_RXDADV_STAT_DD) != 0 { + if (status & IXGBE_RXDADV_STAT_EOP) == 0 { +- panic!("increase buffer size or decrease MTU") ++ log::error!("ixgbed: received fragmented packet, skipping descriptor"); ++ desc.read.pkt_addr = self.receive_buffer[self.receive_index].physical() as u64; ++ desc.read.hdr_addr = 0; ++ self.write_reg(IXGBE_RDT(0), self.receive_index as u32); ++ self.receive_index = wrap_ring(self.receive_index, self.receive_ring.len()); ++ return Ok(None); + } + + let data = unsafe { +@@ -132,13 +137,25 @@ impl Intel8259x { + .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() })) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()), ++ .map_err(|v: Vec<_>| { ++ log::error!( ++ "ixgbed: internal error: DMA buffer array conversion failed (got {} items, expected 32)", ++ v.len() ++ ); ++ Error::new(EIO) ++ })?, + receive_ring: unsafe { Dma::zeroed()?.assume_init() }, + transmit_buffer: (0..32) + .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() })) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()), ++ .map_err(|v: Vec<_>| { ++ log::error!( ++ "ixgbed: internal error: DMA buffer array conversion failed (got {} items, expected 32)", ++ v.len() ++ ); ++ Error::new(EIO) ++ })?, + receive_index: 0, + transmit_ring: unsafe { Dma::zeroed()?.assume_init() }, + transmit_ring_free: 32, +@@ -166,7 +183,7 @@ impl Intel8259x { + + if (status & IXGBE_RXDADV_STAT_DD) != 0 { + if (status & IXGBE_RXDADV_STAT_EOP) == 0 { +- panic!("increase buffer size or decrease MTU") ++ log::error!("ixgbed: received fragmented packet, buffer too small"); + } + + return unsafe { desc.wb.upper.length as usize }; +@@ -205,13 +222,8 @@ impl Intel8259x { + self.mac_address = mac; + } + +- /// Returns the register at `self.base` + `register`. 
+- /// +- /// # Panics +- /// +- /// Panics if `self.base` + `register` does not belong to the mapped memory of the PCIe device. + fn read_reg(&self, register: u32) -> u32 { +- assert!( ++ debug_assert!( + register as usize <= self.size - 4 as usize, + "MMIO access out of bounds" + ); +@@ -219,13 +231,8 @@ impl Intel8259x { + unsafe { ptr::read_volatile((self.base + register as usize) as *mut u32) } + } + +- /// Sets the register at `self.base` + `register`. +- /// +- /// # Panics +- /// +- /// Panics if `self.base` + `register` does not belong to the mapped memory of the PCIe device. + fn write_reg(&self, register: u32, data: u32) -> u32 { +- assert!( ++ debug_assert!( + register as usize <= self.size - 4 as usize, + "MMIO access out of bounds" + ); +@@ -279,7 +286,7 @@ impl Intel8259x { + + let mac = self.get_mac_addr(); + +- println!( ++ log::info!( + " - MAC: {:>02X}:{:>02X}:{:>02X}:{:>02X}:{:>02X}:{:>02X}", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5] + ); +@@ -438,13 +445,11 @@ impl Intel8259x { + } + + /// Sets the rx queues` descriptors and enables the queues. +- /// +- /// # Panics +- /// Panics if length of `self.receive_ring` is not a power of 2. + fn start_rx_queue(&mut self, queue_id: u16) { +- if self.receive_ring.len() & (self.receive_ring.len() - 1) != 0 { +- panic!("number of receive queue entries must be a power of 2"); +- } ++ debug_assert!( ++ self.receive_ring.len() & (self.receive_ring.len() - 1) == 0, ++ "number of receive queue entries must be a power of 2" ++ ); + + for i in 0..self.receive_ring.len() { + self.receive_ring[i].read.pkt_addr = self.receive_buffer[i].physical() as u64; +@@ -466,13 +471,11 @@ impl Intel8259x { + } + + /// Enables the tx queues. +- /// +- /// # Panics +- /// Panics if length of `self.transmit_ring` is not a power of 2. 
+ fn start_tx_queue(&mut self, queue_id: u16) { +- if self.transmit_ring.len() & (self.transmit_ring.len() - 1) != 0 { +- panic!("number of receive queue entries must be a power of 2"); +- } ++ debug_assert!( ++ self.transmit_ring.len() & (self.transmit_ring.len() - 1) == 0, ++ "number of transmit queue entries must be a power of 2" ++ ); + + for i in 0..self.transmit_ring.len() { + self.transmit_ring[i].read.buffer_addr = self.transmit_buffer[i].physical() as u64; +@@ -506,14 +509,14 @@ impl Intel8259x { + + /// Waits for the link to come up. + fn wait_for_link(&self) { +- println!(" - waiting for link"); ++ log::info!(" - waiting for link"); + let time = Instant::now(); + let mut speed = self.get_link_speed(); + while speed == 0 && time.elapsed().as_secs() < 10 { + thread::sleep(Duration::from_millis(100)); + speed = self.get_link_speed(); + } +- println!(" - link speed is {} Mbit/s", self.get_link_speed()); ++ log::info!(" - link speed is {} Mbit/s", self.get_link_speed()); + } + + /// Enables or disables promisc mode of this device. diff --git a/drivers/net/ixgbed/src/main.rs b/drivers/net/ixgbed/src/main.rs -index 4a6ce74d..a9b6dd82 100644 +index 4a6ce74d..855d339d 100644 --- a/drivers/net/ixgbed/src/main.rs +++ b/drivers/net/ixgbed/src/main.rs -@@ -22,20 +22,44 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { - let irq = pci_config - .func - .legacy_interrupt_line +@@ -1,5 +1,6 @@ + use std::io::{Read, Write}; + use std::os::unix::io::AsRawFd; ++use std::process; + + use driver_network::NetworkScheme; + use event::{user_data, EventQueue}; +@@ -19,12 +20,23 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ + let mut name = pci_config.func.name(); + name.push_str("_ixgbe"); + +- let irq = pci_config +- .func +- .legacy_interrupt_line - .expect("ixgbed: no legacy interrupts supported"); -+ .unwrap_or_else(|| { -+ eprintln!("ixgbed: no legacy interrupts supported"); -+ std::process::exit(1); -+ }); - - println!(" + IXGBE {}", pci_config.func.display()); - -- let mut irq_file = irq.irq_handle("ixgbed"); -+ let mut irq_file = match irq.try_irq_handle("ixgbed") { -+ Ok(file) => file, -+ Err(err) => { -+ eprintln!("ixgbed: failed to open IRQ handle: {err}"); -+ std::process::exit(1); ++ common::setup_logging( ++ "net", ++ "pci", ++ &name, ++ common::output_level(), ++ common::file_level(), ++ ); ++ ++ let irq = match pci_config.func.legacy_interrupt_line { ++ Some(irq) => irq, ++ None => { ++ log::error!("ixgbed: no legacy interrupts supported"); ++ process::exit(1); + } + }; -- let mapped_bar = unsafe { pcid_handle.map_bar(0) }; -+ if let Err(err) = pci_config.func.bars[0].try_mem() { -+ eprintln!("ixgbed: invalid BAR0: {err}"); -+ std::process::exit(1); -+ } -+ let mapped_bar = match unsafe { pcid_handle.try_map_bar(0) } { -+ Ok(bar) => bar, -+ Err(err) => { -+ eprintln!("ixgbed: failed to map BAR0: {err}"); -+ std::process::exit(1); -+ } -+ }; - let address = mapped_bar.ptr.as_ptr(); - let size = mapped_bar.bar_size; +- println!(" + IXGBE {}", pci_config.func.display()); ++ log::info!("IXGBE {}", pci_config.func.display()); + + let mut irq_file = irq.irq_handle("ixgbed"); + +@@ -34,8 +46,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ let mut scheme = NetworkScheme::new( move || { - device::Intel8259x::new(address as usize, size) - .expect("ixgbed: failed to allocate device") -+ match device::Intel8259x::new(address as usize, size) { -+ Ok(device) => device, -+ Err(err) => { -+ eprintln!("ixgbed: failed to allocate device: {err}"); -+ std::process::exit(1); -+ } -+ } ++ device::Intel8259x::new(address as usize, size).unwrap_or_else(|err| { ++ log::error!("ixgbed: failed to allocate device: {err}"); ++ process::exit(1); ++ }) }, daemon, format!("network.{name}"), -@@ -48,41 +72,78 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { +@@ -48,41 +62,77 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { } } - let event_queue = EventQueue::::new().expect("ixgbed: Could not create event queue."); -+ let event_queue = match EventQueue::::new() { -+ Ok(queue) => queue, -+ Err(err) => { -+ eprintln!("ixgbed: could not create event queue: {err}"); -+ std::process::exit(1); -+ } -+ }; ++ let mut event_queue = EventQueue::::new().unwrap_or_else(|err| { ++ log::error!("ixgbed: failed to create event queue: {err}"); ++ process::exit(1); ++ }); ++ event_queue .subscribe( irq_file.as_raw_fd() as usize, @@ -7708,8 +10163,8 @@ index 4a6ce74d..a9b6dd82 100644 ) - .unwrap(); + .unwrap_or_else(|err| { -+ eprintln!("ixgbed: failed to subscribe IRQ fd: {err}"); -+ std::process::exit(1); ++ log::error!("ixgbed: failed to subscribe to IRQ fd: {err}"); ++ process::exit(1); + }); event_queue .subscribe( @@ -7721,258 +10176,453 @@ index 4a6ce74d..a9b6dd82 100644 - - libredox::call::setrens(0, 0).expect("ixgbed: failed to enter null namespace"); + .unwrap_or_else(|err| { -+ eprintln!("ixgbed: failed to subscribe scheme fd: {err}"); -+ std::process::exit(1); ++ log::error!("ixgbed: failed to subscribe to scheme fd: {err}"); ++ process::exit(1); + }); + -+ if let Err(err) = libredox::call::setrens(0, 0) { -+ eprintln!("ixgbed: failed to enter null namespace: {err}"); 
-+ std::process::exit(1); ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("ixgbed: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("ixgbed: failed initial scheme tick: {err}"); ++ process::exit(1); + } - scheme.tick().unwrap(); -+ if let Err(err) = scheme.tick() { -+ eprintln!("ixgbed: failed initial scheme tick: {err}"); -+ std::process::exit(1); -+ } ++ loop { ++ let event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { ++ log::error!("ixgbed: failed to get event: {err}"); ++ continue; ++ } ++ None => break, ++ }; - for event in event_queue.map(|e| e.expect("ixgbed: failed to get next event")) { -+ for event in event_queue { -+ let event = match event { -+ Ok(event) => event, -+ Err(err) => { -+ eprintln!("ixgbed: failed to get next event: {err}"); -+ break; -+ } -+ }; match event.user_data { Source::Irq => { let mut irq = [0; 8]; - irq_file.read(&mut irq).unwrap(); + if let Err(err) = irq_file.read(&mut irq) { -+ eprintln!("ixgbed: failed to read IRQ file: {err}"); -+ break; ++ log::error!("ixgbed: failed to read IRQ: {err}"); ++ continue; + } if scheme.adapter().irq() { - irq_file.write(&mut irq).unwrap(); - - scheme.tick().unwrap(); + if let Err(err) = irq_file.write(&mut irq) { -+ eprintln!("ixgbed: failed to acknowledge IRQ: {err}"); -+ break; ++ log::error!("ixgbed: failed to write IRQ: {err}"); ++ continue; + } + + if let Err(err) = scheme.tick() { -+ eprintln!("ixgbed: failed to handle IRQ: {err}"); -+ break; ++ log::error!("ixgbed: failed to handle IRQ: {err}"); + } } } Source::Scheme => { - scheme.tick().unwrap(); + if let Err(err) = scheme.tick() { -+ eprintln!("ixgbed: failed to handle scheme op: {err}"); -+ break; ++ log::error!("ixgbed: failed to handle scheme op: {err}"); + } } } } - unreachable!() -+ std::process::exit(0) ++ ++ process::exit(0); } +diff --git a/drivers/net/rtl8139d/src/device.rs 
b/drivers/net/rtl8139d/src/device.rs +index 37167ee2..d7428132 100644 +--- a/drivers/net/rtl8139d/src/device.rs ++++ b/drivers/net/rtl8139d/src/device.rs +@@ -215,7 +215,7 @@ impl Rtl8139 { + .map(|_| Ok(Dma::zeroed()?.assume_init())) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()), ++ .map_err(|_| Error::new(EIO))?, + transmit_i: 0, + mac_address: [0; 6], + }; diff --git a/drivers/net/rtl8139d/src/main.rs b/drivers/net/rtl8139d/src/main.rs -index d470e814..aa377446 100644 +index d470e814..64335a23 100644 --- a/drivers/net/rtl8139d/src/main.rs +++ b/drivers/net/rtl8139d/src/main.rs -@@ -3,7 +3,7 @@ use std::os::unix::io::AsRawFd; +@@ -1,5 +1,6 @@ + use std::io::{Read, Write}; + use std::os::unix::io::AsRawFd; ++use std::process; use driver_network::NetworkScheme; use event::{user_data, EventQueue}; --use pcid_interface::irq_helpers::pci_allocate_interrupt_vector; -+use pcid_interface::irq_helpers::try_pci_allocate_interrupt_vector; - use pcid_interface::PciFunctionHandle; - - pub mod device; -@@ -20,19 +20,19 @@ where - } - } - --fn map_bar(pcid_handle: &mut PciFunctionHandle) -> *mut u8 { -+fn map_bar(pcid_handle: &mut PciFunctionHandle) -> Result<*mut u8, &'static str> { - let config = pcid_handle.config(); - - // RTL8139 uses BAR2, RTL8169 uses BAR1, search in that order - for &barnum in &[2, 1] { - match config.func.bars[usize::from(barnum)] { - pcid_interface::PciBar::Memory32 { .. } | pcid_interface::PciBar::Memory64 { .. 
} => unsafe { -- return pcid_handle.map_bar(barnum).ptr.as_ptr(); -+ return Ok(pcid_handle.map_bar(barnum).ptr.as_ptr()); - }, +@@ -32,7 +33,8 @@ fn map_bar(pcid_handle: &mut PciFunctionHandle) -> *mut u8 { other => log::warn!("BAR {} is {:?} instead of memory BAR", barnum, other), } } - panic!("rtl8139d: failed to find BAR"); -+ Err("failed to find a usable MMIO BAR") ++ log::error!("rtl8139d: failed to find BAR"); ++ process::exit(1); } fn main() { -@@ -55,13 +55,31 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { - - log::info!(" + RTL8139 {}", pci_config.func.display()); - -- let bar = map_bar(&mut pcid_handle); -+ let bar = match map_bar(&mut pcid_handle) { -+ Ok(bar) => bar, -+ Err(err) => { -+ log::error!("rtl8139d: {err}"); -+ std::process::exit(1); -+ } -+ }; - -- let irq_file = pci_allocate_interrupt_vector(&mut pcid_handle, "rtl8139d"); -+ let irq_file = match try_pci_allocate_interrupt_vector(&mut pcid_handle, "rtl8139d") { -+ Ok(irq) => irq, -+ Err(err) => { -+ log::error!("rtl8139d: failed to allocate interrupt vector: {err}"); -+ std::process::exit(1); -+ } -+ }; +@@ -61,7 +63,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { let mut scheme = NetworkScheme::new( move || unsafe { - device::Rtl8139::new(bar as usize).expect("rtl8139d: failed to allocate device") -+ match device::Rtl8139::new(bar as usize) { -+ Ok(device) => device, -+ Err(err) => { -+ log::error!("rtl8139d: failed to allocate device: {err}"); -+ std::process::exit(1); -+ } -+ } ++ device::Rtl8139::new(bar as usize).unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to allocate device: {err}"); ++ process::exit(1); ++ }) }, daemon, format!("network.{name}"), +@@ -74,42 +79,76 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ + } + } + +- let event_queue = EventQueue::::new().expect("rtl8139d: Could not create event queue."); ++ let mut event_queue = EventQueue::::new().unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to create event queue: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + irq_file.irq_handle().as_raw_fd() as usize, + Source::Irq, + event::EventFlags::READ, + ) +- .unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to subscribe to IRQ fd: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + scheme.event_handle().raw(), + Source::Scheme, + event::EventFlags::READ, + ) +- .unwrap(); +- +- libredox::call::setrens(0, 0).expect("rtl8139d: failed to enter null namespace"); +- +- scheme.tick().unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to subscribe to scheme fd: {err}"); ++ process::exit(1); ++ }); ++ ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("rtl8139d: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8139d: failed initial scheme tick: {err}"); ++ process::exit(1); ++ } + +- for event in event_queue.map(|e| e.expect("rtl8139d: failed to get next event")) { ++ loop { ++ let event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { ++ log::error!("rtl8139d: failed to get next event: {err}"); ++ continue; ++ } ++ None => break, ++ }; + match event.user_data { + Source::Irq => { + let mut irq = [0; 8]; +- irq_file.irq_handle().read(&mut irq).unwrap(); ++ if let Err(err) = irq_file.irq_handle().read(&mut irq) { ++ log::error!("rtl8139d: failed to read IRQ: {err}"); ++ continue; ++ } + //TODO: This may be causing spurious interrupts + if unsafe { scheme.adapter_mut().irq() } { +- irq_file.irq_handle().write(&mut irq).unwrap(); +- +- scheme.tick().unwrap(); ++ if let Err(err) = irq_file.irq_handle().write(&mut irq) { ++ log::error!("rtl8139d: failed to write IRQ: 
{err}"); ++ continue; ++ } ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8139d: failed to handle IRQ tick: {err}"); ++ } + } + } + Source::Scheme => { +- scheme.tick().unwrap(); ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8139d: failed to handle scheme op: {err}"); ++ } + } + } + } +- unreachable!() ++ ++ process::exit(0); + } +diff --git a/drivers/net/rtl8168d/src/device.rs b/drivers/net/rtl8168d/src/device.rs +index ae545ec4..7229a52d 100644 +--- a/drivers/net/rtl8168d/src/device.rs ++++ b/drivers/net/rtl8168d/src/device.rs +@@ -177,7 +177,7 @@ impl Rtl8168 { + .map(|_| Ok(Dma::zeroed()?.assume_init())) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()), ++ .map_err(|_| Error::new(EIO))?, + + receive_ring: Dma::zeroed()?.assume_init(), + receive_i: 0, +@@ -185,7 +185,7 @@ impl Rtl8168 { + .map(|_| Ok(Dma::zeroed()?.assume_init())) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()), ++ .map_err(|_| Error::new(EIO))?, + transmit_ring: Dma::zeroed()?.assume_init(), + transmit_i: 0, + transmit_buffer_h: [Dma::zeroed()?.assume_init()], diff --git a/drivers/net/rtl8168d/src/main.rs b/drivers/net/rtl8168d/src/main.rs -index 1d9963a3..c6e21bd9 100644 +index 1d9963a3..bd2fcb1a 100644 --- a/drivers/net/rtl8168d/src/main.rs +++ b/drivers/net/rtl8168d/src/main.rs -@@ -3,7 +3,7 @@ use std::os::unix::io::AsRawFd; +@@ -1,5 +1,6 @@ + use std::io::{Read, Write}; + use std::os::unix::io::AsRawFd; ++use std::process; use driver_network::NetworkScheme; use event::{user_data, EventQueue}; --use pcid_interface::irq_helpers::pci_allocate_interrupt_vector; -+use pcid_interface::irq_helpers::try_pci_allocate_interrupt_vector; - use pcid_interface::PciFunctionHandle; - - pub mod device; -@@ -20,19 +20,19 @@ where - } - } - --fn map_bar(pcid_handle: &mut PciFunctionHandle) -> *mut u8 { -+fn map_bar(pcid_handle: &mut PciFunctionHandle) -> Result<*mut u8, &'static str> { - let config = pcid_handle.config(); - - // RTL8168 uses 
BAR2, RTL8169 uses BAR1, search in that order - for &barnum in &[2, 1] { - match config.func.bars[usize::from(barnum)] { - pcid_interface::PciBar::Memory32 { .. } | pcid_interface::PciBar::Memory64 { .. } => unsafe { -- return pcid_handle.map_bar(barnum).ptr.as_ptr(); -+ return Ok(pcid_handle.map_bar(barnum).ptr.as_ptr()); - }, +@@ -32,7 +33,8 @@ fn map_bar(pcid_handle: &mut PciFunctionHandle) -> *mut u8 { other => log::warn!("BAR {} is {:?} instead of memory BAR", barnum, other), } } - panic!("rtl8168d: failed to find BAR"); -+ Err("failed to find a usable MMIO BAR") ++ log::error!("rtl8168d: failed to find BAR"); ++ process::exit(1); } fn main() { -@@ -55,13 +55,31 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { - - log::info!("RTL8168 {}", pci_config.func.display()); - -- let bar = map_bar(&mut pcid_handle); -+ let bar = match map_bar(&mut pcid_handle) { -+ Ok(bar) => bar, -+ Err(err) => { -+ log::error!("rtl8168d: {err}"); -+ std::process::exit(1); -+ } -+ }; - -- let irq_file = pci_allocate_interrupt_vector(&mut pcid_handle, "rtl8168d"); -+ let irq_file = match try_pci_allocate_interrupt_vector(&mut pcid_handle, "rtl8168d") { -+ Ok(irq) => irq, -+ Err(err) => { -+ log::error!("rtl8168d: failed to allocate interrupt vector: {err}"); -+ std::process::exit(1); -+ } -+ }; +@@ -61,7 +63,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ let mut scheme = NetworkScheme::new( move || unsafe { - device::Rtl8168::new(bar as usize).expect("rtl8168d: failed to allocate device") -+ match device::Rtl8168::new(bar as usize) { -+ Ok(device) => device, -+ Err(err) => { -+ log::error!("rtl8168d: failed to allocate device: {err}"); -+ std::process::exit(1); -+ } -+ } ++ device::Rtl8168::new(bar as usize).unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to allocate device: {err}"); ++ process::exit(1); ++ }) }, daemon, format!("network.{name}"), +@@ -74,42 +79,76 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + } + } + +- let event_queue = EventQueue::::new().expect("rtl8168d: Could not create event queue."); ++ let mut event_queue = EventQueue::::new().unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to create event queue: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + irq_file.irq_handle().as_raw_fd() as usize, + Source::Irq, + event::EventFlags::READ, + ) +- .unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to subscribe to IRQ fd: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe( + scheme.event_handle().raw(), + Source::Scheme, + event::EventFlags::READ, + ) +- .unwrap(); +- +- libredox::call::setrens(0, 0).expect("rtl8168d: failed to enter null namespace"); +- +- scheme.tick().unwrap(); ++ .unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to subscribe to scheme fd: {err}"); ++ process::exit(1); ++ }); ++ ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("rtl8168d: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8168d: failed initial scheme tick: {err}"); ++ process::exit(1); ++ } + +- for event in event_queue.map(|e| e.expect("rtl8168d: failed to get next event")) { ++ loop { ++ let event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { ++ log::error!("rtl8168d: 
failed to get next event: {err}"); ++ continue; ++ } ++ None => break, ++ }; + match event.user_data { + Source::Irq => { + let mut irq = [0; 8]; +- irq_file.irq_handle().read(&mut irq).unwrap(); ++ if let Err(err) = irq_file.irq_handle().read(&mut irq) { ++ log::error!("rtl8168d: failed to read IRQ: {err}"); ++ continue; ++ } + //TODO: This may be causing spurious interrupts + if unsafe { scheme.adapter_mut().irq() } { +- irq_file.irq_handle().write(&mut irq).unwrap(); +- +- scheme.tick().unwrap(); ++ if let Err(err) = irq_file.irq_handle().write(&mut irq) { ++ log::error!("rtl8168d: failed to write IRQ: {err}"); ++ continue; ++ } ++ ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8168d: failed to handle IRQ tick: {err}"); ++ } + } + } + Source::Scheme => { +- scheme.tick().unwrap(); ++ if let Err(err) = scheme.tick() { ++ log::error!("rtl8168d: failed to handle scheme op: {err}"); ++ } + } + } + } +- unreachable!() ++ ++ process::exit(0); + } diff --git a/drivers/net/virtio-netd/src/main.rs b/drivers/net/virtio-netd/src/main.rs -index 17d168ef..56f2c045 100644 +index 17d168ef..adbd1086 100644 --- a/drivers/net/virtio-netd/src/main.rs +++ b/drivers/net/virtio-netd/src/main.rs -@@ -31,7 +31,10 @@ fn main() { +@@ -3,6 +3,7 @@ mod scheme; + use std::fs::File; + use std::io::{Read, Write}; + use std::mem; ++use std::process; + + use driver_network::NetworkScheme; + use pcid_interface::PciFunctionHandle; +@@ -31,8 +32,11 @@ fn main() { } fn daemon_runner(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! 
{ - deamon(daemon, pcid_handle).unwrap(); -+ if let Err(err) = deamon(daemon, pcid_handle) { -+ log::error!("virtio-netd: startup failed: {err}"); -+ std::process::exit(1); -+ } - unreachable!(); +- unreachable!(); ++ deamon(daemon, pcid_handle).unwrap_or_else(|err| { ++ log::error!("virtio-netd: daemon failed: {err}"); ++ process::exit(1); ++ }); ++ process::exit(0); } -@@ -52,7 +55,13 @@ fn deamon( + fn deamon( +@@ -52,7 +56,10 @@ fn deamon( // 0x1000 - virtio-net let pci_config = pcid_handle.config(); - assert_eq!(pci_config.func.full_device_id.device_id, 0x1000); + if pci_config.func.full_device_id.device_id != 0x1000 { -+ return Err(format!( -+ "unexpected virtio-net device id: {:04x}", -+ pci_config.func.full_device_id.device_id -+ ) -+ .into()); ++ log::error!("virtio-netd: unexpected device ID {:#06x}, expected 0x1000", pci_config.func.full_device_id.device_id); ++ process::exit(1); + } log::info!("virtio-net: initiating startup sequence :^)"); let device = virtio_core::probe_device(&mut pcid_handle)?; -@@ -84,7 +93,7 @@ fn deamon( +@@ -84,7 +91,8 @@ fn deamon( device.transport.ack_driver_feature(VIRTIO_NET_F_MAC); mac } else { - unimplemented!() -+ return Err("virtio-net: device does not expose VIRTIO_NET_F_MAC".into()); ++ log::error!("virtio-netd: device does not support MAC feature"); ++ return Err("virtio-netd: VIRTIO_NET_F_MAC not supported".into()); }; device.transport.finalize_features(); -@@ -126,7 +135,7 @@ fn deamon( +@@ -126,12 +134,23 @@ fn deamon( data: 0, })?; - libredox::call::setrens(0, 0).expect("virtio-netd: failed to enter null namespace"); -+ libredox::call::setrens(0, 0)?; ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("virtio-netd: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); - scheme.tick()?; +- scheme.tick()?; ++ if let Err(err) = scheme.tick() { ++ log::error!("virtio-netd: failed initial scheme tick: {err}"); ++ process::exit(1); ++ } + loop { +- event_queue.read(&mut [0; 
mem::size_of::()])?; // Wait for event +- scheme.tick()?; ++ if let Err(err) = event_queue.read(&mut [0; mem::size_of::()]) { ++ log::error!("virtio-netd: failed to read event: {err}"); ++ continue; ++ } ++ if let Err(err) = scheme.tick() { ++ log::error!("virtio-netd: failed to handle scheme event: {err}"); ++ } + } + } +diff --git a/drivers/net/virtio-netd/src/scheme.rs b/drivers/net/virtio-netd/src/scheme.rs +index 59b3b93e..d0acb2ba 100644 +--- a/drivers/net/virtio-netd/src/scheme.rs ++++ b/drivers/net/virtio-netd/src/scheme.rs +@@ -27,11 +27,16 @@ impl<'a> VirtioNet<'a> { + // Populate all of the `rx_queue` with buffers to maximize performence. + let mut rx_buffers = vec![]; + for i in 0..(rx.descriptor_len() as usize) { +- rx_buffers.push(unsafe { +- Dma::<[u8]>::zeroed_slice(MAX_BUFFER_LEN) +- .unwrap() +- .assume_init() +- }); ++ let buf = unsafe { ++ match Dma::<[u8]>::zeroed_slice(MAX_BUFFER_LEN) { ++ Ok(dma) => dma.assume_init(), ++ Err(err) => { ++ log::error!("virtio-netd: failed to allocate rx buffer: {err}"); ++ continue; ++ } ++ } ++ }; ++ rx_buffers.push(buf); + + let chain = ChainBuilder::new() + .chain(Buffer::new_unsized(&rx_buffers[i]).flags(DescriptorFlags::WRITE_ONLY)) diff --git a/drivers/pcid-spawner/src/main.rs b/drivers/pcid-spawner/src/main.rs -index a968f4d4..a39b2af8 100644 +index a968f4d4..bfff05c3 100644 --- a/drivers/pcid-spawner/src/main.rs +++ b/drivers/pcid-spawner/src/main.rs @@ -1,11 +1,40 @@ @@ -7980,6 +10630,7 @@ index a968f4d4..a39b2af8 100644 use std::fs; use std::process::Command; +use std::thread; ++use std::time::Duration; use anyhow::{anyhow, Context, Result}; @@ -8020,14 +10671,36 @@ index a968f4d4..a39b2af8 100644 } let config: Config = toml::from_str(&config_data)?; -+ let strict_usb_boot = strict_usb_boot(); + + let strict_usb_boot = strict_usb_boot(); + + + + log::info!( + + "pcid-spawner: starting (initfs={}, strict_usb_boot={})", + + initfs, strict_usb_boot + + ); + -+ log::info!( -+ "pcid-spawner: starting 
(initfs={}, strict_usb_boot={})", -+ initfs, strict_usb_boot -+ ); ++ let pci_dir = { ++ let mut attempts = 0u32; ++ loop { ++ match fs::read_dir("/scheme/pci") { ++ Ok(dir) => break dir, ++ Err(e) if e.raw_os_error() == Some(19) => { ++ attempts += 1; ++ if attempts > 50 { ++ return Err(e).context("pcid-spawner: /scheme/pci never appeared after 5 s"); ++ } ++ log::warn!( ++ "pcid-spawner: /scheme/pci not ready yet (ENODEV, attempt {}/50), waiting 100 ms", ++ attempts ++ ); ++ thread::sleep(Duration::from_millis(100)); ++ } ++ Err(e) => return Err(e).context("pcid-spawner: failed to read /scheme/pci"), ++ } ++ } ++ }; - for entry in fs::read_dir("/scheme/pci")? { +- for entry in fs::read_dir("/scheme/pci")? { ++ for entry in pci_dir { let entry = entry.context("failed to get entry")?; @@ -55,10 +90,11 @@ fn main() -> Result<()> { }; @@ -8051,13 +10724,11 @@ index a968f4d4..a39b2af8 100644 continue; }; -@@ -85,16 +121,93 @@ fn main() -> Result<()> { +@@ -85,16 +121,105 @@ fn main() -> Result<()> { let mut command = Command::new(program); command.args(args); - log::info!("pcid-spawner: spawn {:?}", command); -- -- handle.enable_device(); + log::info!( + "pcid-spawner: matched {} to driver {:?}", + device_addr, @@ -8074,6 +10745,19 @@ index a968f4d4..a39b2af8 100644 + continue; + } +- handle.enable_device(); ++ let irq_info = handle.config().func.legacy_interrupt_line; ++ let irq_desc = match irq_info { ++ Some(irq) => format!("INTx#{irq}"), ++ None => "MSI/MSI-X only".to_string(), ++ }; ++ log::info!( ++ "pcid-spawner: {} enabled (interrupt: {}, driver: {:?})", ++ device_addr, ++ irq_desc, ++ driver.command, ++ ); + let channel_fd = handle.into_inner_fd(); command.env("PCID_CLIENT_CHANNEL", channel_fd.to_string()); @@ -8150,8 +10834,166 @@ index a968f4d4..a39b2af8 100644 } Ok(()) +diff --git a/drivers/pcid/src/cfg_access/fallback.rs b/drivers/pcid/src/cfg_access/fallback.rs +index 671d17f7..ea8f69f8 100644 +--- a/drivers/pcid/src/cfg_access/fallback.rs ++++ 
b/drivers/pcid/src/cfg_access/fallback.rs +@@ -33,7 +33,12 @@ impl Pci { + "PCI: couldn't find or access PCIe extended configuration, \ + and thus falling back to PCI 3.0 io ports" + ); +- common::acquire_port_io_rights().expect("pcid: failed to get IO port rights"); ++ common::acquire_port_io_rights() ++ .map_err(|err| { ++ log::error!("pcid: failed to get IO port rights: {err}"); ++ err ++ }) ++ .expect("pcid: IO port rights required for PCI 3.0 fallback"); + } + }); + } +@@ -61,8 +66,9 @@ impl ConfigRegionAccess for Pci { + + Self::set_iopl(); + +- let offset = +- u8::try_from(offset).expect("offset too large for PCI 3.0 configuration space"); ++ let Ok(offset) = u8::try_from(offset) else { ++ return 0xFFFFFFFF; ++ }; + let address = Self::address(address, offset); + + Pio::::new(0xCF8).write(address); +@@ -74,8 +80,9 @@ impl ConfigRegionAccess for Pci { + + Self::set_iopl(); + +- let offset = +- u8::try_from(offset).expect("offset too large for PCI 3.0 configuration space"); ++ let Ok(offset) = u8::try_from(offset) else { ++ return; ++ }; + let address = Self::address(address, offset); + + Pio::::new(0xCF8).write(address); +diff --git a/drivers/pcid/src/cfg_access/mod.rs b/drivers/pcid/src/cfg_access/mod.rs +index c2552448..0fe215a6 100644 +--- a/drivers/pcid/src/cfg_access/mod.rs ++++ b/drivers/pcid/src/cfg_access/mod.rs +@@ -38,42 +38,57 @@ fn locate_ecam_dtb( + ) + })?; + +- let address = node.reg().unwrap().next().unwrap().starting_address as u64; ++ let mut reg = node.reg().ok_or_else(|| { ++ io::Error::new(io::ErrorKind::NotFound, "pci-host-ecam-generic missing 'reg' property") ++ })?; ++ let address = reg.next().ok_or_else(|| { ++ io::Error::new(io::ErrorKind::NotFound, "pci-host-ecam-generic 'reg' has no entries") ++ })?.starting_address as u64; + +- let bus_range = node.property("bus-range").unwrap(); +- assert_eq!(bus_range.value.len(), 8); +- let start_bus = u32::from_be_bytes(<[u8; 4]>::try_from(&bus_range.value[0..4]).unwrap()); +- let end_bus = 
u32::from_be_bytes(<[u8; 4]>::try_from(&bus_range.value[4..8]).unwrap()); ++ let bus_range = node.property("bus-range").ok_or_else(|| { ++ io::Error::new(io::ErrorKind::NotFound, "pci-host-ecam-generic missing 'bus-range' property") ++ })?; ++ if bus_range.value.len() != 8 { ++ return Err(io::Error::new(io::ErrorKind::InvalidData, "pci-host-ecam-generic 'bus-range' not 8 bytes")); ++ } ++ let start_bus = u32::from_be_bytes(<[u8; 4]>::try_from(&bus_range.value[0..4]).map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "bus-range start parse failed"))?); ++ let end_bus = u32::from_be_bytes(<[u8; 4]>::try_from(&bus_range.value[4..8]).map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "bus-range end parse failed"))?); + +- // address-cells == 3, size-cells == 2, interrupt-cells == 1 +- let mut interrupt_map_data = node +- .property("interrupt-map") +- .unwrap() ++ let interrupt_map_prop = node.property("interrupt-map").ok_or_else(|| { ++ io::Error::new(io::ErrorKind::NotFound, "pci-host-ecam-generic missing 'interrupt-map' property") ++ })?; ++ let mut interrupt_map_data = interrupt_map_prop + .value + .chunks_exact(4) +- .map(|x| u32::from_be_bytes(<[u8; 4]>::try_from(x).unwrap())); ++ .map(|x| u32::from_be_bytes(<[u8; 4]>::try_from(x).unwrap_or([0, 0, 0, 0]))); + let mut interrupt_map = Vec::::new(); + while let Ok([addr1, addr2, addr3, int1, phandle]) = interrupt_map_data.next_chunk::<5>() { +- let parent = dt.find_phandle(phandle).unwrap(); +- let parent_address_cells = u32::from_be_bytes( +- parent.property("#address-cells").unwrap().value[..4] +- .try_into() +- .unwrap(), +- ); ++ let Some(parent) = dt.find_phandle(phandle) else { ++ log::warn!("pcid: DTB interrupt-map references phandle {phandle} not found, skipping"); ++ continue; ++ }; ++ let parent_address_cells = match parent.property("#address-cells") { ++ Some(prop) => u32::from_be_bytes( ++ prop.value[..4] ++ .try_into() ++ .unwrap_or([0, 0, 0, 0]), ++ ), ++ None => 0, ++ }; + match 
parent_address_cells { + 0 => {} + 1 => { +- assert_eq!(interrupt_map_data.next().unwrap(), 0); ++ let _ = interrupt_map_data.next(); + } + 2 => { +- assert_eq!(interrupt_map_data.next_chunk::<2>().unwrap(), [0, 0]); ++ let _ = interrupt_map_data.next_chunk::<2>(); + } + 3 => { +- assert_eq!(interrupt_map_data.next_chunk::<3>().unwrap(), [0, 0, 0]); ++ let _ = interrupt_map_data.next_chunk::<3>(); + } + _ => break, + }; +- let parent_interrupt_cells = parent.interrupt_cells().unwrap(); ++ let parent_interrupt_cells = parent.interrupt_cells().unwrap_or(1); + let parent_interrupt = match parent_interrupt_cells { + 1 if let Some(a) = interrupt_map_data.next() => [a, 0, 0], + 2 if let Ok([a, b]) = interrupt_map_data.next_chunk::<2>() => [a, b, 0], +@@ -94,8 +109,8 @@ fn locate_ecam_dtb( + let mut cells = interrupt_mask_node + .value + .chunks_exact(4) +- .map(|x| u32::from_be_bytes(<[u8; 4]>::try_from(x).unwrap())); +- cells.next_chunk::<4>().unwrap().to_owned() ++ .map(|x| u32::from_be_bytes(<[u8; 4]>::try_from(x).unwrap_or([0, 0, 0, 0]))); ++ cells.next_chunk::<4>().unwrap_or([u32::MAX, u32::MAX, u32::MAX, u32::MAX]).to_owned() + } else { + [u32::MAX, u32::MAX, u32::MAX, u32::MAX] + }; +@@ -104,8 +119,8 @@ fn locate_ecam_dtb( + PcieAllocs(&[PcieAlloc { + base_addr: address, + seg_group_num: 0, +- start_bus: start_bus.try_into().unwrap(), +- end_bus: end_bus.try_into().unwrap(), ++ start_bus: start_bus.try_into().map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "start_bus overflow"))?, ++ end_bus: end_bus.try_into().map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "end_bus overflow"))?, + _rsvd: [0; 4], + }]), + interrupt_map, +@@ -165,7 +180,10 @@ impl Mcfg { + // crashing. `as_encoded_bytes()` returns some superset + // of ASCII, so directly comparing it with an ASCII name + // is fine. 
+- let table_filename = table_path.file_name().unwrap().as_encoded_bytes(); ++ let table_filename = match table_path.file_name() { ++ Some(name) => name.as_encoded_bytes(), ++ None => continue, ++ }; + if table_filename.get(0..4) == Some(&MCFG_NAME) { + let bytes = fs::read(table_path)?.into_boxed_slice(); + match Mcfg::parse(&*bytes) { diff --git a/drivers/pcid/src/driver_handler.rs b/drivers/pcid/src/driver_handler.rs -index f70a7f6d..bd0db746 100644 +index f70a7f6d..64701f6c 100644 --- a/drivers/pcid/src/driver_handler.rs +++ b/drivers/pcid/src/driver_handler.rs @@ -48,8 +48,18 @@ impl<'a> DriverHandler<'a> { @@ -8175,7 +11017,26 @@ index f70a7f6d..bd0db746 100644 }, _ => None, }) -@@ -266,7 +276,7 @@ impl<'a> DriverHandler<'a> { +@@ -230,10 +240,14 @@ impl<'a> DriverHandler<'a> { + } + info.set_message_info( + message_addr, +- message_addr_and_data +- .data +- .try_into() +- .expect("pcid: MSI message data too big"), ++ match message_addr_and_data.data.try_into() { ++ Ok(d) => d, ++ Err(_) => { ++ return PcidClientResponse::Error( ++ PcidServerResponseError::InvalidBitPattern, ++ ) ++ } ++ }, + self.pcie, + ); + } +@@ -266,7 +280,7 @@ impl<'a> DriverHandler<'a> { ); } } @@ -8184,7 +11045,7 @@ index f70a7f6d..bd0db746 100644 }, PcidClientRequest::ReadConfig(offset) => { let value = unsafe { self.pcie.read(self.func.addr, offset) }; -@@ -278,7 +288,7 @@ impl<'a> DriverHandler<'a> { +@@ -278,7 +292,7 @@ impl<'a> DriverHandler<'a> { } return PcidClientResponse::WriteConfig; } @@ -8194,12 +11055,13 @@ index f70a7f6d..bd0db746 100644 } } diff --git a/drivers/pcid/src/driver_interface/bar.rs b/drivers/pcid/src/driver_interface/bar.rs -index b2c1d35b..7eaade51 100644 +index b2c1d35b..3a83bb4d 100644 --- a/drivers/pcid/src/driver_interface/bar.rs +++ b/drivers/pcid/src/driver_interface/bar.rs -@@ -1,7 +1,37 @@ +@@ -1,7 +1,38 @@ use std::convert::TryInto; +use std::fmt; ++use std::process; use serde::{Deserialize, Serialize}; @@ -8235,11 +11097,17 @@ index 
b2c1d35b..7eaade51 100644 // This type is used instead of [pci_types::Bar] in the driver interface as the // latter can't be serialized and is missing the convenience functions of [PciBar]. #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] -@@ -30,26 +60,76 @@ impl PciBar { +@@ -30,26 +61,88 @@ impl PciBar { } pub fn expect_port(&self) -> u16 { -+ self.try_port().unwrap_or_else(|err| panic!("{err}")) ++ match self.try_port() { ++ Ok(port) => port, ++ Err(err) => { ++ log::error!("{err}"); ++ process::exit(1); ++ } ++ } + } + + pub fn try_port(&self) -> Result { @@ -8256,7 +11124,13 @@ index b2c1d35b..7eaade51 100644 } pub fn expect_mem(&self) -> (usize, usize) { -+ self.try_mem().unwrap_or_else(|err| panic!("{err}")) ++ match self.try_mem() { ++ Ok(result) => result, ++ Err(err) => { ++ log::error!("{err}"); ++ process::exit(1); ++ } ++ } + } + + pub fn try_mem(&self) -> Result<(usize, usize), PciBarError> { @@ -8325,14 +11199,15 @@ index b2c1d35b..7eaade51 100644 + } +} diff --git a/drivers/pcid/src/driver_interface/cap.rs b/drivers/pcid/src/driver_interface/cap.rs -index 19521608..495aac61 100644 +index 19521608..17c26c0c 100644 --- a/drivers/pcid/src/driver_interface/cap.rs +++ b/drivers/pcid/src/driver_interface/cap.rs -@@ -1,14 +1,37 @@ +@@ -1,14 +1,44 @@ use pci_types::capability::PciCapabilityAddress; use pci_types::ConfigRegionAccess; use serde::{Deserialize, Serialize}; +use std::fmt; ++use std::process; #[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] pub struct VendorSpecificCapability { @@ -8356,7 +11231,13 @@ index 19521608..495aac61 100644 + impl VendorSpecificCapability { pub unsafe fn parse(addr: PciCapabilityAddress, access: &dyn ConfigRegionAccess) -> Self { -+ Self::try_parse(addr, access).unwrap_or_else(|err| panic!("{err}")) ++ match Self::try_parse(addr, access) { ++ Ok(cap) => cap, ++ Err(err) => { ++ log::error!("{err}"); ++ process::exit(1); ++ } ++ } + } + + pub unsafe fn try_parse( @@ -8366,7 +11247,7 @@ 
index 19521608..495aac61 100644 let dword = access.read(addr.address, addr.offset); let length = ((dword >> 16) & 0xFF) as u16; // let next = (dword >> 8) & 0xFF; -@@ -17,11 +40,9 @@ impl VendorSpecificCapability { +@@ -17,11 +47,9 @@ impl VendorSpecificCapability { // addr.offset // ); let data = if length > 0 { @@ -8381,7 +11262,7 @@ index 19521608..495aac61 100644 let mut raw_data = { (addr.offset..addr.offset + length) .step_by(4) -@@ -33,6 +54,69 @@ impl VendorSpecificCapability { +@@ -33,6 +61,75 @@ impl VendorSpecificCapability { log::warn!("Vendor specific capability is invalid"); Vec::new() }; @@ -8417,7 +11298,7 @@ index 19521608..495aac61 100644 + unsafe fn read(&self, address: PciAddress, offset: u16) -> u32 { + self.values + .lock() -+ .unwrap() ++ .expect("mock config lock poisoned") + .get(&(address, offset)) + .copied() + .unwrap_or_default() @@ -8436,7 +11317,13 @@ index 19521608..495aac61 100644 + let access = MockConfigRegionAccess::with_read(address, 0x40, 0x0010_0000); + + let capability = unsafe { VendorSpecificCapability::try_parse(capability, &access) }; -+ assert_eq!(capability.unwrap().data.len(), 13); ++ assert_eq!( ++ capability ++ .expect("valid vendor capability should parse") ++ .data ++ .len(), ++ 13 ++ ); + } + + #[test] @@ -8452,11 +11339,91 @@ index 19521608..495aac61 100644 + assert_eq!(error, VendorSpecificCapabilityError::InvalidLength(5)); } } +diff --git a/drivers/pcid/src/driver_interface/config.rs b/drivers/pcid/src/driver_interface/config.rs +index e148b26c..041f0ced 100644 +--- a/drivers/pcid/src/driver_interface/config.rs ++++ b/drivers/pcid/src/driver_interface/config.rs +@@ -47,7 +47,13 @@ impl DriverConfig { + let mut device_found = false; + for (vendor, devices) in ids { + let vendor_without_prefix = vendor.trim_start_matches("0x"); +- let vendor = i64::from_str_radix(vendor_without_prefix, 16).unwrap() as u16; ++ let Ok(vendor_val) = i64::from_str_radix(vendor_without_prefix, 16) else { ++ log::warn!( ++ "invalid 
hex vendor ID '{vendor_without_prefix}' in driver config, skipping" ++ ); ++ continue; ++ }; ++ let vendor = vendor_val as u16; + + if vendor != id.vendor_id { + continue; diff --git a/drivers/pcid/src/driver_interface/irq_helpers.rs b/drivers/pcid/src/driver_interface/irq_helpers.rs -index 28ca077a..b595d703 100644 +index 28ca077a..bff35650 100644 --- a/drivers/pcid/src/driver_interface/irq_helpers.rs +++ b/drivers/pcid/src/driver_interface/irq_helpers.rs -@@ -180,40 +180,51 @@ pub fn allocate_single_interrupt_vector(cpu_id: usize) -> io::Result io::Result { + buffer[0], buffer[1], buffer[2], buffer[3], + ])) + } else { +- panic!( +- "`/scheme/irq` scheme responded with {} bytes, expected {}", +- bytes_read, +- std::mem::size_of::() +- ); ++ return Err(io::Error::new( ++ io::ErrorKind::InvalidData, ++ format!( ++ "`/scheme/irq` scheme responded with {bytes_read} bytes, expected {}", ++ std::mem::size_of::() ++ ), ++ )); + }) + .or(Err(io::Error::new( + io::ErrorKind::InvalidData, +@@ -83,7 +86,12 @@ pub fn allocate_aligned_interrupt_vectors( + alignment: NonZeroU8, + count: u8, + ) -> io::Result)>> { +- let cpu_id = u8::try_from(cpu_id).expect("usize cpu ids not implemented yet"); ++ let cpu_id = u8::try_from(cpu_id).map_err(|_| { ++ io::Error::new( ++ io::ErrorKind::InvalidInput, ++ format!("CPU id {cpu_id} too large for u8 (usize cpu ids not supported)"), ++ ) ++ })?; + if count == 0 { + return Ok(None); + } +@@ -163,7 +171,7 @@ pub fn allocate_aligned_interrupt_vectors( + /// Allocate at most `count` interrupt vectors, which can start at any offset. Unless MSI is used + /// and an entire aligned range of vectors is needed, this function should be used. 
+ pub fn allocate_interrupt_vectors(cpu_id: usize, count: u8) -> io::Result)>> { +- allocate_aligned_interrupt_vectors(cpu_id, NonZeroU8::new(1).unwrap(), count) ++ allocate_aligned_interrupt_vectors(cpu_id, NonZeroU8::MIN, count) + } + + /// Allocate a single interrupt vector, returning both the vector number (starting from 32 up to +@@ -176,44 +184,66 @@ pub fn allocate_single_interrupt_vector(cpu_id: usize) -> io::Result return Err(err), + }; + assert_eq!(files.len(), 1); +- Ok(Some((base, files.pop().unwrap()))) ++ let handle = files.pop().ok_or_else(|| { ++ io::Error::new( ++ io::ErrorKind::Other, ++ "allocate_interrupt_vectors returned empty file list despite count=1", ++ ) ++ })?; ++ Ok(Some((base, handle))) } #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] @@ -8499,8 +11466,13 @@ index 28ca077a..b595d703 100644 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] -pub fn allocate_first_msi_interrupt_on_bsp( +pub fn allocate_single_interrupt_vector_for_msi(cpu_id: usize) -> (MsiAddrAndData, File) { -+ try_allocate_single_interrupt_vector_for_msi(cpu_id) -+ .unwrap_or_else(|err| panic!("failed to allocate MSI interrupt vector: {err}")) ++ match try_allocate_single_interrupt_vector_for_msi(cpu_id) { ++ Ok(result) => result, ++ Err(err) => { ++ log::error!("failed to allocate MSI interrupt vector: {err}"); ++ process::exit(1); ++ } ++ } +} + +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] @@ -8523,7 +11495,7 @@ index 28ca077a..b595d703 100644 let set_feature_info = MsiSetFeatureInfo { multi_message_enable: Some(0), -@@ -222,10 +233,20 @@ pub fn allocate_first_msi_interrupt_on_bsp( +@@ -222,10 +252,25 @@ pub fn allocate_first_msi_interrupt_on_bsp( }; pcid_handle.set_feature_info(SetFeatureInfo::Msi(set_feature_info)); @@ -8541,12 +11513,17 @@ index 28ca077a..b595d703 100644 +pub fn allocate_first_msi_interrupt_on_bsp( + pcid_handle: &mut crate::driver_interface::PciFunctionHandle, +) -> File { -+ 
try_allocate_first_msi_interrupt_on_bsp(pcid_handle) -+ .unwrap_or_else(|err| panic!("failed to allocate first MSI interrupt on BSP: {err}")) ++ match try_allocate_first_msi_interrupt_on_bsp(pcid_handle) { ++ Ok(handle) => handle, ++ Err(err) => { ++ log::error!("failed to allocate first MSI interrupt on BSP: {err}"); ++ process::exit(1); ++ } ++ } } pub struct InterruptVector { -@@ -234,6 +255,39 @@ pub struct InterruptVector { +@@ -234,6 +279,39 @@ pub struct InterruptVector { kind: InterruptVectorKind, } @@ -8586,7 +11563,7 @@ index 28ca077a..b595d703 100644 enum InterruptVectorKind { Legacy, Msi, -@@ -266,10 +320,10 @@ impl InterruptVector { +@@ -266,10 +344,10 @@ impl InterruptVector { // FIXME allow allocating multiple interrupt vectors // FIXME move MSI-X IRQ allocation to pcid #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] @@ -8599,7 +11576,7 @@ index 28ca077a..b595d703 100644 let features = pcid_handle.fetch_all_features(); let has_msi = features.iter().any(|feature| feature.is_msi()); -@@ -278,57 +332,79 @@ pub fn pci_allocate_interrupt_vector( +@@ -278,57 +356,89 @@ pub fn pci_allocate_interrupt_vector( if has_msix { let msix_info = match pcid_handle.feature_info(super::PciFeature::MsiX) { super::PciFeatureInfo::MsiX(msix) => msix, @@ -8664,8 +11641,13 @@ index 28ca077a..b595d703 100644 pcid_handle: &mut crate::driver_interface::PciFunctionHandle, driver: &str, ) -> InterruptVector { -+ try_pci_allocate_interrupt_vector(pcid_handle, driver) -+ .unwrap_or_else(|err| panic!("{driver}: {err}")) ++ match try_pci_allocate_interrupt_vector(pcid_handle, driver) { ++ Ok(vec) => vec, ++ Err(err) => { ++ log::error!("{driver}: {err}"); ++ process::exit(1); ++ } ++ } +} + +// FIXME support MSI on non-x86 systems @@ -8689,19 +11671,24 @@ index 28ca077a..b595d703 100644 } else { - panic!("{driver}: no interrupts supported at all") + Err(InterruptVectorError::MissingLegacyInterrupt) - } - } ++ } ++} + +#[cfg(not(any(target_arch = "x86", target_arch = 
"x86_64")))] +pub fn pci_allocate_interrupt_vector( + pcid_handle: &mut crate::driver_interface::PciFunctionHandle, + driver: &str, +) -> InterruptVector { -+ try_pci_allocate_interrupt_vector(pcid_handle, driver) -+ .unwrap_or_else(|err| panic!("{driver}: {err}")) -+} ++ match try_pci_allocate_interrupt_vector(pcid_handle, driver) { ++ Ok(vec) => vec, ++ Err(err) => { ++ log::error!("{driver}: {err}"); ++ process::exit(1); ++ } + } + } diff --git a/drivers/pcid/src/driver_interface/mod.rs b/drivers/pcid/src/driver_interface/mod.rs -index bbc7304e..b0fb8aa8 100644 +index bbc7304e..9d7172b9 100644 --- a/drivers/pcid/src/driver_interface/mod.rs +++ b/drivers/pcid/src/driver_interface/mod.rs @@ -30,7 +30,7 @@ pub struct LegacyInterruptLine { @@ -8713,7 +11700,7 @@ index bbc7304e..b0fb8aa8 100644 if let Some((phandle, addr, cells)) = self.phandled { let path = match cells { 1 => format!("/scheme/irq/phandle-{}/{}", phandle, addr[0]), -@@ -39,17 +39,25 @@ impl LegacyInterruptLine { +@@ -39,15 +39,28 @@ impl LegacyInterruptLine { "/scheme/irq/phandle-{}/{},{},{}", phandle, addr[0], addr[1], addr[2] ), @@ -8734,17 +11721,33 @@ index bbc7304e..b0fb8aa8 100644 } else { File::open(format!("/scheme/irq/{}", self.irq)) - .unwrap_or_else(|err| panic!("{driver}: failed to open IRQ file: {err}")) - } - } ++ } ++ } + + pub fn irq_handle(self, driver: &str) -> File { -+ self.try_irq_handle(driver) -+ .unwrap_or_else(|err| panic!("{driver}: failed to open IRQ file: {err}")) -+ } ++ match self.try_irq_handle(driver) { ++ Ok(handle) => handle, ++ Err(err) => { ++ log::error!("{driver}: failed to open IRQ file: {err}"); ++ process::exit(1); ++ } + } + } } - - impl fmt::Display for LegacyInterruptLine { -@@ -247,6 +255,7 @@ pub enum PcidClientRequest { +@@ -59,8 +72,10 @@ impl fmt::Display for LegacyInterruptLine { + 1 => write!(f, "(phandle {}, {:?})", phandle, addr[0]), + 2 => write!(f, "(phandle {}, {:?},{:?})", phandle, addr[0], addr[1]), + 3 => write!(f, "(phandle {}, {:?})", 
phandle, addr), +- _ => panic!( +- "unexpected number of IRQ description cells for phandle {phandle}: {cells}" ++ _ => write!( ++ f, ++ "(phandle {}, invalid IRQ description cells: {cells})", ++ phandle, + ), + } + } else { +@@ -247,6 +262,7 @@ pub enum PcidClientRequest { pub enum PcidServerResponseError { NonexistentFeature(PciFeature), InvalidBitPattern, @@ -8752,10 +11755,35 @@ index bbc7304e..b0fb8aa8 100644 } #[derive(Debug, Serialize, Deserialize)] -@@ -307,6 +316,38 @@ fn recv(r: &mut File) -> T { - bincode::deserialize_from(&data[..]).expect("couldn't deserialize pcid message") +@@ -278,33 +294,51 @@ pub struct PciFunctionHandle { } + fn send(w: &mut File, message: &T) { +- let mut data = Vec::new(); +- bincode::serialize_into(&mut data, message).expect("couldn't serialize pcid message"); +- match w.write(&data) { +- Ok(len) => assert_eq!(len, data.len()), ++ if let Err(err) = send_result(w, message) { ++ log::error!("pcid send failed: {err}"); ++ process::exit(1); ++ } ++} ++fn recv(r: &mut File) -> T { ++ match recv_result(r) { ++ Ok(value) => value, + Err(err) => { +- log::error!("writing pcid request failed: {err}"); ++ log::error!("pcid recv failed: {err}"); + process::exit(1); + } + } + } +-fn recv(r: &mut File) -> T { +- let mut length_bytes = [0u8; 8]; +- if let Err(err) = r.read_exact(&mut length_bytes) { +- log::error!("reading pcid response length failed: {err}"); +- process::exit(1); ++ +fn send_result(w: &mut File, message: &T) -> io::Result<()> { + let mut data = Vec::new(); + bincode::serialize_into(&mut data, message) @@ -8769,29 +11797,54 @@ index bbc7304e..b0fb8aa8 100644 + io::ErrorKind::WriteZero, + format!("short pcid request write: wrote {len} of {} bytes", data.len()), + )) -+ } + } +} + +fn recv_result(r: &mut File) -> io::Result { + let mut length_bytes = [0u8; 8]; + r.read_exact(&mut length_bytes)?; -+ let length = u64::from_le_bytes(length_bytes); -+ if length > 0x100_000 { + let length = u64::from_le_bytes(length_bytes); + if 
length > 0x100_000 { +- panic!("pcid_interface: buffer too large"); + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("pcid_interface: buffer too large ({length} bytes)"), + )); -+ } -+ let mut data = vec![0u8; length as usize]; + } + let mut data = vec![0u8; length as usize]; +- if let Err(err) = r.read_exact(&mut data) { +- log::error!("reading pcid response failed: {err}"); +- process::exit(1); +- } +- +- bincode::deserialize_from(&data[..]).expect("couldn't deserialize pcid message") + r.read_exact(&mut data)?; + bincode::deserialize_from(&data[..]) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) -+} -+ + } + impl PciFunctionHandle { - fn connect_default() -> Self { - let channel_fd = match env::var("PCID_CLIENT_CHANNEL") { -@@ -369,55 +410,99 @@ impl PciFunctionHandle { +@@ -327,11 +361,14 @@ impl PciFunctionHandle { + } + + pub fn connect_by_path(device_path: &Path) -> io::Result { +- let channel_fd = libredox::call::open( +- device_path.join("channel").to_str().unwrap(), +- libredox::flag::O_RDWR, +- 0, +- )?; ++ let channel_path = device_path.join("channel"); ++ let channel_str = channel_path.to_str().ok_or_else(|| { ++ io::Error::new( ++ io::ErrorKind::InvalidData, ++ format!("device path contains invalid UTF-8: {}", device_path.display()), ++ ) ++ })?; ++ let channel_fd = libredox::call::open(channel_str, libredox::flag::O_RDWR, 0)?; + Ok(Self::connect_common(channel_fd as RawFd)) + } + +@@ -369,55 +406,99 @@ impl PciFunctionHandle { self.config.clone() } @@ -8920,7 +11973,7 @@ index bbc7304e..b0fb8aa8 100644 process::exit(1); } } -@@ -433,33 +518,50 @@ impl PciFunctionHandle { +@@ -433,33 +514,50 @@ impl PciFunctionHandle { } } pub unsafe fn read_config(&mut self, offset: u16) -> u32 { @@ -8987,7 +12040,7 @@ index bbc7304e..b0fb8aa8 100644 common::physmap( bar, bar_size, -@@ -467,18 +569,23 @@ impl PciFunctionHandle { +@@ -467,18 +565,25 @@ impl PciFunctionHandle { // FIXME once the kernel supports this use 
write-through for prefetchable BAR common::MemoryType::Uncacheable, ) @@ -9002,8 +12055,11 @@ index bbc7304e..b0fb8aa8 100644 + .map_err(|err| io::Error::other(format!("failed to map BAR at {bar:016X}: {err}")))?; - mapped_bar.insert(MappedBar { +- ptr: NonNull::new(ptr.cast::()).expect("Mapping a BAR resulted in a nullptr"), + Ok(mapped_bar.insert(MappedBar { - ptr: NonNull::new(ptr.cast::()).expect("Mapping a BAR resulted in a nullptr"), ++ ptr: NonNull::new(ptr.cast::()).ok_or_else(|| { ++ io::Error::new(io::ErrorKind::Other, "mapping a BAR resulted in a null pointer") ++ })?, bar_size, - }) + })) @@ -9021,18 +12077,19 @@ index bbc7304e..b0fb8aa8 100644 } } diff --git a/drivers/pcid/src/driver_interface/msi.rs b/drivers/pcid/src/driver_interface/msi.rs -index 0ca68ec5..6934ad49 100644 +index 0ca68ec5..cd2fd701 100644 --- a/drivers/pcid/src/driver_interface/msi.rs +++ b/drivers/pcid/src/driver_interface/msi.rs -@@ -1,6 +1,7 @@ +@@ -1,6 +1,8 @@ use std::fmt; use std::ptr::NonNull; ++use std::process; +use crate::driver_interface::bar::PciBarError; use crate::driver_interface::PciBar; use crate::PciFunctionHandle; -@@ -33,9 +34,65 @@ pub struct MsixInfo { +@@ -33,9 +35,74 @@ pub struct MsixInfo { pub pba_offset: u32, } @@ -9053,6 +12110,7 @@ index 0ca68ec5..6934ad49 100644 + end: usize, + bar_size: usize, + }, ++ NullPointer, +} + +impl fmt::Display for MsixMapError { @@ -9080,6 +12138,9 @@ index 0ca68ec5..6934ad49 100644 + f, + "MSI-X PBA {offset:#x}:{end:#x} outside BAR with length {bar_size:#x}" + ), ++ MsixMapError::NullPointer => { ++ write!(f, "MSI-X BAR mapping resulted in null pointer") ++ }, + } + } +} @@ -9087,8 +12148,13 @@ index 0ca68ec5..6934ad49 100644 impl MsixInfo { pub unsafe fn map_and_mask_all(self, pcid_handle: &mut PciFunctionHandle) -> MappedMsixRegs { - self.validate(pcid_handle.config().func.bars); -+ self.try_map_and_mask_all(pcid_handle) -+ .unwrap_or_else(|err| panic!("{err}")) ++ match self.try_map_and_mask_all(pcid_handle) { ++ Ok(regs) 
=> regs, ++ Err(err) => { ++ log::error!("{err}"); ++ process::exit(1); ++ } ++ } + } + + pub unsafe fn try_map_and_mask_all( @@ -9099,17 +12165,17 @@ index 0ca68ec5..6934ad49 100644 let virt_table_base = unsafe { pcid_handle -@@ -46,7 +103,8 @@ impl MsixInfo { +@@ -46,7 +113,8 @@ impl MsixInfo { }; let mut info = MappedMsixRegs { - virt_table_base: NonNull::new(virt_table_base.cast::()).unwrap(), + virt_table_base: NonNull::new(virt_table_base.cast::()) -+ .expect("MSI-X BAR mapping resulted in null pointer"), ++ .ok_or(MsixMapError::NullPointer)?, info: self, }; -@@ -56,21 +114,15 @@ impl MsixInfo { +@@ -56,21 +124,15 @@ impl MsixInfo { info.table_entry_pointer(i.into()).mask(); } @@ -9135,7 +12201,7 @@ index 0ca68ec5..6934ad49 100644 } let table_size = self.table_size; -@@ -80,28 +132,38 @@ impl MsixInfo { +@@ -80,28 +142,38 @@ impl MsixInfo { let pba_offset = self.pba_offset as usize; let pba_min_length = table_size.div_ceil(8); @@ -9188,7 +12254,7 @@ index 0ca68ec5..6934ad49 100644 } } -@@ -120,6 +182,68 @@ impl MappedMsixRegs { +@@ -120,6 +192,68 @@ impl MappedMsixRegs { } } @@ -9258,7 +12324,7 @@ index 0ca68ec5..6934ad49 100644 pub struct MsixTableEntry { pub addr_lo: Mmio, diff --git a/drivers/pcid/src/main.rs b/drivers/pcid/src/main.rs -index 61cd9a78..6da034ef 100644 +index 61cd9a78..8840e141 100644 --- a/drivers/pcid/src/main.rs +++ b/drivers/pcid/src/main.rs @@ -12,6 +12,7 @@ use pci_types::{ @@ -9269,10 +12335,46 @@ index 61cd9a78..6da034ef 100644 use crate::cfg_access::Pcie; use pcid_interface::{FullDeviceId, LegacyInterruptLine, PciBar, PciFunction, PciRom}; -@@ -262,14 +263,13 @@ fn daemon(daemon: daemon::Daemon) -> ! 
{ - let access_fd = socket +@@ -42,7 +43,15 @@ fn handle_parsed_header( + continue; + } + match endpoint_header.bar(i, pcie) { +- Some(TyBar::Io { port }) => bars[i as usize] = PciBar::Port(port.try_into().unwrap()), ++ Some(TyBar::Io { port }) => { ++ match u16::try_from(port) { ++ Ok(p) => bars[i as usize] = PciBar::Port(p), ++ Err(_) => { ++ warn!("pcid: BAR {} I/O port {:#x} out of u16 range, skipping", i, port); ++ bars[i as usize] = PciBar::None; ++ } ++ } ++ } + Some(TyBar::Memory32 { + address, + size, +@@ -251,7 +260,13 @@ fn daemon(daemon: daemon::Daemon) -> ! { + info!("PCI SG-BS:DV.F VEND:DEVI CL.SC.IN.RV"); + + let mut scheme = scheme::PciScheme::new(pcie); +- let socket = redox_scheme::Socket::create().expect("failed to open pci scheme socket"); ++ let socket = match redox_scheme::Socket::create() { ++ Ok(s) => s, ++ Err(err) => { ++ log::error!("pcid: failed to open pci scheme socket: {err}"); ++ std::process::exit(1); ++ } ++ }; + let handler = Blocking::new(&socket, 16); + + { +@@ -259,17 +274,23 @@ fn daemon(daemon: daemon::Daemon) -> ! { + Ok(register_pci) => { + let access_id = scheme.access(); + +- let access_fd = socket ++ let access_fd = match socket .create_this_scheme_fd(0, access_id, syscall::O_RDWR, 0) - .expect("failed to issue this resource"); +- .expect("failed to issue this resource"); - let access_bytes = access_fd.to_ne_bytes(); - let _ = register_pci - .call_wo( @@ -9281,16 +12383,80 @@ index 61cd9a78..6da034ef 100644 - &[], - ) - .expect("failed to send pci_fd to acpid"); -+ sendfd( ++ { ++ Ok(fd) => fd, ++ Err(err) => { ++ warn!("pcid: failed to issue pci access resource to acpid: {err}. Running without ACPI integration."); ++ 0 ++ } ++ }; ++ if let Err(err) = sendfd( + register_pci.raw(), + access_fd as usize, + SendFdFlags::empty().bits(), + 0, -+ ) -+ .expect("failed to send pci_fd to acpid"); ++ ) { ++ warn!("pcid: failed to send pci_fd to acpid: {err}. 
Running without ACPI integration."); ++ } } Err(err) => { if err.errno() == libredox::errno::ENODEV { +@@ -302,16 +323,24 @@ fn daemon(daemon: daemon::Daemon) -> ! { + ); + } + } +- debug!("Enumeration complete, now starting pci scheme"); ++ info!( ++ "PCI enumeration complete: {} devices, {} buses", ++ scheme.tree.len(), ++ bus_nums.len() ++ ); + +- register_sync_scheme(&socket, "pci", &mut scheme) +- .expect("failed to register pci scheme to namespace"); ++ if let Err(err) = register_sync_scheme(&socket, "pci", &mut scheme) { ++ log::error!("pcid: failed to register pci scheme to namespace: {err}"); ++ std::process::exit(1); ++ } + + let _ = daemon.ready(); + +- handler +- .process_requests_blocking(scheme) +- .expect("pcid: failed to process requests"); ++ if let Err(err) = handler.process_requests_blocking(scheme) { ++ log::error!("pcid: failed to process requests: {err}"); ++ std::process::exit(1); ++ } ++ loop {} + } + + fn scan_device( +@@ -350,16 +379,16 @@ fn scan_device( + + match header.header_type(pcie) { + HeaderType::Endpoint => { +- handle_parsed_header( +- pcie, +- tree, +- EndpointHeader::from_header(header, pcie).unwrap(), +- full_device_id, +- ); ++ match EndpointHeader::from_header(header, pcie) { ++ Some(endpoint) => handle_parsed_header(pcie, tree, endpoint, full_device_id), ++ None => warn!("pcid: failed to parse endpoint header for {}.{}.{}", bus_num, dev_num, func_num), ++ } + } + HeaderType::PciPciBridge => { +- let bridge_header = PciPciBridgeHeader::from_header(header, pcie).unwrap(); +- bus_nums.push(bridge_header.secondary_bus_number(pcie)); ++ match PciPciBridgeHeader::from_header(header, pcie) { ++ Some(bridge) => bus_nums.push(bridge.secondary_bus_number(pcie)), ++ None => warn!("pcid: failed to parse PCI-PCI bridge header for {}.{}.{}", bus_num, dev_num, func_num), ++ } + } + ty => { + warn!("pcid: unknown header type: {ty:?}"); diff --git a/drivers/pcid/src/scheme.rs b/drivers/pcid/src/scheme.rs index bb9f39a3..df026ab4 100644 --- 
a/drivers/pcid/src/scheme.rs @@ -9413,75 +12579,116 @@ index bb9f39a3..df026ab4 100644 let request = bincode::deserialize_from(buf).map_err(|_| Error::new(EINVAL))?; let response = crate::driver_handler::DriverHandler::new( +diff --git a/drivers/storage/ahcid/src/ahci/disk_ata.rs b/drivers/storage/ahcid/src/ahci/disk_ata.rs +index 4f83c51d..7423603b 100644 +--- a/drivers/storage/ahcid/src/ahci/disk_ata.rs ++++ b/drivers/storage/ahcid/src/ahci/disk_ata.rs +@@ -1,7 +1,7 @@ + use std::convert::TryInto; + use std::ptr; + +-use syscall::error::Result; ++use syscall::error::{Error, Result, EIO}; + + use common::dma::Dma; + +@@ -39,7 +39,7 @@ impl DiskATA { + .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() })) + .collect::>>()? + .try_into() +- .unwrap_or_else(|_| unreachable!()); ++ .map_err(|_| Error::new(EIO))?; + + let mut fb = unsafe { Dma::zeroed()?.assume_init() }; + let buf = unsafe { Dma::zeroed()?.assume_init() }; +diff --git a/drivers/storage/ahcid/src/ahci/disk_atapi.rs b/drivers/storage/ahcid/src/ahci/disk_atapi.rs +index a0e75c09..8fbdfbef 100644 +--- a/drivers/storage/ahcid/src/ahci/disk_atapi.rs ++++ b/drivers/storage/ahcid/src/ahci/disk_atapi.rs +@@ -37,7 +37,7 @@ impl DiskATAPI { + .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() })) + .collect::>>()? 
+ .try_into() +- .unwrap_or_else(|_| unreachable!()); ++ .map_err(|_| Error::new(EBADF))?; + + let mut fb = unsafe { Dma::zeroed()?.assume_init() }; + let mut buf = unsafe { Dma::zeroed()?.assume_init() }; +diff --git a/drivers/storage/ahcid/src/ahci/hba.rs b/drivers/storage/ahcid/src/ahci/hba.rs +index bea8792c..11a3d4ae 100644 +--- a/drivers/storage/ahcid/src/ahci/hba.rs ++++ b/drivers/storage/ahcid/src/ahci/hba.rs +@@ -178,7 +178,10 @@ impl HbaPort { + clb: &mut Dma<[HbaCmdHeader; 32]>, + ctbas: &mut [Dma; 32], + ) -> Result { +- let dest: Dma<[u16; 256]> = Dma::new([0; 256]).unwrap(); ++ let dest: Dma<[u16; 256]> = Dma::new([0; 256]).map_err(|err| { ++ error!("ahcid: failed to allocate DMA buffer: {err}"); ++ Error::new(EIO) ++ })?; + + let slot = self + .ata_start(clb, ctbas, |cmdheader, cmdfis, prdt_entries, _acmd| { diff --git a/drivers/storage/ahcid/src/main.rs b/drivers/storage/ahcid/src/main.rs -index 1f130a29..059cdd4e 100644 +index 1f130a29..cccd2980 100644 --- a/drivers/storage/ahcid/src/main.rs +++ b/drivers/storage/ahcid/src/main.rs -@@ -26,7 +26,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { - let irq = pci_config - .func - .legacy_interrupt_line +@@ -2,6 +2,7 @@ + + use std::io::{Read, Write}; + use std::os::fd::AsRawFd; ++use std::process; + use std::usize; + + use common::io::Io; +@@ -23,10 +24,13 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + let mut name = pci_config.func.name(); + name.push_str("_ahci"); + +- let irq = pci_config +- .func +- .legacy_interrupt_line - .expect("ahcid: no legacy interrupts supported"); -+ .unwrap_or_else(|| { ++ let irq = match pci_config.func.legacy_interrupt_line { ++ Some(irq) => irq, ++ None => { + error!("ahcid: no legacy interrupts supported"); -+ std::process::exit(1); -+ }); ++ process::exit(1); ++ } ++ }; common::setup_logging( "disk", -@@ -38,6 +41,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ - - info!("AHCI {}", pci_config.func.display()); - -+ if let Err(err) = pci_config.func.bars[5].try_mem() { -+ error!("ahcid: invalid BAR5: {err}"); -+ std::process::exit(1); -+ } - let address = unsafe { pcid_handle.map_bar(5) }.ptr.as_ptr() as usize; - { - let (hba_mem, disks) = ahci::disks(address as usize, &name); -@@ -54,31 +61,58 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { - &FuturesExecutor, - ); - -- let mut irq_file = irq.irq_handle("ahcid"); -+ let mut irq_file = match irq.try_irq_handle("ahcid") { -+ Ok(file) => file, -+ Err(err) => { -+ error!("ahcid: failed to open IRQ handle: {err}"); -+ std::process::exit(1); -+ } -+ }; +@@ -57,46 +61,71 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + let mut irq_file = irq.irq_handle("ahcid"); let irq_fd = irq_file.as_raw_fd() as usize; - let event_queue = RawEventQueue::new().expect("ahcid: failed to create event queue"); -+ let event_queue = match RawEventQueue::new() { -+ Ok(queue) => queue, -+ Err(err) => { -+ error!("ahcid: failed to create event queue: {err}"); -+ std::process::exit(1); -+ } -+ }; ++ let event_queue = RawEventQueue::new().unwrap_or_else(|err| { ++ error!("ahcid: failed to create event queue: {err}"); ++ process::exit(1); ++ }); - libredox::call::setrens(0, 0).expect("ahcid: failed to enter null namespace"); -+ if let Err(err) = libredox::call::setrens(0, 0) { ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { + error!("ahcid: failed to enter null namespace: {err}"); -+ std::process::exit(1); -+ } ++ process::exit(1); ++ }); event_queue .subscribe(scheme.event_handle().raw(), 1, EventFlags::READ) - .expect("ahcid: failed to event scheme socket"); + .unwrap_or_else(|err| { -+ error!("ahcid: failed to subscribe scheme socket: {err}"); -+ std::process::exit(1); ++ error!("ahcid: failed to event scheme socket: {err}"); ++ process::exit(1); + }); event_queue .subscribe(irq_fd, 1, EventFlags::READ) - .expect("ahcid: failed 
to event irq scheme"); + .unwrap_or_else(|err| { -+ error!("ahcid: failed to subscribe IRQ fd: {err}"); -+ std::process::exit(1); ++ error!("ahcid: failed to event irq scheme: {err}"); ++ process::exit(1); + }); for event in event_queue { @@ -9489,15 +12696,14 @@ index 1f130a29..059cdd4e 100644 + let event = match event { + Ok(event) => event, + Err(err) => { -+ error!("ahcid: failed to read event queue: {err}"); -+ break; ++ error!("ahcid: failed to get event: {err}"); ++ continue; + } + }; if event.fd == scheme.event_handle().raw() { - FuturesExecutor.block_on(scheme.tick()).unwrap(); + if let Err(err) = FuturesExecutor.block_on(scheme.tick()) { -+ error!("ahcid: failed to handle scheme op: {err}"); -+ break; ++ error!("ahcid: failed to handle scheme event: {err}"); + } } else if event.fd == irq_fd { let mut irq = [0; 8]; @@ -9506,158 +12712,442 @@ index 1f130a29..059cdd4e 100644 - .expect("ahcid: failed to read irq file") - >= irq.len() - { +- let is = hba_mem.is.read(); +- if is > 0 { +- let pi = hba_mem.pi.read(); +- let pi_is = pi & is; +- for i in 0..hba_mem.ports.len() { +- if pi_is & 1 << i > 0 { +- let port = &mut hba_mem.ports[i]; +- let is = port.is.read(); +- port.is.write(is); +- } + match irq_file.read(&mut irq) { -+ Ok(read) if read >= irq.len() => { - let is = hba_mem.is.read(); - if is > 0 { - let pi = hba_mem.pi.read(); -@@ -92,11 +126,21 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ ++ Ok(count) if count >= irq.len() => {} ++ Ok(_) => continue, ++ Err(err) => { ++ error!("ahcid: failed to read irq file: {err}"); ++ continue; ++ } ++ } ++ let is = hba_mem.is.read(); ++ if is > 0 { ++ let pi = hba_mem.pi.read(); ++ let pi_is = pi & is; ++ for i in 0..hba_mem.ports.len() { ++ if pi_is & 1 << i > 0 { ++ let port = &mut hba_mem.ports[i]; ++ let is = port.is.read(); ++ port.is.write(is); } - hba_mem.is.write(is); +- hba_mem.is.write(is); ++ } ++ hba_mem.is.write(is); - irq_file - .write(&irq) - .expect("ahcid: failed to write irq file"); -+ if let Err(err) = irq_file.write(&irq) { -+ error!("ahcid: failed to acknowledge IRQ: {err}"); -+ break; -+ } ++ if let Err(err) = irq_file.write(&irq) { ++ error!("ahcid: failed to write irq file: {err}"); ++ continue; ++ } - FuturesExecutor.block_on(scheme.tick()).unwrap(); -+ if let Err(err) = FuturesExecutor.block_on(scheme.tick()) { -+ error!("ahcid: failed to handle IRQ: {err}"); -+ break; -+ } -+ } -+ } -+ Ok(_) => {} -+ Err(err) => { -+ error!("ahcid: failed to read IRQ file: {err}"); -+ break; ++ if let Err(err) = FuturesExecutor.block_on(scheme.tick()) { ++ error!("ahcid: failed to handle IRQ tick: {err}"); } } } else { -@@ -105,5 +149,5 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { +@@ -105,5 +134,5 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ } } - std::process::exit(0); -+ std::process::exit(1); ++ process::exit(0); } +diff --git a/drivers/storage/ided/src/ide.rs b/drivers/storage/ided/src/ide.rs +index 5faf3250..094e5889 100644 +--- a/drivers/storage/ided/src/ide.rs ++++ b/drivers/storage/ided/src/ide.rs +@@ -184,10 +184,10 @@ impl Disk for AtaDisk { + let block = start_block + (count as u64) / 512; + + //TODO: support other LBA modes +- assert!(block < 0x1_0000_0000_0000); ++ debug_assert!(block < 0x1_0000_0000_0000); + + let sectors = (chunk.len() + 511) / 512; +- assert!(sectors <= 128); ++ debug_assert!(sectors <= 128); + + log::trace!( + "IDE read chan {} dev {} block {:#x} count {:#x}", +@@ -205,7 +205,7 @@ impl Disk for AtaDisk { + // Make PRDT EOT match chunk size + for i in 0..sectors { + chan.prdt[i] = PrdtEntry { +- phys: (chan.buf.physical() + i * 512).try_into().unwrap(), ++ phys: (chan.buf.physical() + i * 512).try_into().map_err(|_| Error::new(EIO))?, + size: 512, + flags: if i + 1 == sectors { + 1 << 15 // End of table +@@ -216,7 +216,7 @@ impl Disk for AtaDisk { + } + // Set PRDT + let prdt = chan.prdt.physical(); +- chan.busmaster_prdt.write(prdt.try_into().unwrap()); ++ chan.busmaster_prdt.write(prdt.try_into().map_err(|_| Error::new(EIO))?); + // Set to read + chan.busmaster_command.writef(1 << 3, true); + // Clear interrupt and error bits +@@ -325,10 +325,10 @@ impl Disk for AtaDisk { + let block = start_block + (count as u64) / 512; + + //TODO: support other LBA modes +- assert!(block < 0x1_0000_0000_0000); ++ debug_assert!(block < 0x1_0000_0000_0000); + + let sectors = (chunk.len() + 511) / 512; +- assert!(sectors <= 128); ++ debug_assert!(sectors <= 128); + + log::trace!( + "IDE write chan {} dev {} block {:#x} count {:#x}", +@@ -346,7 +346,7 @@ impl Disk for AtaDisk { + // Make PRDT EOT match chunk size + for i in 0..sectors { + chan.prdt[i] = PrdtEntry { +- phys: (chan.buf.physical() + i * 512).try_into().unwrap(), ++ phys: (chan.buf.physical() + i * 
512).try_into().map_err(|_| Error::new(EIO))?, + size: 512, + flags: if i + 1 == sectors { + 1 << 15 // End of table +@@ -357,7 +357,7 @@ impl Disk for AtaDisk { + } + // Set PRDT + let prdt = chan.prdt.physical(); +- chan.busmaster_prdt.write(prdt.try_into().unwrap()); ++ chan.busmaster_prdt.write(prdt.try_into().map_err(|_| Error::new(EIO))?); + // Set to write + chan.busmaster_command.writef(1 << 3, false); + // Clear interrupt and error bits diff --git a/drivers/storage/ided/src/main.rs b/drivers/storage/ided/src/main.rs -index 4197217d..6983912c 100644 +index 4197217d..03174554 100644 --- a/drivers/storage/ided/src/main.rs +++ b/drivers/storage/ided/src/main.rs -@@ -43,19 +43,42 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { - // Get controller DMA capable - let dma = pci_config.func.full_device_id.interface & 0x80 != 0; +@@ -8,6 +8,7 @@ use std::{ + fs::File, + io::{Read, Write}, + os::unix::io::{FromRawFd, RawFd}, ++ process, + sync::{Arc, Mutex}, + thread::{self, sleep}, + time::Duration, +@@ -45,17 +46,34 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! 
{ -- let busmaster_base = pci_config.func.bars[4].expect_port(); -+ let busmaster_base = match pci_config.func.bars[4].try_port() { -+ Ok(port) => port, -+ Err(err) => { -+ error!("ided: missing/invalid busmaster BAR: {err}"); -+ std::process::exit(1); -+ } -+ }; + let busmaster_base = pci_config.func.bars[4].expect_port(); let (primary, primary_irq) = if pci_config.func.full_device_id.interface & 1 != 0 { - panic!("TODO: IDE primary channel is PCI native"); -+ error!("ided: PCI native primary IDE channel is not supported yet"); -+ std::process::exit(1); ++ error!("ided: IDE primary channel PCI native mode not supported"); ++ process::exit(1); } else { - (Channel::primary_compat(busmaster_base).unwrap(), 14) -+ match Channel::primary_compat(busmaster_base) { -+ Ok(channel) => (channel, 14), -+ Err(err) => { -+ error!("ided: failed to initialize primary IDE channel: {err}"); -+ std::process::exit(1); -+ } -+ } ++ ( ++ Channel::primary_compat(busmaster_base).unwrap_or_else(|err| { ++ error!("ided: failed to init primary channel: {err}"); ++ process::exit(1); ++ }), ++ 14, ++ ) }; let (secondary, secondary_irq) = if pci_config.func.full_device_id.interface & 1 != 0 { - panic!("TODO: IDE secondary channel is PCI native"); -+ error!("ided: PCI native secondary IDE channel is not supported yet"); -+ std::process::exit(1); ++ error!("ided: IDE secondary channel PCI native mode not supported"); ++ process::exit(1); } else { - (Channel::secondary_compat(busmaster_base + 8).unwrap(), 15) -+ match Channel::secondary_compat(busmaster_base + 8) { -+ Ok(channel) => (channel, 15), -+ Err(err) => { -+ error!("ided: failed to initialize secondary IDE channel: {err}"); -+ std::process::exit(1); -+ } -+ } ++ ( ++ Channel::secondary_compat(busmaster_base + 8).unwrap_or_else(|err| { ++ error!("ided: failed to init secondary channel: {err}"); ++ process::exit(1); ++ }), ++ 15, ++ ) }; - common::acquire_port_io_rights().expect("ided: failed to get I/O privilege"); -+ if let Err(err) = 
common::acquire_port_io_rights() { ++ common::acquire_port_io_rights().unwrap_or_else(|err| { + error!("ided: failed to get I/O privilege: {err}"); -+ std::process::exit(1); -+ } ++ process::exit(1); ++ }); //TODO: move this to ide.rs? let chans = vec![ +@@ -87,13 +105,13 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { + for (chan_i, chan_lock) in chans.iter().enumerate() { + let mut chan = chan_lock.lock().unwrap(); + +- println!(" - channel {}", chan_i); ++ log::info!(" - channel {}", chan_i); + + // Disable IRQs + chan.control.write(2); + + for dev in 0..=1 { +- println!(" - device {}", dev); ++ log::info!(" - device {}", dev); + + // Select device + chan.device_select.write(0xA0 | (dev << 4)); +@@ -105,7 +123,7 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { + + // Check if device exists + if chan.status.read() == 0 { +- println!(" not found"); ++ log::info!(" not found"); + continue; + } + +@@ -125,7 +143,7 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { + + //TODO: probe ATAPI + if error { +- println!(" error"); ++ log::info!(" error"); + continue; + } + +@@ -189,12 +207,12 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { + 48 + }; + +- println!(" Serial: {}", serial.trim()); +- println!(" Firmware: {}", firmware.trim()); +- println!(" Model: {}", model.trim()); +- println!(" Size: {} MB", sectors / 2048); +- println!(" DMA: {}", dma); +- println!(" {}-bit LBA", lba_bits); ++ log::info!(" Serial: {}", serial.trim()); ++ log::info!(" Firmware: {}", firmware.trim()); ++ log::info!(" Model: {}", model.trim()); ++ log::info!(" Size: {} MB", sectors / 2048); ++ log::info!(" DMA: {}", dma); ++ log::info!(" {}-bit LBA", lba_bits); + + disks.push(AnyDisk::Ata(AtaDisk { + chan: chan_lock.clone(), +@@ -227,7 +245,10 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! 
{ + flag::O_RDWR | flag::O_NONBLOCK, + 0, + ) +- .expect("ided: failed to open irq file"); ++ .unwrap_or_else(|err| { ++ error!("ided: failed to open primary irq file: {err}"); ++ process::exit(1); ++ }); + let mut primary_irq_file = unsafe { File::from_raw_fd(primary_irq_fd as RawFd) }; + + let secondary_irq_fd = libredox::call::open( +@@ -235,70 +256,107 @@ fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { + flag::O_RDWR | flag::O_NONBLOCK, + 0, + ) +- .expect("ided: failed to open irq file"); ++ .unwrap_or_else(|err| { ++ error!("ided: failed to open secondary irq file: {err}"); ++ process::exit(1); ++ }); + let mut secondary_irq_file = unsafe { File::from_raw_fd(secondary_irq_fd as RawFd) }; + +- let event_queue = RawEventQueue::new().expect("ided: failed to open event file"); ++ let event_queue = RawEventQueue::new().unwrap_or_else(|err| { ++ error!("ided: failed to open event file: {err}"); ++ process::exit(1); ++ }); + +- libredox::call::setrens(0, 0).expect("ided: failed to enter null namespace"); ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ error!("ided: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); + + event_queue + .subscribe(scheme.event_handle().raw(), 0, EventFlags::READ) +- .expect("ided: failed to event disk scheme"); ++ .unwrap_or_else(|err| { ++ error!("ided: failed to event disk scheme: {err}"); ++ process::exit(1); ++ }); + + event_queue + .subscribe(primary_irq_fd, 0, EventFlags::READ) +- .expect("ided: failed to event irq scheme"); ++ .unwrap_or_else(|err| { ++ error!("ided: failed to event primary irq: {err}"); ++ process::exit(1); ++ }); + + event_queue + .subscribe(secondary_irq_fd, 0, EventFlags::READ) +- .expect("ided: failed to event irq scheme"); ++ .unwrap_or_else(|err| { ++ error!("ided: failed to event secondary irq: {err}"); ++ process::exit(1); ++ }); + + for event in event_queue { +- let event = event.unwrap(); ++ let event = match event { ++ Ok(event) => event, ++ 
Err(err) => { ++ error!("ided: failed to get event: {err}"); ++ continue; ++ } ++ }; + if event.fd == scheme.event_handle().raw() { +- FuturesExecutor.block_on(scheme.tick()).unwrap(); ++ if let Err(err) = FuturesExecutor.block_on(scheme.tick()) { ++ error!("ided: failed to handle scheme event: {err}"); ++ } + } else if event.fd == primary_irq_fd { + let mut irq = [0; 8]; +- if primary_irq_file +- .read(&mut irq) +- .expect("ided: failed to read irq file") +- >= irq.len() +- { +- let _chan = chans[0].lock().unwrap(); +- //TODO: check chan for irq ++ match primary_irq_file.read(&mut irq) { ++ Ok(count) if count >= irq.len() => {} ++ Ok(_) => continue, ++ Err(err) => { ++ error!("ided: failed to read primary irq file: {err}"); ++ continue; ++ } ++ } ++ let _chan = chans[0].lock().unwrap(); ++ //TODO: check chan for irq + +- primary_irq_file +- .write(&irq) +- .expect("ided: failed to write irq file"); ++ if let Err(err) = primary_irq_file.write(&irq) { ++ error!("ided: failed to write primary irq file: {err}"); ++ continue; ++ } + +- FuturesExecutor.block_on(scheme.tick()).unwrap(); ++ if let Err(err) = FuturesExecutor.block_on(scheme.tick()) { ++ error!("ided: failed to handle primary IRQ tick: {err}"); + } + } else if event.fd == secondary_irq_fd { + let mut irq = [0; 8]; +- if secondary_irq_file +- .read(&mut irq) +- .expect("ided: failed to read irq file") +- >= irq.len() +- { +- let _chan = chans[1].lock().unwrap(); +- //TODO: check chan for irq ++ match secondary_irq_file.read(&mut irq) { ++ Ok(count) if count >= irq.len() => {} ++ Ok(_) => continue, ++ Err(err) => { ++ error!("ided: failed to read secondary irq file: {err}"); ++ continue; ++ } ++ } ++ let _chan = chans[1].lock().unwrap(); ++ //TODO: check chan for irq + +- secondary_irq_file +- .write(&irq) +- .expect("ided: failed to write irq file"); ++ if let Err(err) = secondary_irq_file.write(&irq) { ++ error!("ided: failed to write secondary irq file: {err}"); ++ continue; ++ } + +- 
FuturesExecutor.block_on(scheme.tick()).unwrap(); ++ if let Err(err) = FuturesExecutor.block_on(scheme.tick()) { ++ error!("ided: failed to handle secondary IRQ tick: {err}"); + } + } else { + error!("Unknown event {}", event.fd); + } + } + +- std::process::exit(0); ++ process::exit(0); + } + + #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] + fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! { +- unimplemented!() ++ log::error!("ided: unsupported architecture"); ++ process::exit(1); + } diff --git a/drivers/storage/nvmed/src/main.rs b/drivers/storage/nvmed/src/main.rs -index beb1b689..8c79ba5e 100644 +index beb1b689..3772f4e5 100644 --- a/drivers/storage/nvmed/src/main.rs +++ b/drivers/storage/nvmed/src/main.rs -@@ -75,30 +75,62 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { +@@ -2,6 +2,7 @@ use std::cell::RefCell; + use std::fs::File; + use std::io::{self, Read, Write}; + use std::os::fd::AsRawFd; ++use std::process; + use std::rc::Rc; + use std::sync::Arc; + use std::usize; +@@ -22,7 +23,10 @@ struct NvmeDisk { - log::debug!("NVME PCI CONFIG: {:?}", pci_config); + impl Disk for NvmeDisk { + fn block_size(&self) -> u32 { +- self.ns.block_size.try_into().unwrap() ++ self.ns.block_size.try_into().unwrap_or_else(|_| { ++ log::error!("nvmed: block size {} does not fit in u32", self.ns.block_size); ++ process::exit(1); ++ }) + } -+ if let Err(err) = pci_config.func.bars[0].try_mem() { -+ log::error!("nvmed: invalid BAR0: {err}"); -+ std::process::exit(1); -+ } - let address = unsafe { pcid_handle.map_bar(0).ptr }; + fn size(&self) -> u64 { +@@ -79,26 +83,43 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ -- let interrupt_vector = irq_helpers::pci_allocate_interrupt_vector(&mut pcid_handle, "nvmed"); -+ let interrupt_vector = match irq_helpers::try_pci_allocate_interrupt_vector(&mut pcid_handle, "nvmed") { -+ Ok(vector) => vector, -+ Err(err) => { -+ log::error!("nvmed: failed to allocate interrupt vector: {err}"); -+ std::process::exit(1); -+ } -+ }; + let interrupt_vector = irq_helpers::pci_allocate_interrupt_vector(&mut pcid_handle, "nvmed"); let iv = interrupt_vector.vector(); - let irq_handle = interrupt_vector.irq_handle().try_clone().unwrap(); -+ let irq_handle = match interrupt_vector.irq_handle().try_clone() { -+ Ok(handle) => handle, -+ Err(err) => { -+ log::error!("nvmed: failed to clone IRQ handle: {err}"); -+ std::process::exit(1); -+ } -+ }; ++ let irq_handle = interrupt_vector.irq_handle().try_clone().unwrap_or_else(|err| { ++ log::error!("nvmed: failed to clone IRQ handle: {err}"); ++ process::exit(1); ++ }); -- let mut nvme = Nvme::new(address.as_ptr() as usize, interrupt_vector, pcid_handle) + let mut nvme = Nvme::new(address.as_ptr() as usize, interrupt_vector, pcid_handle) - .expect("nvmed: failed to allocate driver data"); -+ let mut nvme = match Nvme::new(address.as_ptr() as usize, interrupt_vector, pcid_handle) { -+ Ok(nvme) => nvme, -+ Err(err) => { -+ log::error!("nvmed: failed to allocate driver data: {err}"); -+ std::process::exit(1); -+ } -+ }; - +- - unsafe { nvme.init().expect("nvmed: failed to init") } -+ if let Err(err) = unsafe { nvme.init() } { -+ log::error!("nvmed: failed to init: {err}"); -+ std::process::exit(1); ++ .unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate driver data: {err}"); ++ process::exit(1); ++ }); ++ ++ unsafe { ++ nvme.init().unwrap_or_else(|err| { ++ log::error!("nvmed: failed to init: {err}"); ++ process::exit(1); ++ }); + } log::debug!("Finished base initialization"); let nvme = Arc::new(nvme); let executor = nvme::executor::init(Arc::clone(&nvme), iv, false /* FIXME */, irq_handle); -- let 
mut time_handle = File::open(&format!("/scheme/time/{}", libredox::flag::CLOCK_MONOTONIC)) + let mut time_handle = File::open(&format!("/scheme/time/{}", libredox::flag::CLOCK_MONOTONIC)) - .expect("failed to open time handle"); -+ let mut time_handle = match File::open(&format!("/scheme/time/{}", libredox::flag::CLOCK_MONOTONIC)) { -+ Ok(handle) => handle, -+ Err(err) => { ++ .unwrap_or_else(|err| { + log::error!("nvmed: failed to open time handle: {err}"); -+ std::process::exit(1); -+ } -+ }; ++ process::exit(1); ++ }); let mut time_events = Box::pin( executor.register_external_event(time_handle.as_raw_fd() as usize, event::EventFlags::READ), @@ -9665,165 +13155,630 @@ index beb1b689..8c79ba5e 100644 // Try to init namespaces for 5 seconds - time_arm(&mut time_handle, 5).expect("failed to arm timer"); -+ if let Err(err) = time_arm(&mut time_handle, 5) { -+ log::error!("nvmed: failed to arm init timer: {err}"); -+ std::process::exit(1); -+ } ++ time_arm(&mut time_handle, 5).unwrap_or_else(|err| { ++ log::error!("nvmed: failed to arm timer: {err}"); ++ process::exit(1); ++ }); let namespaces = executor.block_on(async { let namespaces_future = nvme.init_with_queues(); let time_future = time_events.as_mut().next(); -@@ -106,7 +138,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { +@@ -106,7 +127,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! 
{ futures::pin_mut!(time_future); match futures::future::select(namespaces_future, time_future).await { futures::future::Either::Left((namespaces, _)) => namespaces, - futures::future::Either::Right(_) => panic!("timeout on init"), + futures::future::Either::Right(_) => { -+ log::error!("nvmed: timeout waiting for queue initialization"); -+ std::process::exit(1); ++ log::error!("nvmed: timeout on init"); ++ process::exit(1); + } } }); log::debug!("Initialized!"); -diff --git a/drivers/storage/usbscsid/src/main.rs b/drivers/storage/usbscsid/src/main.rs -index 5382d118..3a403bd3 100644 ---- a/drivers/storage/usbscsid/src/main.rs -+++ b/drivers/storage/usbscsid/src/main.rs -@@ -3,7 +3,7 @@ use std::env; +@@ -134,7 +158,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + event::EventFlags::READ, + )); - use driver_block::{Disk, DiskScheme, ExecutorTrait}; - use syscall::{Error, EIO}; --use xhcid_interface::{ConfigureEndpointsReq, PortId, XhciClientHandle}; -+use xhcid_interface::{PortId, XhciClientHandle}; +- libredox::call::setrens(0, 0).expect("nvmed: failed to enter null namespace"); ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("nvmed: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); - pub mod protocol; - pub mod scsi; -@@ -12,9 +12,9 @@ use crate::protocol::Protocol; - use crate::scsi::Scsi; + log::debug!("Starting to listen for scheme events"); - fn main() { -- daemon::Daemon::new(daemon); -+ run(); +@@ -150,5 +177,5 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! { + + //TODO: destroy NVMe stuff + +- std::process::exit(0); ++ process::exit(0); } --fn daemon(daemon: daemon::Daemon) -> ! { -+fn run() -> ! 
{ - let mut args = env::args().skip(1); +diff --git a/drivers/storage/nvmed/src/nvme/executor.rs b/drivers/storage/nvmed/src/nvme/executor.rs +index 6242fa98..c1435e88 100644 +--- a/drivers/storage/nvmed/src/nvme/executor.rs ++++ b/drivers/storage/nvmed/src/nvme/executor.rs +@@ -34,7 +34,12 @@ impl Hardware for NvmeHw { + &VTABLE + } + fn current() -> std::rc::Rc> { +- THE_EXECUTOR.with(|exec| Rc::clone(exec.borrow().as_ref().unwrap())) ++ THE_EXECUTOR.with(|exec| { ++ Rc::clone(exec.borrow().as_ref().unwrap_or_else(|| { ++ log::error!("nvmed: internal error: executor not initialized"); ++ std::process::exit(1); ++ })) ++ }) + } + fn try_submit( + nvme: &Arc, +diff --git a/drivers/storage/nvmed/src/nvme/identify.rs b/drivers/storage/nvmed/src/nvme/identify.rs +index 05e5b9b2..b1b6e959 100644 +--- a/drivers/storage/nvmed/src/nvme/identify.rs ++++ b/drivers/storage/nvmed/src/nvme/identify.rs +@@ -126,7 +126,7 @@ impl LbaFormat { + 0b01 => RelativePerformance::Better, + 0b10 => RelativePerformance::Good, + 0b11 => RelativePerformance::Degraded, +- _ => unreachable!(), ++ _ => RelativePerformance::Degraded, + } + } + pub fn is_available(&self) -> bool { +@@ -153,7 +153,14 @@ impl Nvme { + /// Returns the serial number, model, and firmware, in that order. + pub async fn identify_controller(&self) { + // TODO: Use same buffer +- let data: Dma = unsafe { Dma::zeroed().unwrap().assume_init() }; ++ let data: Dma = unsafe { ++ Dma::zeroed() ++ .map(|dma| dma.assume_init()) ++ .unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate identify DMA: {err}"); ++ std::process::exit(1); ++ }) ++ }; - const USAGE: &'static str = "usbscsid "; -@@ -67,15 +67,6 @@ fn daemon(daemon: daemon::Daemon) -> ! 
{ - }) - .expect("Failed to find suitable configuration"); + // println!(" - Attempting to identify controller"); + let comp = self +@@ -182,7 +189,14 @@ impl Nvme { + } + pub async fn identify_namespace_list(&self, base: u32) -> Vec { + // TODO: Use buffer +- let data: Dma<[u32; 1024]> = unsafe { Dma::zeroed().unwrap().assume_init() }; ++ let data: Dma<[u32; 1024]> = unsafe { ++ Dma::zeroed() ++ .map(|dma| dma.assume_init()) ++ .unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate namespace list DMA: {err}"); ++ std::process::exit(1); ++ }) ++ }; -- handle -- .configure_endpoints(&ConfigureEndpointsReq { -- config_desc: configuration_value, -- interface_desc: Some(interface_num), -- alternate_setting: Some(alternate_setting), -- hub_ports: None, -- }) -- .expect("Failed to configure endpoints"); + // println!(" - Attempting to retrieve namespace ID list"); + let comp = self +@@ -198,7 +212,14 @@ impl Nvme { + } + pub async fn identify_namespace(&self, nsid: u32) -> NvmeNamespace { + //TODO: Use buffer +- let data: Dma = unsafe { Dma::zeroed().unwrap().assume_init() }; ++ let data: Dma = unsafe { ++ Dma::zeroed() ++ .map(|dma| dma.assume_init()) ++ .unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate namespace DMA: {err}"); ++ std::process::exit(1); ++ }) ++ }; + + log::debug!("Attempting to identify namespace {nsid}"); + let comp = self +@@ -216,7 +237,10 @@ impl Nvme { + let block_size = data + .formatted_lba_size() + .lba_data_size() +- .expect("nvmed: error: size outside 512-2^64 range"); ++ .unwrap_or_else(|| { ++ log::error!("nvmed: error: size outside 512-2^64 range"); ++ std::process::exit(1); ++ }); + log::debug!("NVME block size: {}", block_size); + + NvmeNamespace { +diff --git a/drivers/storage/nvmed/src/nvme/mod.rs b/drivers/storage/nvmed/src/nvme/mod.rs +index 682ee933..90a25d5b 100644 +--- a/drivers/storage/nvmed/src/nvme/mod.rs ++++ b/drivers/storage/nvmed/src/nvme/mod.rs +@@ -160,7 +160,15 @@ impl Nvme { + } + fn 
cur_thread_ctxt(&self) -> Arc> { + // TODO: multi-threading +- Arc::clone(self.thread_ctxts.read().get(&0).unwrap()) ++ Arc::clone( ++ self.thread_ctxts ++ .read() ++ .get(&0) ++ .unwrap_or_else(|| { ++ log::error!("nvmed: internal error: thread context 0 missing"); ++ std::process::exit(1); ++ }), ++ ) + } + + pub unsafe fn submission_queue_tail(&self, qid: u16, tail: u16) { +@@ -208,10 +216,22 @@ impl Nvme { + } + + for (qid, iv) in self.cq_ivs.get_mut().iter_mut() { +- let ctxt = thread_ctxts.get(&0).unwrap().lock(); ++ let ctxt = match thread_ctxts.get(&0) { ++ Some(c) => c.lock(), ++ None => { ++ log::error!("nvmed: internal error: thread context 0 missing"); ++ return Err(Error::new(EIO)); ++ } ++ }; + let queues = ctxt.queues.borrow(); + +- let &(ref cq, ref sq) = queues.get(qid).unwrap(); ++ let (cq, sq) = match queues.get(qid) { ++ Some(pair) => pair, ++ None => { ++ log::error!("nvmed: internal error: queue {qid} missing"); ++ return Err(Error::new(EIO)); ++ } ++ }; + log::debug!( + "iv {iv} [cq {qid}: {:X}, {}] [sq {qid}: {:X}, {}]", + cq.data.physical(), +@@ -222,7 +242,13 @@ impl Nvme { + } + + { +- let main_ctxt = thread_ctxts.get(&0).unwrap().lock(); ++ let main_ctxt = match thread_ctxts.get(&0) { ++ Some(c) => c.lock(), ++ None => { ++ log::error!("nvmed: internal error: thread context 0 missing"); ++ return Err(Error::new(EIO)); ++ } ++ }; + + for (i, prp) in main_ctxt.buffer_prp.borrow_mut().iter_mut().enumerate() { + *prp = (main_ctxt.buffer.borrow_mut().physical() + i * 4096) as u64; +@@ -231,7 +257,13 @@ impl Nvme { + let regs = self.regs.get_mut(); + + let mut queues = main_ctxt.queues.borrow_mut(); +- let (asq, acq) = queues.get_mut(&0).unwrap(); ++ let (asq, acq) = match queues.get_mut(&0) { ++ Some(pair) => pair, ++ None => { ++ log::error!("nvmed: internal error: admin queue pair missing"); ++ return Err(Error::new(EIO)); ++ } ++ }; + regs.aqa + .write(((acq.data.len() as u32 - 1) << 16) | (asq.data.len() as u32 - 1)); + 
regs.asq_low.write(asq.data.physical() as u32); +@@ -281,14 +313,14 @@ impl Nvme { + let vector = vector as u8; + + if masked { +- assert_ne!( ++ debug_assert_ne!( + to_clear & (1 << vector), + (1 << vector), + "nvmed: internal error: cannot both mask and set" + ); + to_mask |= 1 << vector; + } else { +- assert_ne!( ++ debug_assert_ne!( + to_mask & (1 << vector), + (1 << vector), + "nvmed: internal error: cannot both mask and set" +@@ -326,22 +358,27 @@ impl Nvme { + cmd_init: impl FnOnce(CmdId) -> NvmeCmd, + fail: impl FnOnce(), + ) -> Option<(CqId, CmdId)> { +- match ctxt.queues.borrow_mut().get_mut(&sq_id).unwrap() { +- (sq, _cq) => { +- if sq.is_full() { +- fail(); +- return None; +- } +- let cmd_id = sq.tail; +- let tail = sq.submit_unchecked(cmd_init(cmd_id)); - - let mut protocol = protocol::setup(&handle, protocol, &desc, &conf_desc, &if_desc) - .expect("Failed to setup protocol"); +- // TODO: Submit in bulk +- unsafe { +- self.submission_queue_tail(sq_id, tail); +- } +- Some((sq_id, cmd_id)) ++ let mut queues_ref = ctxt.queues.borrow_mut(); ++ let (sq, _cq) = match queues_ref.get_mut(&sq_id) { ++ Some(pair) => pair, ++ None => { ++ log::error!("nvmed: internal error: submission queue {sq_id} missing"); ++ fail(); ++ return None; + } ++ }; ++ if sq.is_full() { ++ fail(); ++ return None; ++ } ++ let cmd_id = sq.tail; ++ let tail = sq.submit_unchecked(cmd_init(cmd_id)); ++ ++ // TODO: Submit in bulk ++ unsafe { ++ self.submission_queue_tail(sq_id, tail); + } ++ Some((sq_id, cmd_id)) + } -@@ -108,9 +99,6 @@ fn daemon(daemon: daemon::Daemon) -> ! { - &driver_block::FuturesExecutor, - ); - -- // FIXME should this wait notifying readiness until the disk scheme is created? 
-- daemon.ready(); + pub async fn create_io_completion_queue( +@@ -349,13 +386,19 @@ impl Nvme { + io_cq_id: CqId, + vector: Option, + ) -> NvmeCompQueue { +- let queue = NvmeCompQueue::new().expect("nvmed: failed to allocate I/O completion queue"); - - //libredox::call::setrens(0, 0).expect("nvmed: failed to enter null namespace"); +- let len = u16::try_from(queue.data.len()) +- .expect("nvmed: internal error: I/O CQ longer than 2^16 entries"); +- let raw_len = len +- .checked_sub(1) +- .expect("nvmed: internal error: CQID 0 for I/O CQ"); ++ let queue = NvmeCompQueue::new().unwrap_or_else(|err| { ++ log::error!("nvmed: failed to allocate I/O completion queue: {err}"); ++ std::process::exit(1); ++ }); ++ ++ let len = u16::try_from(queue.data.len()).unwrap_or_else(|_| { ++ log::error!("nvmed: internal error: I/O CQ longer than 2^16 entries"); ++ std::process::exit(1); ++ }); ++ let raw_len = len.checked_sub(1).unwrap_or_else(|| { ++ log::error!("nvmed: internal error: CQID 0 for I/O CQ"); ++ std::process::exit(1); ++ }); - event_queue -diff --git a/drivers/storage/usbscsid/src/protocol/bot.rs b/drivers/storage/usbscsid/src/protocol/bot.rs -index b751d51a..b5d43cba 100644 ---- a/drivers/storage/usbscsid/src/protocol/bot.rs -+++ b/drivers/storage/usbscsid/src/protocol/bot.rs -@@ -103,16 +103,22 @@ impl<'a> BulkOnlyTransport<'a> { - ) -> Result { - let endpoints = &if_desc.endpoints; + let comp = self + .submit_and_complete_admin_command(|cid| { +@@ -370,22 +413,28 @@ impl Nvme { + .await; -- let bulk_in_num = (endpoints -+ let bulk_in_num = endpoints - .iter() -- .position(|endpoint| endpoint.direction() == EndpDirection::In) + /*match comp.status.specific { +- 1 => panic!("invalid queue identifier"), +- 2 => panic!("invalid queue size"), +- 8 => panic!("invalid interrupt vector"), ++ 1 => { log::error!("nvmed: invalid queue identifier"); std::process::exit(1); } ++ 2 => { log::error!("nvmed: invalid queue size"); std::process::exit(1); } ++ 8 => { log::error!("nvmed: 
invalid interrupt vector"); std::process::exit(1); } + _ => (), + }*/ + + queue + } + pub async fn create_io_submission_queue(&self, io_sq_id: SqId, io_cq_id: CqId) -> NvmeCmdQueue { +- let q = NvmeCmdQueue::new().expect("failed to create submission queue"); +- +- let len = u16::try_from(q.data.len()) +- .expect("nvmed: internal error: I/O SQ longer than 2^16 entries"); +- let raw_len = len +- .checked_sub(1) +- .expect("nvmed: internal error: SQID 0 for I/O SQ"); ++ let q = NvmeCmdQueue::new().unwrap_or_else(|err| { ++ log::error!("nvmed: failed to create submission queue: {err}"); ++ std::process::exit(1); ++ }); ++ ++ let len = u16::try_from(q.data.len()).unwrap_or_else(|_| { ++ log::error!("nvmed: internal error: I/O SQ longer than 2^16 entries"); ++ std::process::exit(1); ++ }); ++ let raw_len = len.checked_sub(1).unwrap_or_else(|| { ++ log::error!("nvmed: internal error: SQID 0 for I/O SQ"); ++ std::process::exit(1); ++ }); + + let comp = self + .submit_and_complete_admin_command(|cid| { +@@ -399,9 +448,9 @@ impl Nvme { + }) + .await; + /*match comp.status.specific { +- 0 => panic!("completion queue invalid"), +- 1 => panic!("invalid queue identifier"), +- 2 => panic!("invalid queue size"), ++ 0 => { log::error!("nvmed: completion queue invalid"); std::process::exit(1); } ++ 1 => { log::error!("nvmed: invalid queue identifier"); std::process::exit(1); } ++ 2 => { log::error!("nvmed: invalid queue size"); std::process::exit(1); } + _ => (), + }*/ + +@@ -431,7 +480,10 @@ impl Nvme { + self.thread_ctxts + .read() + .get(&0) - .unwrap() -- + 1) as u8; -- let bulk_out_num = (endpoints -+ .find(|endpoint| endpoint.direction() == EndpDirection::In) -+ .map(|endpoint| endpoint.address & 0x0F) -+ .filter(|num| *num != 0) -+ .ok_or(ProtocolError::ProtocolError( -+ "missing bulk-in endpoint descriptor", -+ ))?; -+ let bulk_out_num = endpoints - .iter() -- .position(|endpoint| endpoint.direction() == EndpDirection::Out) -- .unwrap() -- + 1) as u8; -+ .find(|endpoint| 
endpoint.direction() == EndpDirection::Out) -+ .map(|endpoint| endpoint.address & 0x0F) -+ .filter(|num| *num != 0) -+ .ok_or(ProtocolError::ProtocolError( -+ "missing bulk-out endpoint descriptor", -+ ))?; ++ .unwrap_or_else(|| { ++ log::error!("nvmed: internal error: thread context 0 missing"); ++ std::process::exit(1); ++ }) + .lock() + .queues + .borrow_mut() +@@ -497,8 +549,8 @@ impl Nvme { + for chunk in buf.chunks_mut(/* TODO: buf len */ 8192) { + let blocks = (chunk.len() + block_size - 1) / block_size; - let max_lun = get_max_lun(handle, 0)?; - println!("BOT_MAX_LUN {}", max_lun); -diff --git a/drivers/storage/usbscsid/src/protocol/mod.rs b/drivers/storage/usbscsid/src/protocol/mod.rs -index a580765f..62edac60 100644 ---- a/drivers/storage/usbscsid/src/protocol/mod.rs -+++ b/drivers/storage/usbscsid/src/protocol/mod.rs -@@ -68,14 +68,14 @@ use bot::BulkOnlyTransport; - pub fn setup<'a>( - handle: &'a XhciClientHandle, - protocol: u8, -- dev_desc: &DevDesc, -+ _dev_desc: &DevDesc, - conf_desc: &ConfDesc, - if_desc: &IfDesc, --) -> Option> { -+) -> Result, ProtocolError> { - match protocol { -- 0x50 => Some(Box::new( -- BulkOnlyTransport::init(handle, conf_desc, if_desc).unwrap(), -+ 0x50 => Ok(Box::new(BulkOnlyTransport::init(handle, conf_desc, if_desc)?)), -+ _ => Err(ProtocolError::ProtocolError( -+ "unsupported USB mass-storage transport protocol", - )), -- _ => None, +- assert!(blocks > 0); +- assert!(blocks <= 0x1_0000); ++ debug_assert!(blocks > 0); ++ debug_assert!(blocks <= 0x1_0000); + + self.namespace_rw(&*ctxt, namespace, lba, (blocks - 1) as u16, false) + .await?; +@@ -525,8 +577,8 @@ impl Nvme { + for chunk in buf.chunks(/* TODO: buf len */ 8192) { + let blocks = (chunk.len() + block_size - 1) / block_size; + +- assert!(blocks > 0); +- assert!(blocks <= 0x1_0000); ++ debug_assert!(blocks > 0); ++ debug_assert!(blocks <= 0x1_0000); + + ctxt.buffer.borrow_mut()[..chunk.len()].copy_from_slice(chunk); + +diff --git 
a/drivers/storage/nvmed/src/nvme/queues.rs b/drivers/storage/nvmed/src/nvme/queues.rs +index a3712aeb..438c905c 100644 +--- a/drivers/storage/nvmed/src/nvme/queues.rs ++++ b/drivers/storage/nvmed/src/nvme/queues.rs +@@ -145,7 +145,7 @@ impl Status { + 3 => Self::PathRelatedStatus(code), + 4..=6 => Self::Rsvd(code), + 7 => Self::Vendor(code), +- _ => unreachable!(), ++ _ => Self::Vendor(code), + } } } diff --git a/drivers/storage/virtio-blkd/src/main.rs b/drivers/storage/virtio-blkd/src/main.rs -index d21236b3..f66f725d 100644 +index d21236b3..2b777937 100644 --- a/drivers/storage/virtio-blkd/src/main.rs +++ b/drivers/storage/virtio-blkd/src/main.rs -@@ -103,7 +103,10 @@ fn main() { +@@ -1,6 +1,7 @@ + #![deny(trivial_numeric_casts, unused_allocation)] + + use std::collections::BTreeMap; ++use std::process; + use std::sync::{Arc, Weak}; + + use driver_block::DiskScheme; +@@ -59,14 +60,23 @@ impl BlockDeviceConfig { + T: Sized + TryFrom, + >::Error: std::fmt::Debug, + { +- let transport = self.0.upgrade().unwrap(); ++ let transport = self.0.upgrade().unwrap_or_else(|| { ++ log::error!("virtio-blkd: transport handle dropped"); ++ process::exit(1); ++ }); + + let size = core::mem::size_of::() + .try_into() +- .expect("load_config: invalid size"); ++ .unwrap_or_else(|_| { ++ log::error!("virtio-blkd: load_config: invalid size"); ++ process::exit(1); ++ }); + + let value = transport.load_config(ty as u8, size); +- T::try_from(value).unwrap() ++ T::try_from(value).unwrap_or_else(|_| { ++ log::error!("virtio-blkd: load_config: invalid config value"); ++ process::exit(1); ++ }) + } + + /// Returns the capacity of the block device in bytes. +@@ -103,8 +113,11 @@ fn main() { } fn daemon_runner(redox_daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! 
{ - daemon(redox_daemon, pcid_handle).unwrap(); -+ if let Err(err) = daemon(redox_daemon, pcid_handle) { -+ log::error!("virtio-blkd: startup failed: {err}"); -+ std::process::exit(1); -+ } - unreachable!(); +- unreachable!(); ++ daemon(redox_daemon, pcid_handle).unwrap_or_else(|err| { ++ log::error!("virtio-blkd: daemon failed: {err}"); ++ process::exit(1); ++ }); ++ process::exit(0); } -@@ -121,7 +124,12 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: + fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow::Result<()> { +@@ -121,7 +134,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: // 0x1001 - virtio-blk let pci_config = pcid_handle.config(); - assert_eq!(pci_config.func.full_device_id.device_id, 0x1001); + if pci_config.func.full_device_id.device_id != 0x1001 { -+ return Err(anyhow::anyhow!( -+ "unexpected virtio-blk device id: {:04x}", -+ pci_config.func.full_device_id.device_id -+ )); ++ log::error!("virtio-blkd: unexpected device ID {:#06x}, expected 0x1001", pci_config.func.full_device_id.device_id); ++ process::exit(1); + } log::info!("virtio-blk: initiating startup sequence :^)"); let device = virtio_core::probe_device(&mut pcid_handle)?; +@@ -147,7 +163,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: + + let scheme_name = format!("disk.{}", name); + +- let event_queue = event::EventQueue::new().unwrap(); ++ let mut event_queue = event::EventQueue::new().unwrap_or_else(|err| { ++ log::error!("virtio-blkd: failed to create event queue: {err}"); ++ process::exit(1); ++ }); + + event::user_data! 
{ + enum Event { +@@ -162,7 +181,10 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: + &driver_block::FuturesExecutor, + ); + +- libredox::call::setrens(0, 0).expect("nvmed: failed to enter null namespace"); ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ log::error!("virtio-blkd: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); + + event_queue + .subscribe( +@@ -170,11 +192,26 @@ fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow: + Event::Scheme, + event::EventFlags::READ, + ) +- .unwrap(); +- +- for event in event_queue { +- match event.unwrap().user_data { +- Event::Scheme => futures::executor::block_on(scheme.tick()).unwrap(), ++ .unwrap_or_else(|err| { ++ log::error!("virtio-blkd: failed to subscribe to scheme events: {err}"); ++ process::exit(1); ++ }); ++ ++ loop { ++ let event = match event_queue.next() { ++ Some(Ok(event)) => event, ++ Some(Err(err)) => { ++ log::error!("virtio-blkd: failed to get event: {err}"); ++ continue; ++ } ++ None => break, ++ }; ++ match event.user_data { ++ Event::Scheme => { ++ if let Err(err) = futures::executor::block_on(scheme.tick()) { ++ log::error!("virtio-blkd: failed to handle scheme event: {err}"); ++ } ++ } + } + } + +diff --git a/drivers/storage/virtio-blkd/src/scheme.rs b/drivers/storage/virtio-blkd/src/scheme.rs +index ec4ecf73..39fb24a8 100644 +--- a/drivers/storage/virtio-blkd/src/scheme.rs ++++ b/drivers/storage/virtio-blkd/src/scheme.rs +@@ -15,19 +15,34 @@ trait BlkExtension { + + impl BlkExtension for Queue<'_> { + async fn read(&self, block: u64, target: &mut [u8]) -> usize { +- let req = Dma::new(BlockVirtRequest { ++ let req = match Dma::new(BlockVirtRequest { + ty: BlockRequestTy::In, + reserved: 0, + sector: block, +- }) +- .unwrap(); ++ }) { ++ Ok(req) => req, ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate read request DMA: {err}"); ++ return 0; ++ } ++ }; + + let result = unsafe { +- 
Dma::<[u8]>::zeroed_slice(target.len()) +- .unwrap() +- .assume_init() ++ match Dma::<[u8]>::zeroed_slice(target.len()) { ++ Ok(dma) => dma.assume_init(), ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate read buffer DMA: {err}"); ++ return 0; ++ } ++ } ++ }; ++ let status = match Dma::new(u8::MAX) { ++ Ok(s) => s, ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate read status DMA: {err}"); ++ return 0; ++ } + }; +- let status = Dma::new(u8::MAX).unwrap(); + + let chain = ChainBuilder::new() + .chain(Buffer::new(&req)) +@@ -37,28 +52,46 @@ impl BlkExtension for Queue<'_> { + + // XXX: Subtract 1 because the of status byte. + let written = self.send(chain).await as usize - 1; +- assert_eq!(*status, 0); ++ if *status != 0 { ++ log::error!("virtio-blkd: read failed with status {}", *status); ++ return 0; ++ } + + target[..written].copy_from_slice(&result); + written + } + + async fn write(&self, block: u64, target: &[u8]) -> usize { +- let req = Dma::new(BlockVirtRequest { ++ let req = match Dma::new(BlockVirtRequest { + ty: BlockRequestTy::Out, + reserved: 0, + sector: block, +- }) +- .unwrap(); ++ }) { ++ Ok(req) => req, ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate write request DMA: {err}"); ++ return 0; ++ } ++ }; + + let mut result = unsafe { +- Dma::<[u8]>::zeroed_slice(target.len()) +- .unwrap() +- .assume_init() ++ match Dma::<[u8]>::zeroed_slice(target.len()) { ++ Ok(dma) => dma.assume_init(), ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate write buffer DMA: {err}"); ++ return 0; ++ } ++ } + }; + result.copy_from_slice(target.as_ref()); + +- let status = Dma::new(u8::MAX).unwrap(); ++ let status = match Dma::new(u8::MAX) { ++ Ok(s) => s, ++ Err(err) => { ++ log::error!("virtio-blkd: failed to allocate write status DMA: {err}"); ++ return 0; ++ } ++ }; + + let chain = ChainBuilder::new() + .chain(Buffer::new(&req)) +@@ -67,7 +100,10 @@ impl BlkExtension for Queue<'_> { + .build(); + + 
self.send(chain).await as usize; +- assert_eq!(*status, 0); ++ if *status != 0 { ++ log::error!("virtio-blkd: write failed with status {}", *status); ++ return 0; ++ } + + target.len() + } diff --git a/drivers/usb/usbctl/src/main.rs b/drivers/usb/usbctl/src/main.rs index 9b5773d9..232f7cfc 100644 --- a/drivers/usb/usbctl/src/main.rs @@ -10341,7 +14296,7 @@ index ac492d5b..310fe51f 100644 let ring_ref = match endpoint_state.transfer { diff --git a/drivers/usb/xhcid/src/xhci/mod.rs b/drivers/usb/xhcid/src/xhci/mod.rs -index f2143676..9ce15161 100644 +index f2143676..0d2ec432 100644 --- a/drivers/usb/xhcid/src/xhci/mod.rs +++ b/drivers/usb/xhcid/src/xhci/mod.rs @@ -11,12 +11,13 @@ @@ -10441,7 +14396,11 @@ index f2143676..9ce15161 100644 drivers: CHashMap>, scheme_name: String, -@@ -311,6 +368,93 @@ struct PortState { +@@ -308,9 +365,97 @@ struct PortState { + slot: u8, + protocol_speed: &'static ProtocolSpeed, + cfg_idx: Option, ++ active_ifaces: BTreeMap, // iface number → active alternate setting input_context: Mutex>>, dev_desc: Option, endpoint_states: BTreeMap, @@ -10535,7 +14494,7 @@ index f2143676..9ce15161 100644 } impl PortState { -@@ -463,6 +607,7 @@ impl Xhci { +@@ -463,6 +608,7 @@ impl Xhci { handles: CHashMap::new(), next_handle: AtomicUsize::new(0), port_states: CHashMap::new(), @@ -10543,7 +14502,7 @@ index f2143676..9ce15161 100644 drivers: CHashMap::new(), scheme_name, -@@ -793,11 +938,14 @@ impl Xhci { +@@ -793,11 +939,14 @@ impl Xhci { } pub async fn attach_device(&self, port_id: PortId) -> syscall::Result<()> { @@ -10559,7 +14518,7 @@ index f2143676..9ce15161 100644 let (data, state, speed, flags) = { let port = &self.ports.lock().unwrap()[port_id.root_hub_port_index()]; (port.read(), port.state(), port.speed(), port.flags()) -@@ -808,74 +956,101 @@ impl Xhci { +@@ -808,74 +957,102 @@ impl Xhci { port_id, data, state, speed, flags ); @@ -10686,6 +14645,7 @@ index f2143676..9ce15161 100644 + input_context: Mutex::new(input), + dev_desc: None, + 
cfg_idx: None, ++ active_ifaces: BTreeMap::new(), + endpoint_states: std::iter::once(( + 0, + EndpointState { @@ -10717,7 +14677,7 @@ index f2143676..9ce15161 100644 self.update_max_packet_size(&mut *input, slot, dev_desc_8_byte) .await?; -@@ -885,97 +1060,175 @@ impl Xhci { +@@ -885,97 +1062,175 @@ impl Xhci { let dev_desc = self.get_desc(port_id, slot).await?; debug!("Got the full device descriptor!"); @@ -10753,7 +14713,11 @@ index f2143676..9ce15161 100644 + Ok(()) + } + .await; -+ + +- match self.spawn_drivers(port_id) { +- Ok(()) => (), +- Err(err) => { +- error!("Failed to spawn driver for port {}: `{}`", port_id, err) + match attach_result { + Ok(()) => { + if let Some(delay_ms) = @@ -10764,19 +14728,15 @@ index f2143676..9ce15161 100644 + port_id, delay_ms + ); + thread::sleep(Duration::from_millis(delay_ms)); -+ } - -- match self.spawn_drivers(port_id) { -- Ok(()) => (), -- Err(err) => { -- error!("Failed to spawn driver for port {}: `{}`", port_id, err) + } ++ + if lifecycle.finish_attach_success() != PortLifecycleState::Attached { + warn!( + "attach for port {} completed after detach already started; skipping publication", + port_id + ); + return Err(Error::new(EBUSY)); - } ++ } + + let staged_port_state = self + .staged_port_states @@ -10956,6 +14916,26 @@ index f2143676..9ce15161 100644 } } +@@ -1246,14 +1501,12 @@ impl Xhci { + let drivers_usercfg: &DriversConfig = &DRIVERS_CONFIG; + + for ifdesc in config_desc.interface_descs.iter() { +- //TODO: support alternate settings +- // This is difficult because the device driver must know which alternate +- // to use, but if alternates can have different classes, then a different +- // device driver may be required for each alternate. For now, we will use +- // only the default alternate setting (0) ++ // Only auto-spawn drivers for the default alternate setting (0). 
++ // Non-default alternates are selected later by the device driver ++ // via SET_INTERFACE + configure_endpoints with specific alternate_setting. + if ifdesc.alternate_setting != 0 { +- warn!( +- "ignoring port {} iface {} alternate {} class {}.{} proto {}", ++ debug!( ++ "skipping port {} iface {} alternate {} class {}.{} proto {} (non-default alternate)", + port, + ifdesc.number, + ifdesc.alternate_setting, @@ -1458,6 +1711,53 @@ pub fn start_device_enumerator(hci: &Arc>) { })); } @@ -11011,7 +14991,7 @@ index f2143676..9ce15161 100644 struct DriverConfig { name: String, diff --git a/drivers/usb/xhcid/src/xhci/scheme.rs b/drivers/usb/xhcid/src/xhci/scheme.rs -index ca27b3fe..468a98ae 100644 +index ca27b3fe..29437294 100644 --- a/drivers/usb/xhcid/src/xhci/scheme.rs +++ b/drivers/usb/xhcid/src/xhci/scheme.rs @@ -20,6 +20,7 @@ use std::convert::TryFrom; @@ -11223,7 +15203,19 @@ index ca27b3fe..468a98ae 100644 let endp_idx = endp_num.checked_sub(1).ok_or(Error::new(EIO))?; let mut port_state = self.port_state_mut(port_num)?; -@@ -950,35 +1030,102 @@ impl Xhci { +@@ -835,7 +915,10 @@ impl Xhci { + port, + usb::Setup::set_interface(interface_num, alternate_setting), + ) +- .await ++ .await?; ++ let mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(EBADFD))?; ++ port_state.active_ifaces.insert(interface_num, alternate_setting); ++ Ok(()) + } + + async fn reset_endpoint(&self, port_num: PortId, endp_num: u8, tsp: bool) -> Result<()> { +@@ -950,35 +1033,114 @@ impl Xhci { self.port_states.get_mut(&port).ok_or(Error::new(EBADF)) } @@ -11329,11 +15321,23 @@ index ca27b3fe..468a98ae 100644 - new_context_entries = entry; - } + let configuration_value = config_desc.configuration_value; -+ let endpoint_descs = config_desc -+ .interface_descs -+ .iter() -+ .flat_map(|if_desc| if_desc.endpoints.iter().copied()) -+ .collect::>(); ++ ++ let endpoint_descs = if let Some(iface_num) = req.interface_desc { ++ let alt = req.alternate_setting.unwrap_or(0); ++ 
config_desc ++ .interface_descs ++ .iter() ++ .filter(|if_desc| if_desc.number == iface_num && if_desc.alternate_setting == alt) ++ .flat_map(|if_desc| if_desc.endpoints.iter().copied()) ++ .collect::>() ++ } else { ++ config_desc ++ .interface_descs ++ .iter() ++ .filter(|if_desc| if_desc.alternate_setting == 0) ++ .flat_map(|if_desc| if_desc.endpoints.iter().copied()) ++ .collect::>() ++ }; + + let endp_desc_count = endpoint_descs.len(); + let mut new_context_entries = 1u8; @@ -11344,7 +15348,7 @@ index ca27b3fe..468a98ae 100644 } } new_context_entries += 1; -@@ -989,74 +1136,22 @@ impl Xhci { +@@ -989,74 +1151,22 @@ impl Xhci { } ( @@ -11408,7 +15412,9 @@ index ca27b3fe..468a98ae 100644 - }; - input_context.control.write(control); - } -- ++ let mut staged_endpoint_states = BTreeMap::new(); ++ let mut endpoint_programs = Vec::new(); + - for endp_idx in 0..endp_desc_count as u8 { - let endp_num = endp_idx + 1; - @@ -11418,9 +15424,7 @@ index ca27b3fe..468a98ae 100644 - warn!("failed to find endpoint {}", endp_idx); - Error::new(EIO) - })?; -+ let mut staged_endpoint_states = BTreeMap::new(); -+ let mut endpoint_programs = Vec::new(); - +- - let endp_num_xhc = Self::endp_num_to_dci(endp_num, endp_desc); + for (endp_idx, endp_desc) in endpoint_descs.iter().copied().enumerate() { + let endp_num = endp_idx as u8 + 1; @@ -11428,7 +15432,7 @@ index ca27b3fe..468a98ae 100644 let usb_log_max_streams = endp_desc.log_max_streams(); -@@ -1078,20 +1173,20 @@ impl Xhci { +@@ -1078,20 +1188,20 @@ impl Xhci { let mult = endp_desc.isoch_mult(lec); @@ -11454,7 +15458,7 @@ index ca27b3fe..468a98ae 100644 let max_error_count = 3; let ep_ty = endp_desc.xhci_ep_type()?; -@@ -1114,7 +1209,7 @@ impl Xhci { +@@ -1114,7 +1224,7 @@ impl Xhci { assert_eq!(max_error_count & 0x3, max_error_count); assert_ne!(ep_ty, 0); // 0 means invalid. 
@@ -11463,7 +15467,7 @@ index ca27b3fe..468a98ae 100644 let mut array = StreamContextArray::new::(self.cap.ac64(), 1 << (primary_streams + 1))?; -@@ -1127,15 +1222,13 @@ impl Xhci { +@@ -1127,15 +1237,13 @@ impl Xhci { array_ptr, "stream ctx ptr not aligned to 16 bytes" ); @@ -11482,7 +15486,7 @@ index ca27b3fe..468a98ae 100644 } else { let ring = Ring::new::(self.cap.ac64(), 16, true)?; let ring_ptr = ring.register(); -@@ -1145,68 +1238,185 @@ impl Xhci { +@@ -1145,68 +1253,205 @@ impl Xhci { ring_ptr, "ring pointer not aligned to 16 bytes" ); @@ -11578,13 +15582,12 @@ index ca27b3fe..468a98ae 100644 }) - .await; + .collect::>(); - -- //self.event_handler_finished(); ++ + // Configure the slot context as well, which holds the last index of the endp descs. + input_context.add_context.write(1); + input_context.drop_context.write(0); -- handle_event_trb("CONFIGURE_ENDPOINT", &event_trb, &command_trb)?; +- //self.event_handler_finished(); + const CONTEXT_ENTRIES_MASK: u32 = 0xF800_0000; + const CONTEXT_ENTRIES_SHIFT: u8 = 27; + @@ -11646,10 +15649,8 @@ index ca27b3fe..468a98ae 100644 + ) + .await; + return Err(err); - } - -- // Tell the device about this configuration. -- self.set_configuration(port, configuration_value).await?; ++ } ++ + if self.consume_test_hook("fail_after_configure_endpoint") { + info!( + "xhcid: test hook injecting failure after CONFIGURE_ENDPOINT for port {}", @@ -11677,7 +15678,8 @@ index ca27b3fe..468a98ae 100644 + .await; + return Err(err); + } -+ + +- handle_event_trb("CONFIGURE_ENDPOINT", &event_trb, &command_trb)?; + if self.consume_test_hook("fail_after_set_configuration") { + info!( + "xhcid: test hook injecting failure after SET_CONFIGURATION for port {}", @@ -11692,8 +15694,10 @@ index ca27b3fe..468a98ae 100644 + ) + .await; + return Err(Error::new(EIO)); -+ } -+ + } + +- // Tell the device about this configuration. 
+- self.set_configuration(port, configuration_value).await?; + { + let mut port_state = self.port_states.get_mut(&port).ok_or(Error::new(EBADFD))?; + port_state.cfg_idx = Some(configuration_value); @@ -11701,11 +15705,31 @@ index ca27b3fe..468a98ae 100644 + for (endp_num, endpoint_state) in staged_endpoint_states { + port_state.endpoint_states.insert(endp_num, endpoint_state); + } ++ if let Some(iface_num) = req.interface_desc { ++ let alt = req.alternate_setting.unwrap_or(0); ++ port_state.active_ifaces.insert(iface_num, alt); ++ } else if port_state.active_ifaces.is_empty() { ++ let default_iface_entries: Vec<(u8, u8)> = port_state ++ .dev_desc ++ .as_ref() ++ .and_then(|dd| dd.config_descs.iter().find(|cd| cd.configuration_value == configuration_value)) ++ .map(|cd| { ++ cd.interface_descs ++ .iter() ++ .filter(|if_desc| if_desc.alternate_setting == 0) ++ .map(|if_desc| (if_desc.number, 0u8)) ++ .collect() ++ }) ++ .unwrap_or_default(); ++ for (iface_num, alt) in default_iface_entries { ++ port_state.active_ifaces.insert(iface_num, alt); ++ } ++ } + } Ok(()) } -@@ -1857,7 +2067,7 @@ impl Xhci { +@@ -1857,7 +2102,7 @@ impl Xhci { if (flags & O_DIRECTORY != 0) || (flags & O_STAT != 0) { let mut contents = Vec::new(); @@ -11714,7 +15738,7 @@ index ca27b3fe..468a98ae 100644 if self.slot_state( self.port_states -@@ -1894,6 +2104,14 @@ impl Xhci { +@@ -1894,6 +2139,14 @@ impl Xhci { Ok(Handle::PortState(port_num)) } @@ -11729,7 +15753,7 @@ index ca27b3fe..468a98ae 100644 /// implements open() for /port/endpoints /// /// # Arguments -@@ -2088,6 +2306,30 @@ impl Xhci { +@@ -2088,6 +2341,30 @@ impl Xhci { Ok(Handle::DetachDevice(port_num)) } @@ -11760,7 +15784,7 @@ index ca27b3fe..468a98ae 100644 /// implements open() for /port/request /// /// # Arguments -@@ -2156,6 +2398,9 @@ impl SchemeSync for &Xhci { +@@ -2156,6 +2433,9 @@ impl SchemeSync for &Xhci { SchemeParameters::PortState(port_number) => { self.open_handle_port_state(port_number, flags)? 
} @@ -11770,7 +15794,7 @@ index ca27b3fe..468a98ae 100644 SchemeParameters::PortReq(port_number) => { self.open_handle_port_request(port_number, flags)? } -@@ -2174,6 +2419,12 @@ impl SchemeSync for &Xhci { +@@ -2174,6 +2454,12 @@ impl SchemeSync for &Xhci { SchemeParameters::DetachDevice(port_number) => { self.open_handle_detach_device(port_number, flags)? } @@ -11783,7 +15807,7 @@ index ca27b3fe..468a98ae 100644 }; let fd = self.next_handle.fetch_add(1, atomic::Ordering::Relaxed); -@@ -2204,7 +2455,11 @@ impl SchemeSync for &Xhci { +@@ -2204,7 +2490,11 @@ impl SchemeSync for &Xhci { //If we have a handle to the configure scheme, we need to mark it as write only. match &*guard { @@ -11796,7 +15820,7 @@ index ca27b3fe..468a98ae 100644 stat.st_mode = stat.st_mode | 0o200; } _ => {} -@@ -2254,6 +2509,8 @@ impl SchemeSync for &Xhci { +@@ -2254,6 +2544,8 @@ impl SchemeSync for &Xhci { Handle::ConfigureEndpoints(_) => Err(Error::new(EBADF)), Handle::AttachDevice(_) => Err(Error::new(EBADF)), Handle::DetachDevice(_) => Err(Error::new(EBADF)), @@ -11805,7 +15829,7 @@ index ca27b3fe..468a98ae 100644 Handle::SchemeRoot => Err(Error::new(EBADF)), &mut Handle::Endpoint(port_num, endp_num, ref mut st) => match st { -@@ -2285,6 +2542,10 @@ impl SchemeSync for &Xhci { +@@ -2285,6 +2577,10 @@ impl SchemeSync for &Xhci { Ok(Xhci::::write_dyn_string(string, buf, offset)) } @@ -11816,7 +15840,7 @@ index ca27b3fe..468a98ae 100644 &mut Handle::PortReq(port_num, ref mut st) => { let state = std::mem::replace(st, PortReqState::Tmp); drop(guard); // release the lock -@@ -2324,6 +2585,14 @@ impl SchemeSync for &Xhci { +@@ -2324,6 +2620,14 @@ impl SchemeSync for &Xhci { block_on(self.detach_device(port_num))?; Ok(buf.len()) } @@ -11831,7 +15855,7 @@ index ca27b3fe..468a98ae 100644 &mut Handle::Endpoint(port_num, endp_num, ref ep_file_ty) => match ep_file_ty { EndpointHandleTy::Ctl => block_on(self.on_write_endp_ctl(port_num, endp_num, buf)), EndpointHandleTy::Data => { -@@ -2348,6 +2617,54 
@@ impl SchemeSync for &Xhci { +@@ -2348,6 +2652,54 @@ impl SchemeSync for &Xhci { } impl Xhci { @@ -11886,7 +15910,7 @@ index ca27b3fe..468a98ae 100644 pub fn get_endp_status(&self, port_num: PortId, endp_num: u8) -> Result { let port_state = self.port_states.get(&port_num).ok_or(Error::new(EBADFD))?; -@@ -2398,6 +2715,8 @@ impl Xhci { +@@ -2398,6 +2750,8 @@ impl Xhci { endp_num: u8, clear_feature: bool, ) -> Result<()> { @@ -12093,56 +16117,42 @@ index bcb9bb15..b9e42d4a 100644 + std::process::exit(1); } diff --git a/drivers/virtio-core/src/arch/x86.rs b/drivers/virtio-core/src/arch/x86.rs -index aea86c4a..d8595645 100644 +index aea86c4a..c5b2767f 100644 --- a/drivers/virtio-core/src/arch/x86.rs +++ b/drivers/virtio-core/src/arch/x86.rs -@@ -1,6 +1,8 @@ - use crate::transport::Error; - --use pcid_interface::irq_helpers::{allocate_single_interrupt_vector_for_msi, read_bsp_apic_id}; -+use pcid_interface::irq_helpers::{ -+ read_bsp_apic_id, try_allocate_single_interrupt_vector_for_msi, -+}; - use std::fs::File; - - use crate::MSIX_PRIMARY_VECTOR; -@@ -11,9 +13,10 @@ pub fn enable_msix(pcid_handle: &mut PciFunctionHandle) -> Result { +@@ -11,7 +11,10 @@ pub fn enable_msix(pcid_handle: &mut PciFunctionHandle) -> Result { // Extended message signaled interrupts. let msix_info = match pcid_handle.feature_info(PciFeature::MsiX) { PciFeatureInfo::MsiX(capability) => capability, - _ => unreachable!(), -+ _ => return Err(Error::MissingMsix), ++ _ => { ++ log::warn!("virtio_core::enable_msix: expected MSI-X feature info"); ++ return Err(Error::Probe("unexpected PCI feature info for MSI-X")); ++ } }; -- let mut info = unsafe { msix_info.map_and_mask_all(pcid_handle) }; -+ let mut info = unsafe { msix_info.try_map_and_mask_all(pcid_handle) } -+ .map_err(|err| Error::MsixSetup(format!("failed to map MSI-X registers: {err}")))?; + let mut info = unsafe { msix_info.map_and_mask_all(pcid_handle) }; - // Allocate the primary MSI vector. 
- // FIXME allow the driver to register multiple MSI-X vectors -@@ -21,9 +24,12 @@ pub fn enable_msix(pcid_handle: &mut PciFunctionHandle) -> Result { +@@ -21,7 +24,10 @@ pub fn enable_msix(pcid_handle: &mut PciFunctionHandle) -> Result { let interrupt_handle = { let table_entry_pointer = info.table_entry_pointer(MSIX_PRIMARY_VECTOR as usize); - let destination_id = read_bsp_apic_id().expect("virtio_core: `read_bsp_apic_id()` failed"); -- let (msg_addr_and_data, interrupt_handle) = -- allocate_single_interrupt_vector_for_msi(destination_id); -+ let destination_id = read_bsp_apic_id() -+ .map_err(|err| Error::MsixSetup(format!("failed to read BSP APIC ID: {err}")))?; -+ let (msg_addr_and_data, interrupt_handle) = try_allocate_single_interrupt_vector_for_msi( -+ destination_id, -+ ) -+ .map_err(|err| Error::MsixSetup(format!("failed to allocate MSI-X vector: {err}")))?; ++ let destination_id = read_bsp_apic_id().map_err(|e| { ++ log::warn!("virtio_core::enable_msix: read_bsp_apic_id failed: {e}"); ++ Error::Probe("read_bsp_apic_id failed") ++ })?; + let (msg_addr_and_data, interrupt_handle) = + allocate_single_interrupt_vector_for_msi(destination_id); table_entry_pointer.write_addr_and_data(msg_addr_and_data); - table_entry_pointer.unmask(); - diff --git a/drivers/virtio-core/src/probe.rs b/drivers/virtio-core/src/probe.rs -index 5631ef67..3367586a 100644 +index 5631ef67..eaef1b96 100644 --- a/drivers/virtio-core/src/probe.rs +++ b/drivers/virtio-core/src/probe.rs -@@ -32,21 +32,21 @@ pub const MSIX_PRIMARY_VECTOR: u16 = 0; +@@ -31,16 +31,16 @@ pub const MSIX_PRIMARY_VECTOR: u16 = 0; + /// before starting the device. /// * Finally start the device (via [`StandardTransport::run_device`]). At this point, the device /// is alive. - /// +-/// -/// ## Panics -/// This function panics if the device is not a virtio device. 
pub fn probe_device(pcid_handle: &mut PciFunctionHandle) -> Result { @@ -12153,21 +16163,15 @@ index 5631ef67..3367586a 100644 - "virtio_core::probe_device: not a virtio device" - ); + if pci_config.func.full_device_id.vendor_id != 6900 { -+ return Err(Error::NotVirtio); ++ log::warn!( ++ "virtio_core::probe_device: skipping non-virtio device (vendor ID {:#06x})", ++ pci_config.func.full_device_id.vendor_id ++ ); ++ return Err(Error::Probe("not a virtio device")); + } let mut common_addr = None; let mut notify_addr = None; - let mut device_addr = None; - -- for raw_capability in pcid_handle.get_vendor_capabilities() { -+ for raw_capability in pcid_handle -+ .try_get_vendor_capabilities() -+ .map_err(|err| Error::MsixSetup(format!("failed to fetch vendor capabilities: {err}")))? -+ { - // SAFETY: We have verified that the length of the data is correct. - let capability = unsafe { &*(raw_capability.data.as_ptr() as *const PciCapability) }; - @@ -55,7 +55,9 @@ pub fn probe_device(pcid_handle: &mut PciFunctionHandle) -> Result continue, } @@ -12175,11 +16179,11 @@ index 5631ef67..3367586a 100644 - let (addr, _) = pci_config.func.bars[capability.bar as usize].expect_mem(); + let (addr, _) = pci_config.func.bars[capability.bar as usize] + .try_mem() -+ .map_err(|_| Error::MissingCapability("capability BAR"))?; ++ .map_err(|_| Error::Probe("BAR is not memory-mapped"))?; let address = unsafe { let addr = addr + capability.offset as usize; -@@ -100,19 +102,18 @@ pub fn probe_device(pcid_handle: &mut PciFunctionHandle) -> Result Result Result Result Result<(), Error> { - .insert_status(DeviceStatusFlags::ACKNOWLEDGE); +diff --git a/drivers/virtio-core/src/spec/split_virtqueue.rs b/drivers/virtio-core/src/spec/split_virtqueue.rs +index b9636711..23aa5484 100644 +--- a/drivers/virtio-core/src/spec/split_virtqueue.rs ++++ b/drivers/virtio-core/src/spec/split_virtqueue.rs +@@ -197,9 +197,9 @@ impl ChainBuilder { + } - device.transport.insert_status(DeviceStatusFlags::DRIVER); -+ 
device.transport.finalize_features(); - Ok(()) + pub fn build(mut self) -> Vec { +- let last_buffer = self.buffers.last_mut().expect("virtio-core: empty chain"); +- last_buffer.flags.remove(DescriptorFlags::NEXT); +- ++ if let Some(last_buffer) = self.buffers.last_mut() { ++ last_buffer.flags.remove(DescriptorFlags::NEXT); ++ } + self.buffers + } } diff --git a/drivers/virtio-core/src/transport.rs b/drivers/virtio-core/src/transport.rs -index d3445d2d..4e116d2e 100644 +index d3445d2d..99972c95 100644 --- a/drivers/virtio-core/src/transport.rs +++ b/drivers/virtio-core/src/transport.rs -@@ -19,6 +19,20 @@ pub enum Error { +@@ -19,6 +19,8 @@ pub enum Error { SyscallError(#[from] libredox::error::Error), #[error("the device is incapable of {0:?}")] InCapable(CfgType), -+ #[error("device is not a virtio device")] -+ NotVirtio, -+ #[error("virtio capability `{0}` is missing")] -+ MissingCapability(&'static str), -+ #[error("virtio notify capability has an invalid zero multiplier")] -+ InvalidNotifyMultiplier, -+ #[error("device does not support MSI-X")] -+ MissingMsix, -+ #[error("MSI-X setup failed: {0}")] -+ MsixSetup(String), -+ #[error("virtio feature negotiation failed")] -+ FeaturesNotAccepted, -+ #[error("virtio queue operation failed: {0}")] -+ QueueSetup(&'static str), ++ #[error("virtio probe: {0}")] ++ Probe(&'static str), } /// Returns the queue part sizes in bytes. 
-@@ -238,6 +252,26 @@ impl<'a> Queue<'a> { - } - } +@@ -59,14 +61,23 @@ pub fn spawn_irq_thread(irq_handle: &File, queue: &Arc>) { + let queue_copy = queue.clone(); -+fn finalize_features_checked(transport: &StandardTransport<'_>) -> Result<(), Error> { -+ if !transport.check_device_feature(VIRTIO_F_VERSION_1) { -+ return Err(Error::FeaturesNotAccepted); -+ } -+ transport.ack_driver_feature(VIRTIO_F_VERSION_1); -+ -+ let mut common = transport.common.lock().unwrap(); -+ -+ let status = common.device_status.get(); -+ common -+ .device_status -+ .set(status | DeviceStatusFlags::FEATURES_OK); -+ -+ let confirm = common.device_status.get(); -+ if (confirm & DeviceStatusFlags::FEATURES_OK) != DeviceStatusFlags::FEATURES_OK { -+ return Err(Error::FeaturesNotAccepted); -+ } -+ Ok(()) -+} -+ - unsafe impl Sync for Queue<'_> {} - unsafe impl Send for Queue<'_> {} + std::thread::spawn(move || { +- let event_queue = RawEventQueue::new().unwrap(); ++ let event_queue = match RawEventQueue::new() { ++ Ok(eq) => eq, ++ Err(err) => { ++ log::error!("virtio-core: failed to create event queue for IRQ thread: {err}"); ++ return; ++ } ++ }; -@@ -590,21 +624,8 @@ impl Transport for StandardTransport<'_> { - } +- event_queue +- .subscribe(irq_fd as usize, 0, event::EventFlags::READ) +- .unwrap(); ++ if let Err(err) = event_queue.subscribe(irq_fd as usize, 0, event::EventFlags::READ) { ++ log::error!("virtio-core: failed to subscribe to IRQ fd: {err}"); ++ return; ++ } - fn finalize_features(&self) { -- // Check VirtIO version 1 compliance. 
-- assert!(self.check_device_feature(VIRTIO_F_VERSION_1)); -- self.ack_driver_feature(VIRTIO_F_VERSION_1); -- -- let mut common = self.common.lock().unwrap(); -- -- let status = common.device_status.get(); -- common -- .device_status -- .set(status | DeviceStatusFlags::FEATURES_OK); -- -- // Re-read device status to ensure the `FEATURES_OK` bit is still set: otherwise, -- // the device does not support our subset of features and the device is unusable. -- let confirm = common.device_status.get(); +- for _ in event_queue.map(Result::unwrap) { +- // Wake up the tasks waiting on the queue. ++ for event_result in event_queue.map(|res| res) { ++ if event_result.is_err() { ++ break; ++ } + for (_, task) in queue_copy.waker.lock().unwrap().iter() { + task.wake_by_ref(); + } +@@ -604,7 +615,9 @@ impl Transport for StandardTransport<'_> { + // Re-read device status to ensure the `FEATURES_OK` bit is still set: otherwise, + // the device does not support our subset of features and the device is unusable. + let confirm = common.device_status.get(); - assert!((confirm & DeviceStatusFlags::FEATURES_OK) == DeviceStatusFlags::FEATURES_OK); -+ finalize_features_checked(self) -+ .unwrap_or_else(|err| panic!("{err}")) ++ if (confirm & DeviceStatusFlags::FEATURES_OK) != DeviceStatusFlags::FEATURES_OK { ++ log::error!("virtio-core: device rejected feature set (FEATURES_OK cleared after negotiation)"); ++ } } fn setup_config_notify(&self, vector: u16) { -@@ -640,7 +661,9 @@ impl Transport for StandardTransport<'_> { +@@ -640,7 +653,10 @@ impl Transport for StandardTransport<'_> { // Set the MSI-X vector. 
common.queue_msix_vector.set(vector); - assert!(common.queue_msix_vector.get() == vector); + if common.queue_msix_vector.get() != vector { -+ return Err(Error::QueueSetup("queue MSI-X vector was not accepted")); ++ log::error!("virtio-core: MSI-X vector {vector:#x} was not accepted by device for queue {queue_index}"); ++ return Err(Error::SyscallError(libredox::error::Error::new(libredox::errno::EIO))); + } // Enable the queue. common.queue_enable.set(1); -@@ -685,7 +708,9 @@ impl Transport for StandardTransport<'_> { +@@ -685,7 +701,9 @@ impl Transport for StandardTransport<'_> { // Set the MSI-X vector. common.queue_msix_vector.set(queue.vector); - assert!(common.queue_msix_vector.get() == queue.vector); + if common.queue_msix_vector.get() != queue.vector { -+ panic!("virtio queue MSI-X vector was not accepted during reinit"); ++ log::error!("virtio-core: MSI-X vector {:#x} was not accepted during reinit for queue {}", queue.vector, queue.queue_index); + } // Enable the queue. @@ -12466,7 +16468,7 @@ index 5682cf44..ed436619 100644 } } diff --git a/init/src/scheduler.rs b/init/src/scheduler.rs -index d42a4e57..782ca1ba 100644 +index d42a4e57..64e64e1e 100644 --- a/init/src/scheduler.rs +++ b/init/src/scheduler.rs @@ -5,6 +5,7 @@ use crate::unit::{Unit, UnitId, UnitKind, UnitStore}; @@ -12529,32 +16531,51 @@ index d42a4e57..782ca1ba 100644 match &unit.kind { UnitKind::LegacyScript { script } => { for cmd in script.clone() { -@@ -93,7 +104,7 @@ fn run(unit: &mut Unit, config: &mut InitConfig) { +@@ -92,25 +103,30 @@ fn run(unit: &mut Unit, config: &mut InitConfig) { + } UnitKind::Service { service } => { if config.skip_cmd.contains(&service.cmd) { - eprintln!("Skipping '{} {}'", service.cmd, service.args.join(" ")); +- eprintln!("Skipping '{} {}'", service.cmd, service.args.join(" ")); - return; ++ eprintln!("init: skipping {} {}", service.cmd, service.args.join(" ")); + return None; } - if config.log_debug { +- if config.log_debug { ++ eprintln!( ++ "init: 
starting {} ({})", ++ unit.info.description.as_ref().unwrap_or(&unit.id.0), ++ service.cmd, ++ ); ++ let pid = service.spawn(&config.envs); ++ if pid.is_some() { eprintln!( -@@ -102,7 +113,7 @@ fn run(unit: &mut Unit, config: &mut InitConfig) { - service.cmd, +- "Starting {} ({})", ++ "init: started {} (pid {})", + unit.info.description.as_ref().unwrap_or(&unit.id.0), +- service.cmd, ++ pid.unwrap_or(0), ); } - service.spawn(&config.envs); -+ return service.spawn(&config.envs); ++ return pid; } UnitKind::Target {} => { - if config.log_debug { -@@ -113,4 +124,5 @@ fn run(unit: &mut Unit, config: &mut InitConfig) { - } +- if config.log_debug { +- eprintln!( +- "Reached target {}", +- unit.info.description.as_ref().unwrap_or(&unit.id.0), +- ); +- } ++ eprintln!( ++ "init: reached target {}", ++ unit.info.description.as_ref().unwrap_or(&unit.id.0), ++ ); } } + None } diff --git a/init/src/service.rs b/init/src/service.rs -index ed0023e9..e06e1b16 100644 +index ed0023e9..cc95d02b 100644 --- a/init/src/service.rs +++ b/init/src/service.rs @@ -22,6 +22,8 @@ pub struct Service { @@ -12575,7 +16596,7 @@ index ed0023e9..e06e1b16 100644 let mut command = Command::new(&self.cmd); command.args(self.args.iter().map(|arg| subst_env(arg))); command.env_clear(); -@@ -46,14 +48,20 @@ impl Service { +@@ -46,20 +48,28 @@ impl Service { } command.envs(base_envs).envs(&self.envs); @@ -12598,7 +16619,16 @@ index ed0023e9..e06e1b16 100644 } }; -@@ -81,23 +89,32 @@ impl Service { + match &self.type_ { + ServiceType::Notify => match read_pipe.read_exact(&mut [0]) { +- Ok(()) => {} ++ Ok(()) => { ++ eprintln!("init: {} ready (notify)", self.cmd); ++ } + Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => { + eprintln!("init: {command:?} exited without notifying readiness"); + } +@@ -81,23 +91,34 @@ impl Service { }) => continue, Ok(0) => { eprintln!("init: {command:?} exited without notifying readiness"); @@ -12633,11 +16663,22 @@ index ed0023e9..e06e1b16 100644 + 
libredox::call::register_scheme_to_ns(current_namespace_fd, scheme, new_fd) + { + eprintln!("init: failed to register scheme {scheme:?} for {command:?}: {err}"); ++ } else { ++ eprintln!("init: {} ready (scheme {})", self.cmd, scheme); + } } ServiceType::Oneshot => { drop(read_pipe); -@@ -112,8 +129,13 @@ impl Service { +@@ -105,6 +126,8 @@ impl Service { + Ok(exit_status) => { + if !exit_status.success() { + eprintln!("init: {command:?} failed with {exit_status}"); ++ } else { ++ eprintln!("init: {} done (oneshot)", self.cmd); + } + } + Err(err) => { +@@ -112,8 +135,13 @@ impl Service { } } } diff --git a/local/patches/bootloader/P2-live-preload-guard.patch b/local/patches/bootloader/P2-live-preload-guard.patch index 435afa79..41108b55 100644 --- a/local/patches/bootloader/P2-live-preload-guard.patch +++ b/local/patches/bootloader/P2-live-preload-guard.patch @@ -2,7 +2,7 @@ diff --git a/src/main.rs b/src/main.rs index b2e2736..a6a9474 100644 --- a/src/main.rs +++ b/src/main.rs -@@ -500,33 +500,62 @@ pub extern "C" fn main() -> ! { +@@ -500,36 +500,63 @@ pub extern "C" fn main() -> ! { print!("live: 0/{} MiB", size / MIBI as u64); diff --git a/local/patches/bootloader/P2-live-preload-guard.patch.bak b/local/patches/bootloader/P2-live-preload-guard.patch.bak new file mode 100644 index 00000000..435afa79 --- /dev/null +++ b/local/patches/bootloader/P2-live-preload-guard.patch.bak @@ -0,0 +1,97 @@ +diff --git a/src/main.rs b/src/main.rs +index b2e2736..a6a9474 100644 +--- a/src/main.rs ++++ b/src/main.rs +@@ -500,33 +500,62 @@ pub extern "C" fn main() -> ! 
{ + + print!("live: 0/{} MiB", size / MIBI as u64); + +- let ptr = os.alloc_zeroed_page_aligned(size as usize); +- if ptr.is_null() { +- panic!("Failed to allocate memory for live"); +- } +- +- let live = unsafe { slice::from_raw_parts_mut(ptr, size as usize) }; +- +- let mut i = 0; +- for chunk in live.chunks_mut(MIBI) { +- print!("\rlive: {}/{} MiB", i / MIBI as u64, size / MIBI as u64); +- i += unsafe { +- fs.disk +- .read_at(fs.block + i / redoxfs::BLOCK_SIZE, chunk) +- .expect("Failed to read live disk") as u64 +- }; +- } +- println!("\rlive: {}/{} MiB", i / MIBI as u64, size / MIBI as u64); +- +- println!("Switching to live disk"); +- unsafe { +- LIVE_OPT = Some((fs.block, slice::from_raw_parts_mut(ptr, size as usize))); +- } ++ let live_size = match usize::try_from(size) { ++ Ok(live_size) => live_size, ++ Err(_) => { ++ println!("\rlive: disabled (image too large for bootloader address space)"); ++ live = false; ++ 0 ++ } ++ }; + +- area_add(OsMemoryEntry { +- base: live.as_ptr() as u64, +- size: live.len() as u64, +- kind: OsMemoryKind::Reserved, +- }); ++ let ptr = if live { ++ os.alloc_zeroed_page_aligned(live_size) ++ } else { ++ ptr::null_mut() ++ }; ++ ++ if live && ptr.is_null() { ++ println!( ++ "\rlive: disabled (unable to allocate {} MiB upfront)", ++ size / MIBI as u64 ++ ); ++ live = false; ++ } ++ ++ let live = if live { ++ Some(unsafe { slice::from_raw_parts_mut(ptr, live_size) }) ++ } else { ++ println!("Continuing without live preload"); ++ None ++ }; ++ ++ if let Some(live) = live { ++ let mut i = 0; ++ for chunk in live.chunks_mut(MIBI) { ++ print!("\rlive: {}/{} MiB", i / MIBI as u64, size / MIBI as u64); ++ i += unsafe { ++ fs.disk ++ .read_at(fs.block + i / redoxfs::BLOCK_SIZE, chunk) ++ .expect("Failed to read live disk") as u64 ++ }; ++ } ++ println!("\rlive: {}/{} MiB", i / MIBI as u64, size / MIBI as u64); ++ ++ println!("Switching to live disk"); ++ unsafe { ++ LIVE_OPT = Some((fs.block, slice::from_raw_parts_mut(ptr, live_size))); 
++ } ++ ++ area_add(OsMemoryEntry { ++ base: live.as_ptr() as u64, ++ size: live.len() as u64, ++ kind: OsMemoryKind::Reserved, ++ }); ++ ++ Some(live) ++ } else { ++ None ++ } +- +- Some(live) + } else { + None + }; diff --git a/local/patches/bootloader/P3-uefi-live-image-safe-read.patch b/local/patches/bootloader/P3-uefi-live-image-safe-read.patch index c7f7c568..9051018c 100644 --- a/local/patches/bootloader/P3-uefi-live-image-safe-read.patch +++ b/local/patches/bootloader/P3-uefi-live-image-safe-read.patch @@ -11,7 +11,7 @@ index 4b0bf31..90a97b8 100644 let mut esp_fs = match FileSystem::handle_protocol(esp_handle) { Ok(esp_fs) => esp_fs, Err(err) => { -@@ -87,9 +89,37 @@ fn esp_live_image(esp_handle: Handle, esp_device_path: &DevicePath) -> Option Option DevicePath + } + + fn esp_live_image(esp_handle: Handle, esp_device_path: &DevicePath) -> Option> { ++ const MAX_LIVE_IMAGE_PRELOAD: usize = 128 * 1024 * 1024; ++ + let mut esp_fs = match FileSystem::handle_protocol(esp_handle) { + Ok(esp_fs) => esp_fs, + Err(err) => { +@@ -87,9 +89,37 @@ fn esp_live_image(esp_handle: Handle, esp_device_path: &DevicePath) -> Option read, ++ Err(err) => { ++ log::warn!( ++ "Failed while reading {}\\redox-live.iso: {:?}", ++ device_path_to_string(esp_device_path), ++ err ++ ); ++ return None; ++ } ++ }; ++ ++ if read == 0 { ++ break; ++ } + +- live_image.read_to_end(&mut buffer).unwrap(); ++ if buffer.len().saturating_add(read) > MAX_LIVE_IMAGE_PRELOAD { ++ log::warn!( ++ "Skipping {}\\redox-live.iso preload: file exceeds {} MiB safety limit", ++ device_path_to_string(esp_device_path), ++ MAX_LIVE_IMAGE_PRELOAD / 1024 / 1024 ++ ); ++ return None; ++ } ++ ++ buffer.extend_from_slice(&chunk[..read]); ++ } + + Some(buffer) + } +@@ -130,7 +160,7 @@ pub fn disk_device_priority() -> Vec { + return vec![DiskDevice { + handle: esp_handle, + // Support both a copy of livedisk.iso and a standalone redoxfs partition +- partition_offset: if &buffer[512..520] == b"EFI PART" { ++ 
partition_offset: if buffer.len() >= 520 && &buffer[512..520] == b"EFI PART" { + //TODO: get block from partition table + 2 * crate::MIBI as u64 + } else { diff --git a/local/patches/bootloader/P4-live-large-iso-boot.patch b/local/patches/bootloader/P4-live-large-iso-boot.patch index a341239e..a816e5dc 100644 --- a/local/patches/bootloader/P4-live-large-iso-boot.patch +++ b/local/patches/bootloader/P4-live-large-iso-boot.patch @@ -113,7 +113,7 @@ diff --git a/src/os/uefi/device.rs b/src/os/uefi/device.rs index 0b7991f..554d88e 100644 --- a/src/os/uefi/device.rs +++ b/src/os/uefi/device.rs -@@ -13,6 +13,160 @@ use uefi_std::{fs::FileSystem, loaded_image::LoadedImage, proto::Protocol}; +@@ -13,6 +13,154 @@ use uefi_std::{fs::FileSystem, loaded_image::LoadedImage, proto::Protocol}; use super::disk::{DiskEfi, DiskOrFileEfi}; diff --git a/local/patches/bootloader/P4-live-large-iso-boot.patch.bak b/local/patches/bootloader/P4-live-large-iso-boot.patch.bak new file mode 100644 index 00000000..a341239e --- /dev/null +++ b/local/patches/bootloader/P4-live-large-iso-boot.patch.bak @@ -0,0 +1,392 @@ +diff --git a/src/arch/x86/mod.rs b/src/arch/x86/mod.rs +index bda3f5d..55889df 100644 +--- a/src/arch/x86/mod.rs ++++ b/src/arch/x86/mod.rs +@@ -3,10 +3,15 @@ use crate::os::Os; + pub(crate) mod x32; + pub(crate) mod x64; + +-pub unsafe fn paging_create(os: &impl Os, kernel_phys: u64, kernel_size: u64) -> Option { ++pub unsafe fn paging_create( ++ os: &impl Os, ++ kernel_phys: u64, ++ kernel_size: u64, ++ identity_map_end: u64, ++) -> Option { + unsafe { + if crate::KERNEL_64BIT { +- x64::paging_create(os, kernel_phys, kernel_size) ++ x64::paging_create(os, kernel_phys, kernel_size, identity_map_end) + } else { + x32::paging_create(os, kernel_phys, kernel_size) + } +diff --git a/src/arch/x86/x64.rs b/src/arch/x86/x64.rs +index a0a275a..fcf309d 100644 +--- a/src/arch/x86/x64.rs ++++ b/src/arch/x86/x64.rs +@@ -29,7 +29,12 @@ const PRESENT: u64 = 1; + const WRITABLE: u64 = 1 << 1; + 
const LARGE: u64 = 1 << 7; + +-pub unsafe fn paging_create(os: &impl Os, kernel_phys: u64, kernel_size: u64) -> Option { ++pub unsafe fn paging_create( ++ os: &impl Os, ++ kernel_phys: u64, ++ kernel_size: u64, ++ identity_map_end: u64, ++) -> Option { + unsafe { + // Create PML4 + let pml4 = paging_allocate(os)?; +@@ -42,8 +47,14 @@ pub unsafe fn paging_create(os: &impl Os, kernel_phys: u64, kernel_size: u64) - + pml4[0] = pdp.as_ptr() as u64 | WRITABLE | PRESENT; + pml4[256] = pdp.as_ptr() as u64 | WRITABLE | PRESENT; + +- // Identity map 8 GiB using 2 MiB pages +- for pdp_i in 0..8 { ++ let mut needed_pdp = identity_map_end.div_ceil(0x4000_0000); ++ if needed_pdp == 0 { ++ needed_pdp = 1; ++ } ++ assert!(needed_pdp <= pdp.len() as u64, "identity map end exceeds paging span"); ++ ++ // Identity map required physical range using 2 MiB pages ++ for pdp_i in 0..needed_pdp as usize { + let pd = paging_allocate(os)?; + pdp[pdp_i] = pd.as_ptr() as u64 | WRITABLE | PRESENT; + for pd_i in 0..pd.len() { +diff --git a/src/main.rs b/src/main.rs +index 78dabb0..fd8eb81 100644 +--- a/src/main.rs ++++ b/src/main.rs +@@ -62,6 +62,10 @@ pub static mut KERNEL_64BIT: bool = false; + + pub static mut LIVE_OPT: Option<(u64, &'static [u8])> = None; + ++fn region_end(base: u64, size: u64) -> u64 { ++ base.saturating_add(size).next_multiple_of(0x1000) ++} ++ + struct SliceWriter<'a> { + slice: &'a mut [u8], + i: usize, +@@ -645,9 +649,6 @@ fn main(os: &impl Os) -> (usize, u64, KernelArgs) { + (memory.len() as u64, memory.as_mut_ptr() as u64) + }; + +- let page_phys = unsafe { paging_create(os, kernel.as_ptr() as u64, kernel.len() as u64) } +- .expect("Failed to set up paging"); +- + let max_env_size = 64 * KIBI; + let mut env_size = max_env_size; + let env_base = os.alloc_zeroed_page_aligned(env_size); +@@ -655,6 +656,28 @@ fn main(os: &impl Os) -> (usize, u64, KernelArgs) { + panic!("Failed to allocate memory for stack"); + } + ++ let mut identity_map_end = region_end(kernel.as_ptr() 
as u64, kernel.len() as u64) ++ .max(region_end(stack_base as u64, stack_size as u64)) ++ .max(region_end(bootstrap_base, bootstrap_size)) ++ .max(region_end(env_base as u64, max_env_size as u64)); ++ ++ if let Some(ref live) = live_opt { ++ identity_map_end = identity_map_end.max(region_end( ++ live.as_ptr() as u64, ++ live.len() as u64, ++ )); ++ } ++ ++ let page_phys = unsafe { ++ paging_create( ++ os, ++ kernel.as_ptr() as u64, ++ kernel.len() as u64, ++ identity_map_end, ++ ) ++ } ++ .expect("Failed to set up paging"); ++ + { + let mut w = SliceWriter { + slice: unsafe { slice::from_raw_parts_mut(env_base, max_env_size) }, +diff --git a/src/os/uefi/device.rs b/src/os/uefi/device.rs +index 0b7991f..554d88e 100644 +--- a/src/os/uefi/device.rs ++++ b/src/os/uefi/device.rs +@@ -13,6 +13,160 @@ use uefi_std::{fs::FileSystem, loaded_image::LoadedImage, proto::Protocol}; + + use super::disk::{DiskEfi, DiskOrFileEfi}; + ++#[derive(Clone, Copy)] ++struct GptPartitionInfo { ++ first_lba: u64, ++ last_lba: u64, ++} ++ ++fn read_u32_le(bytes: &[u8]) -> Option { ++ Some(u32::from_le_bytes(bytes.get(..4)?.try_into().ok()?)) ++} ++ ++fn read_u64_le(bytes: &[u8]) -> Option { ++ Some(u64::from_le_bytes(bytes.get(..8)?.try_into().ok()?)) ++} ++ ++fn decode_utf16_name(bytes: &[u8]) -> Option { ++ let mut units = Vec::new(); ++ for chunk in bytes.chunks_exact(2) { ++ let unit = u16::from_le_bytes([chunk[0], chunk[1]]); ++ if unit == 0 { ++ break; ++ } ++ units.push(unit); ++ } ++ String::from_utf16(&units).ok() ++} ++ ++fn select_partition(best: &mut Option, candidate: GptPartitionInfo) { ++ match best { ++ Some(current) if current.last_lba.saturating_sub(current.first_lba) >= candidate.last_lba.saturating_sub(candidate.first_lba) => {} ++ _ => *best = Some(candidate), ++ } ++} ++ ++fn parse_gpt_partition_offset_from_bytes(data: &[u8], block_size: usize) -> Option { ++ let header_offset = block_size; ++ let header = data.get(header_offset..header_offset + 92)?; ++ if 
header.get(..8)? != b"EFI PART" { ++ return None; ++ } ++ ++ let entries_lba = read_u64_le(header.get(72..80)?)?; ++ let entry_count = read_u32_le(header.get(80..84)?)? as usize; ++ let entry_size = read_u32_le(header.get(84..88)?)? as usize; ++ if entry_size < 128 { ++ return None; ++ } ++ ++ let entries_offset = entries_lba.checked_mul(block_size as u64)? as usize; ++ let mut redox_partition = None; ++ let mut fallback_partition = None; ++ ++ for index in 0..entry_count { ++ let entry_offset = entries_offset.checked_add(index.checked_mul(entry_size)?)?; ++ let entry = data.get(entry_offset..entry_offset + entry_size)?; ++ if entry.get(..16)?.iter().all(|byte| *byte == 0) { ++ continue; ++ } ++ ++ let first_lba = read_u64_le(entry.get(32..40)?)?; ++ let last_lba = read_u64_le(entry.get(40..48)?)?; ++ if first_lba == 0 || last_lba < first_lba { ++ continue; ++ } ++ ++ let partition = GptPartitionInfo { first_lba, last_lba }; ++ let name = decode_utf16_name(entry.get(56..128)?).unwrap_or_default(); ++ if name == "REDOX" { ++ redox_partition = Some(partition); ++ break; ++ } ++ ++ select_partition(&mut fallback_partition, partition); ++ } ++ ++ redox_partition ++ .or(fallback_partition) ++ .map(|partition| partition.first_lba * block_size as u64) ++} ++ ++fn parse_gpt_partition_offset_from_parts( ++ entries: &[u8], ++ entry_count: usize, ++ entry_size: usize, ++ block_size: usize, ++) -> Option { ++ let mut redox_partition = None; ++ let mut fallback_partition = None; ++ ++ for index in 0..entry_count { ++ let entry_offset = index.checked_mul(entry_size)?; ++ let entry = entries.get(entry_offset..entry_offset + entry_size)?; ++ if entry.get(..16)?.iter().all(|byte| *byte == 0) { ++ continue; ++ } ++ ++ let first_lba = read_u64_le(entry.get(32..40)?)?; ++ let last_lba = read_u64_le(entry.get(40..48)?)?; ++ if first_lba == 0 || last_lba < first_lba { ++ continue; ++ } ++ ++ let partition = GptPartitionInfo { first_lba, last_lba }; ++ let name = 
decode_utf16_name(entry.get(56..128)?).unwrap_or_default(); ++ if name == "REDOX" { ++ redox_partition = Some(partition); ++ break; ++ } ++ ++ select_partition(&mut fallback_partition, partition); ++ } ++ redox_partition ++ .or(fallback_partition) ++ .map(|partition| partition.first_lba * block_size as u64) ++} ++ ++fn gpt_partition_offset_from_buffer(data: &[u8]) -> Option { ++ parse_gpt_partition_offset_from_bytes(data, 512) ++} ++ ++fn gpt_partition_offset_from_disk(disk: &mut DiskEfi) -> Option { ++ const GPT_SECTOR_SIZE: usize = 512; ++ ++ if disk.media_block_size() == 0 { ++ return None; ++ } ++ ++ let mut boot_region = vec![0_u8; 2048]; ++ disk.read_bytes(0, &mut boot_region).ok()?; ++ let header = boot_region.get(GPT_SECTOR_SIZE..GPT_SECTOR_SIZE + 92)?; ++ if header.get(..8)? != b"EFI PART" { ++ return None; ++ } ++ ++ let entries_lba = read_u64_le(header.get(72..80)?)?; ++ let entry_count = read_u32_le(header.get(80..84)?)? as usize; ++ let entry_size = read_u32_le(header.get(84..88)?)? 
as usize; ++ if entry_size < 128 { ++ return None; ++ } ++ ++ let entries_bytes = entry_count.checked_mul(entry_size)?; ++ let entries_offset = entries_lba.checked_mul(GPT_SECTOR_SIZE as u64)?; ++ let mut entries = vec![0_u8; entries_bytes]; ++ disk.read_bytes(entries_offset, &mut entries).ok()?; ++ ++ parse_gpt_partition_offset_from_parts(&entries, entry_count, entry_size, GPT_SECTOR_SIZE) ++} ++ + #[derive(Debug)] + enum DevicePathRelation { + This, +@@ -131,12 +285,7 @@ pub fn disk_device_priority() -> Vec { + return vec![DiskDevice { + handle: esp_handle, + // Support both a copy of livedisk.iso and a standalone redoxfs partition +- partition_offset: if &buffer[512..520] == b"EFI PART" { +- //TODO: get block from partition table +- 2 * crate::MIBI as u64 +- } else { +- 0 +- }, ++ partition_offset: gpt_partition_offset_from_buffer(&buffer).unwrap_or(0), + disk: DiskOrFileEfi::File(buffer), + device_path: esp_device_path, + file_path: Some("redox-live.iso"), +@@ -154,7 +303,7 @@ pub fn disk_device_priority() -> Vec { + }; + let mut devices = Vec::with_capacity(handles.len()); + for handle in handles { +- let disk = match DiskEfi::handle_protocol(handle) { ++ let mut disk = match DiskEfi::handle_protocol(handle) { + Ok(ok) => ok, + Err(err) => { + log::warn!( +@@ -182,14 +331,15 @@ pub fn disk_device_priority() -> Vec { + } + }; + ++ let partition_offset = if disk.0.Media.LogicalPartition { ++ 0 ++ } else { ++ gpt_partition_offset_from_disk(&mut disk).unwrap_or(2 * crate::MIBI as u64) ++ }; ++ + devices.push(DiskDevice { + handle, +- partition_offset: if disk.0.Media.LogicalPartition { +- 0 +- } else { +- //TODO: get block from partition table +- 2 * crate::MIBI as u64 +- }, ++ partition_offset, + disk: DiskOrFileEfi::Disk(disk), + device_path, + file_path: None, +diff --git a/src/os/uefi/disk.rs b/src/os/uefi/disk.rs +index 3f920bb..4d109f8 100644 +--- a/src/os/uefi/disk.rs ++++ b/src/os/uefi/disk.rs +@@ -117,3 +117,43 @@ impl Disk for DiskEfi { + 
Err(Error::new(EIO)) + } + } ++ ++impl DiskEfi { ++ pub fn media_block_size(&self) -> usize { ++ self.0.Media.BlockSize as usize ++ } ++ ++ pub fn read_bytes(&mut self, offset: u64, buffer: &mut [u8]) -> Result<()> { ++ let block_size = self.media_block_size(); ++ if block_size == 0 || block_size > self.1.len() { ++ return Err(Error::new(EINVAL)); ++ } ++ ++ let scratch = &mut self.1[..block_size]; ++ let mut copied = 0usize; ++ ++ while copied < buffer.len() { ++ let absolute = offset as usize + copied; ++ let lba = (absolute / block_size) as u64; ++ let in_block = absolute % block_size; ++ ++ match (self.0.ReadBlocks)( ++ self.0, ++ self.0.Media.MediaId, ++ lba, ++ block_size, ++ scratch.as_mut_ptr(), ++ ) { ++ status if status.is_success() => { ++ let chunk_len = core::cmp::min(block_size - in_block, buffer.len() - copied); ++ buffer[copied..copied + chunk_len] ++ .copy_from_slice(&scratch[in_block..in_block + chunk_len]); ++ copied += chunk_len; ++ } ++ _ => return Err(Error::new(EIO)), ++ } ++ } ++ ++ Ok(()) ++ } ++} +diff --git a/src/os/uefi/mod.rs b/src/os/uefi/mod.rs +index c79266e..86235a4 100644 +--- a/src/os/uefi/mod.rs ++++ b/src/os/uefi/mod.rs +@@ -47,17 +47,19 @@ pub(crate) fn alloc_zeroed_page_aligned(size: usize) -> *mut u8 { + let ptr = { + // Max address mapped by src/arch paging code (8 GiB) + let mut ptr = 0x2_0000_0000; +- status_to_result((std::system_table().BootServices.AllocatePages)( +- 1, // AllocateMaxAddress +- MemoryType::EfiRuntimeServicesData, // Keeps this memory out of free space list ++ if status_to_result((std::system_table().BootServices.AllocatePages)( ++ 0, // AllocateAnyPages ++ MemoryType::EfiLoaderData, + pages, + &mut ptr, + )) +- .unwrap(); ++ .is_err() ++ { ++ return ptr::null_mut(); ++ } + ptr as *mut u8 + }; + +- assert!(!ptr.is_null()); + unsafe { ptr::write_bytes(ptr, 0, pages * page_size) }; + ptr + } diff --git a/local/patches/kernel/P2-redbear-os-branding.patch b/local/patches/kernel/P2-redbear-os-branding.patch 
index f0d36c1f..b67d3cde 100644 --- a/local/patches/kernel/P2-redbear-os-branding.patch +++ b/local/patches/kernel/P2-redbear-os-branding.patch @@ -41,7 +41,7 @@ index 7a7c0ae8..62f9523c 100644 args.print(); // Set up GDT -@@ -127,16 +127,21 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! { +@@ -127,17 +127,21 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! { // Initialize devices device::init(); diff --git a/local/patches/kernel/P2-redbear-os-branding.patch.bak b/local/patches/kernel/P2-redbear-os-branding.patch.bak new file mode 100644 index 00000000..f0d36c1f --- /dev/null +++ b/local/patches/kernel/P2-redbear-os-branding.patch.bak @@ -0,0 +1,65 @@ +# Red Bear OS branding in kernel start messages +# Changes "Redox OS" to "RedBear OS" in architecture start files +# Adds device init logging milestones in x86_shared start path + +diff --git a/src/arch/aarch64/start.rs b/src/arch/aarch64/start.rs +index e1c8cfb4..65e3fe33 100644 +--- a/src/arch/aarch64/start.rs ++++ b/src/arch/aarch64/start.rs +@@ -91,7 +91,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs) -> ! { + dtb::serial::init_early(dtb); + } + +- info!("Redox OS starting..."); ++ info!("RedBear OS starting..."); + args.print(); + + // Initialize RMM +diff --git a/src/arch/riscv64/start.rs b/src/arch/riscv64/start.rs +index 2551968f..a825536a 100644 +--- a/src/arch/riscv64/start.rs ++++ b/src/arch/riscv64/start.rs +@@ -97,7 +97,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs) -> ! { + init_early(dtb); + } + +- info!("Redox OS starting..."); ++ info!("RedBear OS starting..."); + args.print(); + + if let Some(dtb) = &dtb { +diff --git a/src/arch/x86_shared/start.rs b/src/arch/x86_shared/start.rs +index 7a7c0ae8..62f9523c 100644 +--- a/src/arch/x86_shared/start.rs ++++ b/src/arch/x86_shared/start.rs +@@ -91,7 +91,7 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! 
{ + // Set up graphical debug + graphical_debug::init(args.env()); + +- info!("Redox OS starting..."); ++ info!("RedBear OS starting..."); + args.print(); + + // Set up GDT +@@ -127,16 +127,21 @@ unsafe extern "C" fn start(args_ptr: *const KernelArgs, stack_end: usize) -> ! { + + // Initialize devices + device::init(); ++ info!("kernel: device init complete (PIC + LAPIC)"); + + // Read ACPI tables, starts APs + if cfg!(feature = "acpi") { + crate::acpi::init(args.acpi_rsdp()); ++ info!("kernel: ACPI tables parsed"); + + device::init_after_acpi(); ++ info!("kernel: IOAPIC init complete"); + } + crate::profiling::init(); + + // Initialize all of the non-core devices not otherwise needed to complete initialization + device::init_noncore(); ++ info!("kernel: timer init complete, entering userspace"); + + args.bootstrap() + }; diff --git a/local/patches/kernel/P4-scheme-failure-modes.patch b/local/patches/kernel/P4-scheme-failure-modes.patch index d97519f6..c3453b86 100644 --- a/local/patches/kernel/P4-scheme-failure-modes.patch +++ b/local/patches/kernel/P4-scheme-failure-modes.patch @@ -524,7 +524,7 @@ index b901302..dfbf66b 100644 } // invalid state -@@ -368,7 +401,67 @@ impl UserInner { +@@ -368,6 +401,68 @@ impl UserInner { } }, } diff --git a/local/patches/kernel/P7-proc-setname.patch b/local/patches/kernel/P7-proc-setname.patch index 70821737..536093a9 100644 --- a/local/patches/kernel/P7-proc-setname.patch +++ b/local/patches/kernel/P7-proc-setname.patch @@ -9,7 +9,7 @@ diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs MmapMinAddr(Arc), } -@@ -267,6 +268,7 @@ impl ProcScheme { +@@ -267,7 +268,8 @@ impl ProcScheme { "sched-affinity" => (ContextHandle::SchedAffinity, true), // TODO: Switch this kernel-local proc handle over to a stable upstream // redox_syscall ProcCall::SetSchedPolicy opcode once that lands. 
@@ -18,7 +18,7 @@ diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs "status" => (ContextHandle::Status { privileged: false }, false), _ if path.starts_with("auth-") => { let nonprefix = &path["auth-".len()..]; -@@ -1218,6 +1220,16 @@ impl ContextHandle { +@@ -1218,5 +1220,15 @@ impl ContextHandle { Ok(2) } + ContextHandle::Name => { diff --git a/local/patches/kernel/P7-proc-setname.patch.bak b/local/patches/kernel/P7-proc-setname.patch.bak new file mode 100644 index 00000000..70821737 --- /dev/null +++ b/local/patches/kernel/P7-proc-setname.patch.bak @@ -0,0 +1,47 @@ +diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs +--- a/src/scheme/proc.rs ++++ b/src/scheme/proc.rs +@@ -147,6 +147,7 @@ enum ContextHandle { + Priority, + SchedAffinity, + SchedPolicy, ++ Name, + + MmapMinAddr(Arc), + } +@@ -267,6 +268,7 @@ impl ProcScheme { + "sched-affinity" => (ContextHandle::SchedAffinity, true), + // TODO: Switch this kernel-local proc handle over to a stable upstream + // redox_syscall ProcCall::SetSchedPolicy opcode once that lands. 
+ "sched-policy" => (ContextHandle::SchedPolicy, false), ++ "name" => (ContextHandle::Name, false), + "status" => (ContextHandle::Status { privileged: false }, false), + _ if path.starts_with("auth-") => { + let nonprefix = &path["auth-".len()..]; +@@ -1218,6 +1220,16 @@ impl ContextHandle { + Ok(2) + } ++ ContextHandle::Name => { ++ let mut name_buf = [0u8; 32]; ++ let len = buf.copy_common_bytes_to_slice(&mut name_buf[..31]).unwrap_or(0); ++ let mut context = context.write(token.token()); ++ context.name.clear(); ++ if let Ok(s) = core::str::from_utf8(&name_buf[..len]) { ++ context.name.push_str(s); ++ } ++ Ok(len) ++ } + ContextHandle::Status { privileged } => { + let mut args = buf.usizes(); + +@@ -1532,6 +1544,10 @@ impl ContextHandle { + let data = [context.sched_policy as u8, context.sched_rt_priority]; + buf.copy_common_bytes_from_slice(&data) + } ++ ContextHandle::Name => { ++ let context = context.read(token.token()); ++ buf.copy_common_bytes_from_slice(context.name.as_bytes()) ++ } + ContextHandle::Status { .. 
} => { + let status = { + let context = context.read(token.token()); diff --git a/local/patches/kernel/P7-proc-setpriority.patch b/local/patches/kernel/P7-proc-setpriority.patch index e65a95bd..2a73d99f 100644 --- a/local/patches/kernel/P7-proc-setpriority.patch +++ b/local/patches/kernel/P7-proc-setpriority.patch @@ -11,7 +11,7 @@ diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs Name, MmapMinAddr(Arc), -@@ -160,6 +161,17 @@ pub struct ProcScheme; +@@ -160,7 +161,18 @@ pub struct ProcScheme; static NEXT_ID: AtomicUsize = AtomicUsize::new(1); static HANDLES: RwLock> = RwLock::new(HashMap::with_hasher(DefaultHashBuilder::new())); @@ -38,7 +38,7 @@ diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs "mmap-min-addr" => ( ContextHandle::MmapMinAddr(Arc::clone( context -@@ -1191,6 +1204,17 @@ impl ContextHandle { +@@ -1191,6 +1204,18 @@ impl ContextHandle { Ok(size_of_val(&mask)) } diff --git a/local/patches/kernel/P7-proc-setpriority.patch.bak b/local/patches/kernel/P7-proc-setpriority.patch.bak new file mode 100644 index 00000000..e65a95bd --- /dev/null +++ b/local/patches/kernel/P7-proc-setpriority.patch.bak @@ -0,0 +1,70 @@ +diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs +--- a/src/scheme/proc.rs ++++ b/src/scheme/proc.rs +@@ -145,8 +145,9 @@ enum ContextHandle { + // TODO: Remove this once openat is implemented, or allow openat-via-dup via e.g. the top-level + // directory. 
+ OpenViaDup, ++ Priority, + SchedAffinity, + SchedPolicy, + Name, + + MmapMinAddr(Arc), +@@ -160,6 +161,17 @@ pub struct ProcScheme; + static NEXT_ID: AtomicUsize = AtomicUsize::new(1); + static HANDLES: RwLock> = + RwLock::new(HashMap::with_hasher(DefaultHashBuilder::new())); ++ ++const NICE_MIN: i32 = -20; ++const NICE_MAX: i32 = 19; ++ ++fn nice_to_kernel_prio(nice: i32) -> usize { ++ (nice.saturating_add(20)).clamp(0, 39) as usize ++} ++ ++fn kernel_prio_to_nice(prio: usize) -> i32 { ++ (prio.min(39) as i32) - 20 ++} + + #[cfg(feature = "debugger")] + #[allow(dead_code)] + pub fn foreach_addrsp( +@@ -253,6 +265,7 @@ impl ProcScheme { + "sighandler" => (ContextHandle::Sighandler, false), + "start" => (ContextHandle::Start, false), + "open_via_dup" => (ContextHandle::OpenViaDup, false), ++ "priority" => (ContextHandle::Priority, false), + "mmap-min-addr" => ( + ContextHandle::MmapMinAddr(Arc::clone( + context +@@ -1191,6 +1204,17 @@ impl ContextHandle { + + Ok(size_of_val(&mask)) + } ++ Self::Priority => { ++ let nice = unsafe { buf.read_exact::()? 
}; ++ if !(NICE_MIN..=NICE_MAX).contains(&nice) { ++ return Err(Error::new(EINVAL)); ++ } ++ ++ context ++ .write(token.token()) ++ .set_sched_other_prio(nice_to_kernel_prio(nice)); ++ ++ Ok(size_of::()) ++ } + Self::SchedPolicy => { + if buf.len() != 2 { + return Err(Error::new(EINVAL)); +@@ -1522,6 +1546,10 @@ impl ContextHandle { + + buf.copy_exactly(crate::cpu_set::mask_as_bytes(&mask))?; + Ok(size_of_val(&mask)) ++ } ++ ContextHandle::Priority => { ++ let nice = kernel_prio_to_nice(context.read(token.token()).prio); ++ buf.copy_common_bytes_from_slice(&nice.to_ne_bytes()) + } + ContextHandle::SchedPolicy => { + let context = context.read(token.token()); diff --git a/local/patches/kernel/P8-load-balance.patch b/local/patches/kernel/P8-load-balance.patch index 8883c992..c33e4ecf 100644 --- a/local/patches/kernel/P8-load-balance.patch +++ b/local/patches/kernel/P8-load-balance.patch @@ -1,7 +1,7 @@ diff --git a/src/percpu.rs b/src/percpu.rs --- a/src/percpu.rs +++ b/src/percpu.rs -@@ -29,12 +29,14 @@ pub struct PerCpuSched { +@@ -29,15 +29,17 @@ pub struct PerCpuSched { pub run_queues_lock: AtomicBool, pub balance: Cell<[usize; RUN_QUEUE_COUNT]>, pub last_queue: Cell, @@ -22,7 +22,7 @@ diff --git a/src/percpu.rs b/src/percpu.rs diff --git a/src/context/switch.rs b/src/context/switch.rs --- a/src/context/switch.rs +++ b/src/context/switch.rs -@@ -33,6 +33,8 @@ const SCHED_PRIO_TO_WEIGHT: [usize; 40] = [ +@@ -33,4 +33,6 @@ const SCHED_PRIO_TO_WEIGHT: [usize; 40] = [ 70, 56, 45, 36, 29, 23, 18, 15, ]; @@ -39,7 +39,7 @@ diff --git a/src/context/switch.rs b/src/context/switch.rs // Trigger a context switch after every 3 ticks. 
if new_ticks >= 3 { switch(token); -@@ -427,6 +432,92 @@ fn steal_work( +@@ -427,3 +432,104 @@ fn steal_work( None } diff --git a/local/patches/kernel/P8-load-balance.patch.bak b/local/patches/kernel/P8-load-balance.patch.bak new file mode 100644 index 00000000..8883c992 --- /dev/null +++ b/local/patches/kernel/P8-load-balance.patch.bak @@ -0,0 +1,146 @@ +diff --git a/src/percpu.rs b/src/percpu.rs +--- a/src/percpu.rs ++++ b/src/percpu.rs +@@ -29,12 +29,14 @@ pub struct PerCpuSched { + pub run_queues_lock: AtomicBool, + pub balance: Cell<[usize; RUN_QUEUE_COUNT]>, + pub last_queue: Cell, ++ pub last_balance_time: Cell, + } + + impl PerCpuSched { + pub const fn new() -> Self { + const EMPTY: VecDeque = VecDeque::new(); + Self { + run_queues: SyncUnsafeCell::new([EMPTY; RUN_QUEUE_COUNT]), + run_queues_lock: AtomicBool::new(false), + balance: Cell::new([0; RUN_QUEUE_COUNT]), + last_queue: Cell::new(0), ++ last_balance_time: Cell::new(0), + } + } +diff --git a/src/context/switch.rs b/src/context/switch.rs +--- a/src/context/switch.rs ++++ b/src/context/switch.rs +@@ -33,6 +33,8 @@ const SCHED_PRIO_TO_WEIGHT: [usize; 40] = [ + 70, 56, 45, 36, 29, 23, 18, 15, + ]; + ++const LOAD_BALANCE_INTERVAL_NS: u128 = 100_000_000; ++ + static SCHED_STEAL_COUNT: AtomicUsize = AtomicUsize::new(0); +@@ -101,6 +103,9 @@ pub fn tick(token: &mut CleanLockToken) { + let new_ticks = ticks_cell.get() + 1; + ticks_cell.set(new_ticks); + ++ let balance_time = crate::time::monotonic(token); ++ maybe_balance_queues(token, percpu, balance_time); ++ + // Trigger a context switch after every 3 ticks. 
+ if new_ticks >= 3 { + switch(token); +@@ -427,6 +432,92 @@ fn steal_work( + + None + } ++ ++fn queue_depth(percpu: &PercpuBlock) -> usize { ++ let mut sched_lock = SchedQueuesLock::new(&percpu.sched); ++ unsafe { ++ sched_lock ++ .queues_mut() ++ .iter() ++ .map(|queue| queue.len()) ++ .sum() ++ } ++} ++ ++fn migrate_one_context( ++ token: &mut CleanLockToken, ++ source_id: LogicalCpuId, ++ target_id: LogicalCpuId, ++ switch_time: u128, ++) -> bool { ++ let Some(source) = get_percpu_block(source_id) else { ++ return false; ++ }; ++ let Some(target) = get_percpu_block(target_id) else { ++ return false; ++ }; ++ ++ let source_idle = source.switch_internals.idle_context(); ++ let moved = { ++ let mut source_lock = SchedQueuesLock::new(&source.sched); ++ let source_queues = unsafe { source_lock.queues_mut() }; ++ pop_movable_context(token, source_queues, target_id, switch_time, &source_idle) ++ }; ++ ++ let Some((prio, context_ref)) = moved else { ++ return false; ++ }; ++ ++ let mut target_lock = SchedQueuesLock::new(&target.sched); ++ unsafe { ++ target_lock.queues_mut()[prio].push_back(context_ref); ++ } ++ true ++} ++ ++fn maybe_balance_queues(token: &mut CleanLockToken, percpu: &PercpuBlock, balance_time: u128) { ++ if crate::cpu_count() <= 1 || percpu.cpu_id != LogicalCpuId::BSP { ++ return; ++ } ++ if balance_time.saturating_sub(percpu.sched.last_balance_time.get()) < LOAD_BALANCE_INTERVAL_NS ++ { ++ return; ++ } ++ ++ percpu.sched.last_balance_time.set(balance_time); ++ ++ let mut depths = Vec::new(); ++ let mut total_depth = 0usize; ++ for raw_id in 0..crate::cpu_count() { ++ let cpu_id = LogicalCpuId::new(raw_id); ++ let Some(cpu_percpu) = get_percpu_block(cpu_id) else { ++ continue; ++ }; ++ let depth = queue_depth(cpu_percpu); ++ total_depth += depth; ++ depths.push((cpu_id, depth)); ++ } ++ ++ if depths.len() <= 1 || total_depth == 0 { ++ return; ++ } ++ ++ let avg_depth = (total_depth + depths.len().saturating_sub(1)) / depths.len(); ++ ++ for 
target_index in 0..depths.len() { ++ if depths[target_index].1 != 0 { ++ continue; ++ } ++ ++ let mut source_index = None; ++ let mut source_depth = 0usize; ++ for (idx, &(_, depth)) in depths.iter().enumerate() { ++ if idx == target_index { ++ continue; ++ } ++ if depth > avg_depth + 1 && depth > source_depth { ++ source_index = Some(idx); ++ source_depth = depth; ++ } ++ } ++ ++ let Some(source_index) = source_index else { ++ continue; ++ }; ++ ++ let source_id = depths[source_index].0; ++ let target_id = depths[target_index].0; ++ if migrate_one_context(token, source_id, target_id, balance_time) { ++ depths[source_index].1 = depths[source_index].1.saturating_sub(1); ++ depths[target_index].1 += 1; ++ } ++ } ++} diff --git a/local/patches/kernel/P8-work-stealing.patch b/local/patches/kernel/P8-work-stealing.patch index e4c9a25d..ae474bfa 100644 --- a/local/patches/kernel/P8-work-stealing.patch +++ b/local/patches/kernel/P8-work-stealing.patch @@ -1,7 +1,7 @@ diff --git a/src/percpu.rs b/src/percpu.rs --- a/src/percpu.rs +++ b/src/percpu.rs -@@ -100,6 +100,14 @@ static ALL_PERCPU_BLOCKS: [AtomicPtr; MAX_CPU_COUNT as usize] = +@@ -100,5 +100,13 @@ static ALL_PERCPU_BLOCKS: [AtomicPtr; MAX_CPU_COUNT as usize] = pub unsafe fn init_tlb_shootdown(id: LogicalCpuId, block: *mut PercpuBlock) { ALL_PERCPU_BLOCKS[id.get() as usize].store(block, Ordering::Release) } @@ -18,7 +18,7 @@ diff --git a/src/percpu.rs b/src/percpu.rs diff --git a/src/context/switch.rs b/src/context/switch.rs --- a/src/context/switch.rs +++ b/src/context/switch.rs -@@ -7,15 +7,15 @@ use crate::{ +@@ -7,15 +7,135 @@ use crate::{ self, arch, idle_contexts, idle_contexts_try, run_contexts, ArcContextLockWriteGuard, Context, ContextLock, SchedPolicy, WeakContextRef, RUN_QUEUE_COUNT, }, @@ -170,7 +170,7 @@ diff --git a/src/context/switch.rs b/src/context/switch.rs run_queues[prio].push_back(context_ref); } } -@@ -559,6 +672,16 @@ fn select_next_context( +@@ -559,6 +672,17 @@ fn select_next_context( ); 
return Ok(Some(next_context_guard)); } diff --git a/local/patches/kernel/P8-work-stealing.patch.bak b/local/patches/kernel/P8-work-stealing.patch.bak new file mode 100644 index 00000000..e4c9a25d --- /dev/null +++ b/local/patches/kernel/P8-work-stealing.patch.bak @@ -0,0 +1,190 @@ +diff --git a/src/percpu.rs b/src/percpu.rs +--- a/src/percpu.rs ++++ b/src/percpu.rs +@@ -100,6 +100,14 @@ static ALL_PERCPU_BLOCKS: [AtomicPtr; MAX_CPU_COUNT as usize] = + pub unsafe fn init_tlb_shootdown(id: LogicalCpuId, block: *mut PercpuBlock) { + ALL_PERCPU_BLOCKS[id.get() as usize].store(block, Ordering::Release) + } ++ ++pub fn get_percpu_block(id: LogicalCpuId) -> Option<&'static PercpuBlock> { ++ unsafe { ++ ALL_PERCPU_BLOCKS[id.get() as usize] ++ .load(Ordering::Acquire) ++ .as_ref() ++ } ++} + + pub fn get_all_stats() -> Vec<(LogicalCpuId, CpuStatsData)> { +diff --git a/src/context/switch.rs b/src/context/switch.rs +--- a/src/context/switch.rs ++++ b/src/context/switch.rs +@@ -7,15 +7,15 @@ use crate::{ + self, arch, idle_contexts, idle_contexts_try, run_contexts, ArcContextLockWriteGuard, + Context, ContextLock, SchedPolicy, WeakContextRef, RUN_QUEUE_COUNT, + }, +- cpu_set::LogicalCpuId, ++ cpu_set::{LogicalCpuId, LogicalCpuSet}, + cpu_stats::{self, CpuState}, +- percpu::{PerCpuSched, PercpuBlock}, ++ percpu::{get_percpu_block, PerCpuSched, PercpuBlock}, + sync::{ArcRwLockWriteGuard, CleanLockToken, LockToken, L1, L4}, + }; + use alloc::{sync::Arc, vec::Vec}; + use core::{ + cell::{Cell, RefCell}, + hint, mem, +- sync::atomic::Ordering, ++ sync::atomic::{AtomicUsize, Ordering}, + }; + use syscall::PtraceFlags; +@@ ++static SCHED_STEAL_COUNT: AtomicUsize = AtomicUsize::new(0); ++ ++fn assign_context_to_cpu(context: &mut Context, cpu_id: LogicalCpuId) { ++ context.sched_affinity = LogicalCpuSet::empty(); ++ context.sched_affinity.atomic_set(cpu_id); ++} +@@ ++fn pop_movable_context( ++ token: &mut CleanLockToken, ++ queues: &mut [alloc::collections::VecDeque; RUN_QUEUE_COUNT], 
++ target_cpu: LogicalCpuId, ++ switch_time: u128, ++ idle_context: &Arc, ++) -> Option<(usize, WeakContextRef)> { ++ for prio in 0..RUN_QUEUE_COUNT { ++ let len = queues[prio].len(); ++ for _ in 0..len { ++ let Some(context_ref) = queues[prio].pop_front() else { ++ break; ++ }; ++ let Some(context_lock) = context_ref.upgrade() else { ++ continue; ++ }; ++ if Arc::ptr_eq(&context_lock, idle_context) { ++ queues[prio].push_back(context_ref); ++ continue; ++ } ++ ++ let mut context_guard = unsafe { context_lock.write_arc() }; ++ let sw = unsafe { update_stealable(&mut context_guard, switch_time) }; ++ if let UpdateResult::CanSwitch = sw { ++ assign_context_to_cpu(&mut context_guard, target_cpu); ++ let moved_ref = WeakContextRef(Arc::downgrade(ArcContextLockWriteGuard::rwlock( ++ &context_guard, ++ ))); ++ drop(context_guard); ++ return Some((prio, moved_ref)); ++ } ++ ++ if matches!(sw, UpdateResult::Blocked) { ++ idle_contexts(token.downgrade()).push_back(context_ref); ++ } else { ++ queues[prio].push_back(context_ref); ++ } ++ } ++ } ++ ++ None ++} ++ ++fn steal_work( ++ token: &mut CleanLockToken, ++ cpu_id: LogicalCpuId, ++ switch_time: u128, ++) -> Option { ++ let cpu_count = crate::cpu_count(); ++ if cpu_count <= 1 { ++ return None; ++ } ++ ++ for offset in 1..cpu_count { ++ let victim_id = LogicalCpuId::new((cpu_id.get() + offset) % cpu_count); ++ let Some(victim) = get_percpu_block(victim_id) else { ++ continue; ++ }; ++ ++ let victim_idle = victim.switch_internals.idle_context(); ++ let mut victim_lock = SchedQueuesLock::new(&victim.sched); ++ let victim_queues = unsafe { victim_lock.queues_mut() }; ++ ++ for prio in 0..RUN_QUEUE_COUNT { ++ let len = victim_queues[prio].len(); ++ for _ in 0..len { ++ let Some(context_ref) = victim_queues[prio].pop_front() else { ++ break; ++ }; ++ let Some(context_lock) = context_ref.upgrade() else { ++ continue; ++ }; ++ if Arc::ptr_eq(&context_lock, &victim_idle) { ++ victim_queues[prio].push_back(context_ref); ++ 
continue; ++ } ++ ++ let mut context_guard = unsafe { context_lock.write_arc() }; ++ let sw = unsafe { update_stealable(&mut context_guard, switch_time) }; ++ if let UpdateResult::CanSwitch = sw { ++ assign_context_to_cpu(&mut context_guard, cpu_id); ++ SCHED_STEAL_COUNT.fetch_add(1, Ordering::Relaxed); ++ return Some(context_guard); ++ } ++ ++ if matches!(sw, UpdateResult::Blocked) { ++ idle_contexts(token.downgrade()).push_back(context_ref); ++ } else { ++ victim_queues[prio].push_back(context_ref); ++ } ++ } ++ } ++ } ++ ++ None ++} ++ ++unsafe fn update_stealable(context: &mut Context, switch_time: u128) -> UpdateResult { ++ if context.running { ++ return UpdateResult::Skip; ++ } ++ if context.status.is_soft_blocked() ++ && let Some(wake) = context.wake ++ && switch_time >= wake ++ { ++ context.wake = None; ++ context.unblock_no_ipi(); ++ } ++ if context.status.is_runnable() { ++ UpdateResult::CanSwitch ++ } else { ++ UpdateResult::Blocked ++ } ++} +@@ -360,6 +469,10 @@ fn wakeup_contexts(token: &mut CleanLockToken, percpu: &PercpuBlock, switch_time + let mut sched_lock = SchedQueuesLock::new(&percpu.sched); + let run_queues = unsafe { sched_lock.queues_mut() }; + for (prio, context_ref) in wakeups { ++ if let Some(context_lock) = context_ref.upgrade() { ++ let mut context_guard = unsafe { context_lock.write_arc() }; ++ assign_context_to_cpu(&mut context_guard, percpu.cpu_id); ++ } + run_queues[prio].push_back(context_ref); + } + } +@@ -559,6 +672,16 @@ fn select_next_context( + ); + return Ok(Some(next_context_guard)); + } ++ ++ if let Some(next_context_guard) = steal_work(token, cpu_id, switch_time) { ++ queue_previous_context( ++ token, ++ percpu, ++ &prev_context_lock, ++ prev_context_guard, ++ &idle_context, ++ ); ++ return Ok(Some(next_context_guard)); ++ } + + let global_next = { + let contexts_data = run_contexts(token.token()); diff --git a/local/patches/kernel/redox.patch b/local/patches/kernel/redox.patch index 7977f2f2..e69de29b 100644 --- 
a/local/patches/kernel/redox.patch +++ b/local/patches/kernel/redox.patch @@ -1,147 +0,0 @@ -diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs -index 3159b9c4..c691eb8d 100644 ---- a/src/acpi/madt/mod.rs -+++ b/src/acpi/madt/mod.rs -@@ -146,6 +146,52 @@ pub struct MadtGicd { - _reserved2: [u8; 3], - } - -+/// MADT Local x2APIC (entry type 0x9) -+/// Used by modern AMD and Intel platforms with APIC IDs >= 255. -+#[derive(Clone, Copy, Debug)] -+#[repr(C, packed)] -+pub struct MadtLocalX2Apic { -+ _reserved: u16, -+ pub x2apic_id: u32, -+ pub flags: u32, -+ pub processor_uid: u32, -+} -+ -+/// MADT Local APIC NMI (entry type 0x4) -+/// Configures NMI routing to a processor's LINT0/LINT1 pin. -+#[derive(Clone, Copy, Debug)] -+#[repr(C, packed)] -+pub struct MadtLocalApicNmi { -+ pub processor: u8, -+ pub flags: u16, -+ pub nmi_pin: u8, -+} -+ -+/// MADT Local APIC Address Override (entry type 0x5) -+/// Provides 64-bit override for the 32-bit local APIC address. -+#[derive(Clone, Copy, Debug)] -+#[repr(C, packed)] -+pub struct MadtLapicAddressOverride { -+ _reserved: u16, -+ pub local_apic_address: u64, -+} -+ -+/// MADT Local x2APIC NMI (entry type 0xA) -+/// x2APIC equivalent of type 0x4 for APIC IDs >= 255. 
-+#[derive(Clone, Copy, Debug)] -+#[repr(C, packed)] -+pub struct MadtLocalX2ApicNmi { -+ _reserved: u16, -+ pub processor_uid: u32, -+ pub flags: u16, -+ pub nmi_pin: u8, -+ _reserved2: u8, -+} -+ -+const _: () = assert!(size_of::() == 4); -+const _: () = assert!(size_of::() == 10); -+const _: () = assert!(size_of::() == 10); -+ - /// MADT Entries - #[derive(Debug)] - #[allow(dead_code)] -@@ -160,6 +206,14 @@ pub enum MadtEntry { - InvalidGicc(usize), - Gicd(&'static MadtGicd), - InvalidGicd(usize), -+ LocalX2Apic(&'static MadtLocalX2Apic), -+ InvalidLocalX2Apic(usize), -+ LocalApicNmi(&'static MadtLocalApicNmi), -+ InvalidLocalApicNmi(usize), -+ LapicAddressOverride(&'static MadtLapicAddressOverride), -+ InvalidLapicAddressOverride(usize), -+ LocalX2ApicNmi(&'static MadtLocalX2ApicNmi), -+ InvalidLocalX2ApicNmi(usize), - Unknown(u8), - } - -@@ -176,6 +230,10 @@ impl Iterator for MadtIter { - let entry_len = - unsafe { *(self.sdt.data_address() as *const u8).add(self.i + 1) } as usize; - -+ if entry_len < 2 { -+ return None; -+ } -+ - if self.i + entry_len <= self.sdt.data_len() { - let item = match entry_type { - 0x0 => { -@@ -224,6 +282,44 @@ impl Iterator for MadtIter { - MadtEntry::InvalidGicd(entry_len) - } - } -+ 0x9 => { -+ if entry_len == size_of::() + 2 { -+ MadtEntry::LocalX2Apic(unsafe { -+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalX2Apic) -+ }) -+ } else { -+ MadtEntry::InvalidLocalX2Apic(entry_len) -+ } -+ } -+ 0x4 => { -+ if entry_len == size_of::() + 2 { -+ MadtEntry::LocalApicNmi(unsafe { -+ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalApicNmi) -+ }) -+ } else { -+ MadtEntry::InvalidLocalApicNmi(entry_len) -+ } -+ } -+ 0x5 => { -+ if entry_len == size_of::() + 2 { -+ MadtEntry::LapicAddressOverride(unsafe { -+ &*((self.sdt.data_address() + self.i + 2) -+ as *const MadtLapicAddressOverride) -+ }) -+ } else { -+ MadtEntry::InvalidLapicAddressOverride(entry_len) -+ } -+ } -+ 0xA => { -+ if entry_len == size_of::() + 
2 { -+ MadtEntry::LocalX2ApicNmi(unsafe { -+ &*((self.sdt.data_address() + self.i + 2) -+ as *const MadtLocalX2ApicNmi) -+ }) -+ } else { -+ MadtEntry::InvalidLocalX2ApicNmi(entry_len) -+ } -+ } - _ => MadtEntry::Unknown(entry_type), - }; - - -diff --git a/src/devices/graphical_debug/mod.rs b/src/devices/graphical_debug/mod.rs -index b701c9a8..00cc984d 100644 ---- a/src/devices/graphical_debug/mod.rs -+++ b/src/devices/graphical_debug/mod.rs -@@ -59,7 +59,12 @@ pub fn init(env: &[u8]) { - ); - - let debug_display = DebugDisplay::new(width, height, stride, virt as *mut u32); -- *DEBUG_DISPLAY.lock() = Some(debug_display); -+ // FIXME: Writing to the framebuffer during early boot causes a hang on some -+ // QEMU configurations (virtio-vga, ramfb). The bootloader maps the framebuffer -+ // with default caching; the kernel remaps it with write-combining in memory::init(). -+ // Early kernel access before that remap appears to stall. Deferring DEBUG_DISPLAY -+ // setup avoids the hang; userspace vesad/fbbootlogd handles graphical output. -+ // *DEBUG_DISPLAY.lock() = Some(debug_display); - } - - #[allow(unused)] diff --git a/local/patches/kernel/redox.patch.bak b/local/patches/kernel/redox.patch.bak new file mode 100644 index 00000000..7977f2f2 --- /dev/null +++ b/local/patches/kernel/redox.patch.bak @@ -0,0 +1,147 @@ +diff --git a/src/acpi/madt/mod.rs b/src/acpi/madt/mod.rs +index 3159b9c4..c691eb8d 100644 +--- a/src/acpi/madt/mod.rs ++++ b/src/acpi/madt/mod.rs +@@ -146,6 +146,52 @@ pub struct MadtGicd { + _reserved2: [u8; 3], + } + ++/// MADT Local x2APIC (entry type 0x9) ++/// Used by modern AMD and Intel platforms with APIC IDs >= 255. ++#[derive(Clone, Copy, Debug)] ++#[repr(C, packed)] ++pub struct MadtLocalX2Apic { ++ _reserved: u16, ++ pub x2apic_id: u32, ++ pub flags: u32, ++ pub processor_uid: u32, ++} ++ ++/// MADT Local APIC NMI (entry type 0x4) ++/// Configures NMI routing to a processor's LINT0/LINT1 pin. 
++#[derive(Clone, Copy, Debug)] ++#[repr(C, packed)] ++pub struct MadtLocalApicNmi { ++ pub processor: u8, ++ pub flags: u16, ++ pub nmi_pin: u8, ++} ++ ++/// MADT Local APIC Address Override (entry type 0x5) ++/// Provides 64-bit override for the 32-bit local APIC address. ++#[derive(Clone, Copy, Debug)] ++#[repr(C, packed)] ++pub struct MadtLapicAddressOverride { ++ _reserved: u16, ++ pub local_apic_address: u64, ++} ++ ++/// MADT Local x2APIC NMI (entry type 0xA) ++/// x2APIC equivalent of type 0x4 for APIC IDs >= 255. ++#[derive(Clone, Copy, Debug)] ++#[repr(C, packed)] ++pub struct MadtLocalX2ApicNmi { ++ _reserved: u16, ++ pub processor_uid: u32, ++ pub flags: u16, ++ pub nmi_pin: u8, ++ _reserved2: u8, ++} ++ ++const _: () = assert!(size_of::() == 4); ++const _: () = assert!(size_of::() == 10); ++const _: () = assert!(size_of::() == 10); ++ + /// MADT Entries + #[derive(Debug)] + #[allow(dead_code)] +@@ -160,6 +206,14 @@ pub enum MadtEntry { + InvalidGicc(usize), + Gicd(&'static MadtGicd), + InvalidGicd(usize), ++ LocalX2Apic(&'static MadtLocalX2Apic), ++ InvalidLocalX2Apic(usize), ++ LocalApicNmi(&'static MadtLocalApicNmi), ++ InvalidLocalApicNmi(usize), ++ LapicAddressOverride(&'static MadtLapicAddressOverride), ++ InvalidLapicAddressOverride(usize), ++ LocalX2ApicNmi(&'static MadtLocalX2ApicNmi), ++ InvalidLocalX2ApicNmi(usize), + Unknown(u8), + } + +@@ -176,6 +230,10 @@ impl Iterator for MadtIter { + let entry_len = + unsafe { *(self.sdt.data_address() as *const u8).add(self.i + 1) } as usize; + ++ if entry_len < 2 { ++ return None; ++ } ++ + if self.i + entry_len <= self.sdt.data_len() { + let item = match entry_type { + 0x0 => { +@@ -224,6 +282,44 @@ impl Iterator for MadtIter { + MadtEntry::InvalidGicd(entry_len) + } + } ++ 0x9 => { ++ if entry_len == size_of::() + 2 { ++ MadtEntry::LocalX2Apic(unsafe { ++ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalX2Apic) ++ }) ++ } else { ++ MadtEntry::InvalidLocalX2Apic(entry_len) ++ } ++ } ++ 0x4 
=> { ++ if entry_len == size_of::() + 2 { ++ MadtEntry::LocalApicNmi(unsafe { ++ &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalApicNmi) ++ }) ++ } else { ++ MadtEntry::InvalidLocalApicNmi(entry_len) ++ } ++ } ++ 0x5 => { ++ if entry_len == size_of::() + 2 { ++ MadtEntry::LapicAddressOverride(unsafe { ++ &*((self.sdt.data_address() + self.i + 2) ++ as *const MadtLapicAddressOverride) ++ }) ++ } else { ++ MadtEntry::InvalidLapicAddressOverride(entry_len) ++ } ++ } ++ 0xA => { ++ if entry_len == size_of::() + 2 { ++ MadtEntry::LocalX2ApicNmi(unsafe { ++ &*((self.sdt.data_address() + self.i + 2) ++ as *const MadtLocalX2ApicNmi) ++ }) ++ } else { ++ MadtEntry::InvalidLocalX2ApicNmi(entry_len) ++ } ++ } + _ => MadtEntry::Unknown(entry_type), + }; + + +diff --git a/src/devices/graphical_debug/mod.rs b/src/devices/graphical_debug/mod.rs +index b701c9a8..00cc984d 100644 +--- a/src/devices/graphical_debug/mod.rs ++++ b/src/devices/graphical_debug/mod.rs +@@ -59,7 +59,12 @@ pub fn init(env: &[u8]) { + ); + + let debug_display = DebugDisplay::new(width, height, stride, virt as *mut u32); +- *DEBUG_DISPLAY.lock() = Some(debug_display); ++ // FIXME: Writing to the framebuffer during early boot causes a hang on some ++ // QEMU configurations (virtio-vga, ramfb). The bootloader maps the framebuffer ++ // with default caching; the kernel remaps it with write-combining in memory::init(). ++ // Early kernel access before that remap appears to stall. Deferring DEBUG_DISPLAY ++ // setup avoids the hang; userspace vesad/fbbootlogd handles graphical output. 
++ // *DEBUG_DISPLAY.lock() = Some(debug_display); + } + + #[allow(unused)] diff --git a/local/patches/libwayland/redox.patch b/local/patches/libwayland/redox.patch index 5aa9ffdd..81799a01 100644 --- a/local/patches/libwayland/redox.patch +++ b/local/patches/libwayland/redox.patch @@ -1,5 +1,5 @@ ---- a/b/src/connection.c 2025-07-06 13:11:26.000000000 +0100 -+++ b/src/connection.c 2026-05-01 00:15:42.778777823 +0100 +--- a/src/connection.c ++++ b/src/connection.c @@ -40,6 +40,12 @@ #include #include @@ -13,56 +13,8 @@ #include "wayland-util.h" #include "wayland-private.h" #include "wayland-os.h" ---- a/b/src/event-loop.c 2025-07-06 13:11:26.000000000 +0100 -+++ b/src/event-loop.c 2026-05-01 00:15:42.778845239 +0100 -@@ -35,9 +35,43 @@ - #include - #include - #include --#include --#include - #include -+/* Redox: relibc declares signalfd/timerfd in headers but has no implementation. -+ Provide inline implementations via Redox schemes. */ -+#define SFD_CLOEXEC O_CLOEXEC -+#define SFD_NONBLOCK O_NONBLOCK -+#define TFD_CLOEXEC O_CLOEXEC -+#define TFD_NONBLOCK O_NONBLOCK -+#define TFD_TIMER_ABSTIME 0x1 -+struct signalfd_siginfo { uint8_t pad[128]; }; -+static int signalfd(int fd, const sigset_t *mask, int flags) { -+ int oflag = O_RDWR; -+ if (flags & SFD_CLOEXEC) oflag |= O_CLOEXEC; -+ if (flags & SFD_NONBLOCK) oflag |= O_NONBLOCK; -+ if (fd == -1) { fd = open("/scheme/event", oflag); if (fd < 0) return -1; } -+ else { if (flags & SFD_CLOEXEC) fcntl(fd, F_SETFD, FD_CLOEXEC); } -+ sigprocmask(SIG_BLOCK, mask, NULL); -+ return fd; -+} -+static int timerfd_create(int clockid, int flags) { -+ int oflag = O_RDWR; -+ if (flags & TFD_CLOEXEC) oflag |= O_CLOEXEC; -+ if (flags & TFD_NONBLOCK) oflag |= O_NONBLOCK; -+ char path[64]; -+ snprintf(path, sizeof(path), "/scheme/time/%d", clockid); -+ return open(path, oflag); -+} -+static int timerfd_settime(int fd, int flags, const struct itimerspec *new_value, struct itimerspec *old_value) { -+ if (new_value == NULL) { errno = 
EFAULT; return -1; } -+ ssize_t r = write(fd, &new_value->it_value, sizeof(struct timespec)); -+ return (r == sizeof(struct timespec)) ? 0 : -1; -+} -+static int timerfd_gettime(int fd, struct itimerspec *curr) { -+ if (curr == NULL) { errno = EFAULT; return -1; } -+ curr->it_interval = (struct timespec){0}; -+ ssize_t r = read(fd, &curr->it_value, sizeof(struct timespec)); -+ return (r == sizeof(struct timespec)) ? 0 : -1; -+} - #include "timespec-util.h" - #include "wayland-util.h" - #include "wayland-private.h" ---- a/b/src/meson.build 2025-07-06 13:11:26.000000000 +0100 -+++ b/src/meson.build 2026-05-01 00:15:42.778925799 +0100 +--- a/src/meson.build ++++ b/src/meson.build @@ -81,8 +81,7 @@ endif @@ -73,8 +25,8 @@ else wayland_scanner_for_build = wayland_scanner endif ---- a/b/src/wayland-server.c 2025-07-06 13:11:26.000000000 +0100 -+++ b/src/wayland-server.c 2026-05-01 00:15:42.779083803 +0100 +--- a/src/wayland-server.c ++++ b/src/wayland-server.c @@ -39,7 +39,23 @@ #include #include diff --git a/local/patches/libwayland/redox.patch.bak b/local/patches/libwayland/redox.patch.bak new file mode 100644 index 00000000..5aa9ffdd --- /dev/null +++ b/local/patches/libwayland/redox.patch.bak @@ -0,0 +1,102 @@ +--- a/b/src/connection.c 2025-07-06 13:11:26.000000000 +0100 ++++ b/src/connection.c 2026-05-01 00:15:42.778777823 +0100 +@@ -40,6 +40,12 @@ + #include + #include + ++#ifndef MSG_NOSIGNAL ++#define MSG_NOSIGNAL 0 ++#endif ++ ++extern FILE *open_memstream(char **bufp, size_t *sizep); ++ + #include "wayland-util.h" + #include "wayland-private.h" + #include "wayland-os.h" +--- a/b/src/event-loop.c 2025-07-06 13:11:26.000000000 +0100 ++++ b/src/event-loop.c 2026-05-01 00:15:42.778845239 +0100 +@@ -35,9 +35,43 @@ + #include + #include + #include +-#include +-#include + #include ++/* Redox: relibc declares signalfd/timerfd in headers but has no implementation. ++ Provide inline implementations via Redox schemes. 
*/ ++#define SFD_CLOEXEC O_CLOEXEC ++#define SFD_NONBLOCK O_NONBLOCK ++#define TFD_CLOEXEC O_CLOEXEC ++#define TFD_NONBLOCK O_NONBLOCK ++#define TFD_TIMER_ABSTIME 0x1 ++struct signalfd_siginfo { uint8_t pad[128]; }; ++static int signalfd(int fd, const sigset_t *mask, int flags) { ++ int oflag = O_RDWR; ++ if (flags & SFD_CLOEXEC) oflag |= O_CLOEXEC; ++ if (flags & SFD_NONBLOCK) oflag |= O_NONBLOCK; ++ if (fd == -1) { fd = open("/scheme/event", oflag); if (fd < 0) return -1; } ++ else { if (flags & SFD_CLOEXEC) fcntl(fd, F_SETFD, FD_CLOEXEC); } ++ sigprocmask(SIG_BLOCK, mask, NULL); ++ return fd; ++} ++static int timerfd_create(int clockid, int flags) { ++ int oflag = O_RDWR; ++ if (flags & TFD_CLOEXEC) oflag |= O_CLOEXEC; ++ if (flags & TFD_NONBLOCK) oflag |= O_NONBLOCK; ++ char path[64]; ++ snprintf(path, sizeof(path), "/scheme/time/%d", clockid); ++ return open(path, oflag); ++} ++static int timerfd_settime(int fd, int flags, const struct itimerspec *new_value, struct itimerspec *old_value) { ++ if (new_value == NULL) { errno = EFAULT; return -1; } ++ ssize_t r = write(fd, &new_value->it_value, sizeof(struct timespec)); ++ return (r == sizeof(struct timespec)) ? 0 : -1; ++} ++static int timerfd_gettime(int fd, struct itimerspec *curr) { ++ if (curr == NULL) { errno = EFAULT; return -1; } ++ curr->it_interval = (struct timespec){0}; ++ ssize_t r = read(fd, &curr->it_value, sizeof(struct timespec)); ++ return (r == sizeof(struct timespec)) ? 
0 : -1; ++} + #include "timespec-util.h" + #include "wayland-util.h" + #include "wayland-private.h" +--- a/b/src/meson.build 2025-07-06 13:11:26.000000000 +0100 ++++ b/src/meson.build 2026-05-01 00:15:42.778925799 +0100 +@@ -81,8 +81,7 @@ + endif + + if meson.is_cross_build() or not get_option('scanner') +- scanner_dep = dependency('wayland-scanner', native: true, version: meson.project_version()) +- wayland_scanner_for_build = find_program(scanner_dep.get_variable(pkgconfig: 'wayland_scanner')) ++ wayland_scanner_for_build = find_program('wayland-scanner', native: true) + else + wayland_scanner_for_build = wayland_scanner + endif +--- a/b/src/wayland-server.c 2025-07-06 13:11:26.000000000 +0100 ++++ b/src/wayland-server.c 2026-05-01 00:15:42.779083803 +0100 +@@ -39,7 +39,23 @@ + #include + #include + #include +-#include ++#ifndef EFD_CLOEXEC ++#define EFD_CLOEXEC O_CLOEXEC ++#endif ++#ifndef EFD_NONBLOCK ++#define EFD_NONBLOCK O_NONBLOCK ++#endif ++#ifndef EFD_SEMAPHORE ++#define EFD_SEMAPHORE 0x1 ++#endif ++static int eventfd(unsigned int initval, int flags) { ++ int oflag = O_RDWR; ++ if (flags & EFD_CLOEXEC) oflag |= O_CLOEXEC; ++ if (flags & EFD_NONBLOCK) oflag |= O_NONBLOCK; ++ char path[64]; ++ snprintf(path, sizeof(path), "/scheme/event/eventfd/%u/%d", initval, (flags & EFD_SEMAPHORE) ? 
1 : 0); ++ return open(path, oflag); ++} + #include + #include + diff --git a/local/patches/qtbase/P0-remove-redox-linkat-unlinkat-stubs.patch b/local/patches/qtbase/P0-remove-redox-linkat-unlinkat-stubs.patch index 3097d5fd..fd751aae 100644 --- a/local/patches/qtbase/P0-remove-redox-linkat-unlinkat-stubs.patch +++ b/local/patches/qtbase/P0-remove-redox-linkat-unlinkat-stubs.patch @@ -2,7 +2,7 @@ diff --git a/src/corelib/io/qfilesystemengine_unix.cpp b/src/corelib/io/qfilesys index e857f1a..f4f7f4a 100644 --- a/src/corelib/io/qfilesystemengine_unix.cpp +++ b/src/corelib/io/qfilesystemengine_unix.cpp -@@ -27,23 +27,6 @@ +@@ -27,22 +27,5 @@ #include #include diff --git a/local/patches/qtbase/P0-remove-redox-linkat-unlinkat-stubs.patch.bak b/local/patches/qtbase/P0-remove-redox-linkat-unlinkat-stubs.patch.bak new file mode 100644 index 00000000..3097d5fd --- /dev/null +++ b/local/patches/qtbase/P0-remove-redox-linkat-unlinkat-stubs.patch.bak @@ -0,0 +1,27 @@ +diff --git a/src/corelib/io/qfilesystemengine_unix.cpp b/src/corelib/io/qfilesystemengine_unix.cpp +index e857f1a..f4f7f4a 100644 +--- a/src/corelib/io/qfilesystemengine_unix.cpp ++++ b/src/corelib/io/qfilesystemengine_unix.cpp +@@ -27,23 +27,6 @@ + #include + #include + +-#ifdef Q_OS_REDOX +-// relibc does not provide unlinkat/linkat yet (POSIX.1-2008 *at functions). +-// Provide inline stubs that work for AT_FDCWD only - sufficient for +-// FreeDesktop trash operations in this file. 
+-#include +-static inline int unlinkat(int dirfd, const char *pathname, int flags) +-{ +- if (dirfd != AT_FDCWD || flags != 0) { errno = ENOTSUP; return -1; } +- return unlink(pathname); +-} +-static inline int linkat(int olddirfd, const char *oldpath, int newdirfd, const char *newpath, int flags) +-{ +- if (olddirfd != AT_FDCWD || newdirfd != AT_FDCWD || flags != 0) { errno = ENOTSUP; return -1; } +- return link(oldpath, newpath); +-} +-#endif +- + #include + #include // for std::unique_ptr diff --git a/local/patches/qtbase/P1-qplatformopengl-guard.patch b/local/patches/qtbase/P1-qplatformopengl-guard.patch index b61ef672..d941cac8 100644 --- a/local/patches/qtbase/P1-qplatformopengl-guard.patch +++ b/local/patches/qtbase/P1-qplatformopengl-guard.patch @@ -16,7 +16,7 @@ #if QT_CONFIG(opengl) virtual QOpenGLContext *createOpenGLContext(EGLContext context, EGLDisplay contextDisplay, QOpenGLContext *shareContext) const = 0; virtual QPlatformOffscreenSurface *createPlatformOffscreenSurface(QOffscreenSurface *surface) const { Q_UNUSED(surface); return nullptr; } -@@ -65,7 +71,11 @@ +@@ -65,8 +71,12 @@ EglContext }; virtual void *nativeResource(NativeResource /*resource*/) { return nullptr; } diff --git a/local/patches/qtbase/P1-qplatformopengl-guard.patch.bak b/local/patches/qtbase/P1-qplatformopengl-guard.patch.bak new file mode 100644 index 00000000..b61ef672 --- /dev/null +++ b/local/patches/qtbase/P1-qplatformopengl-guard.patch.bak @@ -0,0 +1,56 @@ +--- qtb-orig/src/plugins/platforms/wayland/hardwareintegration/qwaylandclientbufferintegration_p.h 2026-03-05 07:51:49.000000000 +0000 ++++ qtb-mod/src/plugins/platforms/wayland/hardwareintegration/qwaylandclientbufferintegration_p.h 2026-04-28 14:03:29.046092114 +0100 +@@ -51,9 +51,15 @@ + virtual bool supportsThreadedOpenGL() const { return false; } + virtual bool supportsWindowDecoration() const { return false; } + ++ #if QT_CONFIG(opengl) ++#if QT_CONFIG(opengl) ++#if QT_CONFIG(opengl) + virtual QWaylandWindow 
*createEglWindow(QWindow *window) = 0; + virtual QPlatformOpenGLContext *createPlatformOpenGLContext(const QSurfaceFormat &glFormat, QPlatformOpenGLContext *share) const = 0; ++#endif ++#endif + virtual bool canCreatePlatformOffscreenSurface() const { return false; } ++#endif + #if QT_CONFIG(opengl) + virtual QOpenGLContext *createOpenGLContext(EGLContext context, EGLDisplay contextDisplay, QOpenGLContext *shareContext) const = 0; + virtual QPlatformOffscreenSurface *createPlatformOffscreenSurface(QOffscreenSurface *surface) const { Q_UNUSED(surface); return nullptr; } +@@ -65,7 +71,11 @@ + EglContext + }; + virtual void *nativeResource(NativeResource /*resource*/) { return nullptr; } ++#if QT_CONFIG(opengl) ++#if QT_CONFIG(opengl) + virtual void *nativeResourceForContext(NativeResource /*resource*/, QPlatformOpenGLContext */*context*/) { return nullptr; } ++#endif ++#endif + }; + + } +--- qtb-orig/src/plugins/platforms/wayland/qwaylandintegration.cpp 2026-03-05 07:51:49.000000000 +0000 ++++ qtb-mod/src/plugins/platforms/wayland/qwaylandintegration.cpp 2026-04-28 14:34:26.740450805 +0100 +@@ -135,17 +135,23 @@ + case ScreenWindowGrabbing: // whether QScreen::grabWindow() is supported + return false; + case OffscreenSurface: ++#if QT_CONFIG(opengl) + return mDisplay->clientBufferIntegration() + && mDisplay->clientBufferIntegration()->canCreatePlatformOffscreenSurface(); ++#else ++ return false; ++#endif + default: return QPlatformIntegration::hasCapability(cap); + } + } + + QPlatformWindow *QWaylandIntegration::createPlatformWindow(QWindow *window) const + { ++#if QT_CONFIG(opengl) + if (window->surfaceType() == QWindow::OpenGLSurface + && mDisplay->clientBufferIntegration()) + return mDisplay->clientBufferIntegration()->createEglWindow(window); ++#endif + + #if QT_CONFIG(vulkan) + if (window->surfaceType() == QSurface::VulkanSurface) diff --git a/local/patches/qtbase/P2-enable-network-and-tuiotouch.patch b/local/patches/qtbase/P2-enable-network-and-tuiotouch.patch 
index efa9b06a..4b76f902 100644 --- a/local/patches/qtbase/P2-enable-network-and-tuiotouch.patch +++ b/local/patches/qtbase/P2-enable-network-and-tuiotouch.patch @@ -1,7 +1,7 @@ diff -ruwN source-old/src/CMakeLists.txt source/src/CMakeLists.txt --- source-old/src/CMakeLists.txt 2024-12-02 05:39:06.000000000 +0000 +++ source/src/CMakeLists.txt 2026-04-30 00:00:00.000000000 +0000 -@@ -54,5 +54,5 @@ +@@ -54,7 +54,7 @@ endif() if (QT_FEATURE_network) - # add_subdirectory(network) # disabled for Redox @@ -12,7 +12,7 @@ diff -ruwN source-old/src/CMakeLists.txt source/src/CMakeLists.txt diff -ruwN source-old/src/plugins/generic/CMakeLists.txt source/src/plugins/generic/CMakeLists.txt --- source-old/src/plugins/generic/CMakeLists.txt 2024-12-02 05:39:06.000000000 +0000 +++ source/src/plugins/generic/CMakeLists.txt 2026-04-30 00:00:00.000000000 +0000 -@@ -18,6 +18,6 @@ +@@ -18,7 +18,7 @@ add_subdirectory(tslib) endif() if(QT_FEATURE_tuiotouch) diff --git a/local/patches/qtbase/P2-enable-network-and-tuiotouch.patch.bak b/local/patches/qtbase/P2-enable-network-and-tuiotouch.patch.bak new file mode 100644 index 00000000..efa9b06a --- /dev/null +++ b/local/patches/qtbase/P2-enable-network-and-tuiotouch.patch.bak @@ -0,0 +1,23 @@ +diff -ruwN source-old/src/CMakeLists.txt source/src/CMakeLists.txt +--- source-old/src/CMakeLists.txt 2024-12-02 05:39:06.000000000 +0000 ++++ source/src/CMakeLists.txt 2026-04-30 00:00:00.000000000 +0000 +@@ -54,5 +54,5 @@ + endif() + if (QT_FEATURE_network) +- # add_subdirectory(network) # disabled for Redox ++ add_subdirectory(network) + if (ANDROID) + add_subdirectory(network/android/jar) + endif() +diff -ruwN source-old/src/plugins/generic/CMakeLists.txt source/src/plugins/generic/CMakeLists.txt +--- source-old/src/plugins/generic/CMakeLists.txt 2024-12-02 05:39:06.000000000 +0000 ++++ source/src/plugins/generic/CMakeLists.txt 2026-04-30 00:00:00.000000000 +0000 +@@ -18,6 +18,6 @@ + add_subdirectory(tslib) + endif() + if(QT_FEATURE_tuiotouch) +- # 
add_subdirectory(tuiotouch) # disabled for Redox (needs Network) ++ add_subdirectory(tuiotouch) + endif() + if(QT_FEATURE_libinput) + add_subdirectory(libinput) diff --git a/local/patches/relibc/P3-dns-resolver-hardening.patch b/local/patches/relibc/P3-dns-resolver-hardening.patch.bak similarity index 100% rename from local/patches/relibc/P3-dns-resolver-hardening.patch rename to local/patches/relibc/P3-dns-resolver-hardening.patch.bak diff --git a/local/patches/relibc/P3-fd-event-tests.patch b/local/patches/relibc/P3-fd-event-tests.patch index acd73f70..24e24e3d 100644 --- a/local/patches/relibc/P3-fd-event-tests.patch +++ b/local/patches/relibc/P3-fd-event-tests.patch @@ -1,7 +1,7 @@ diff --git a/tests/Makefile.tests.mk b/tests/Makefile.tests.mk --- a/tests/Makefile.tests.mk +++ b/tests/Makefile.tests.mk -@@ -314,8 +314,12 @@ VARIED_NAMES=\ +@@ -314,0 +314,4 @@ VARIED_NAMES=\ grp/gr_iter \ semaphore/named \ semaphore/unnamed \ diff --git a/local/patches/relibc/P3-fd-event-tests.patch.bak b/local/patches/relibc/P3-fd-event-tests.patch.bak new file mode 100644 index 00000000..acd73f70 --- /dev/null +++ b/local/patches/relibc/P3-fd-event-tests.patch.bak @@ -0,0 +1,15 @@ +diff --git a/tests/Makefile.tests.mk b/tests/Makefile.tests.mk +--- a/tests/Makefile.tests.mk ++++ b/tests/Makefile.tests.mk +@@ -314,8 +314,12 @@ VARIED_NAMES=\ + grp/gr_iter \ + semaphore/named \ + semaphore/unnamed \ ++ sys_eventfd/eventfd \ ++ sys_signalfd/header_only \ ++ sys_signalfd/signalfd \ ++ sys_timerfd/timerfd \ + waitid \ + waitpid \ + waitpid_multiple \ + $(FAILING_TESTS) diff --git a/local/patches/relibc/P3-fenv.patch b/local/patches/relibc/P3-fenv.patch.bak similarity index 100% rename from local/patches/relibc/P3-fenv.patch rename to local/patches/relibc/P3-fenv.patch.bak diff --git a/local/patches/relibc/P3-named-semaphores.patch b/local/patches/relibc/P3-named-semaphores.patch.bak similarity index 100% rename from local/patches/relibc/P3-named-semaphores.patch rename to 
local/patches/relibc/P3-named-semaphores.patch.bak diff --git a/local/patches/relibc/P3-open-memstream.patch b/local/patches/relibc/P3-open-memstream.patch index 13521090..9eab230c 100644 --- a/local/patches/relibc/P3-open-memstream.patch +++ b/local/patches/relibc/P3-open-memstream.patch @@ -141,7 +141,7 @@ new file mode 100644 diff --git a/tests/Makefile.tests.mk b/tests/Makefile.tests.mk --- a/tests/Makefile.tests.mk +++ b/tests/Makefile.tests.mk -@@ -85,6 +85,7 @@ VARIED_NAMES=\ +@@ -85,1 +85,2 @@ VARIED_NAMES=\ stdio/fseek \ stdio/fwrite \ stdio/getc_unget \ diff --git a/local/patches/relibc/P3-open-memstream.patch.bak b/local/patches/relibc/P3-open-memstream.patch.bak new file mode 100644 index 00000000..13521090 --- /dev/null +++ b/local/patches/relibc/P3-open-memstream.patch.bak @@ -0,0 +1,181 @@ +diff --git a/src/header/stdio/mod.rs b/src/header/stdio/mod.rs +--- a/src/header/stdio/mod.rs ++++ b/src/header/stdio/mod.rs +@@ -46,4 +46,7 @@ + pub use self::getdelim::*; + mod getdelim; + ++pub use self::open_memstream::*; ++mod open_memstream; ++ + mod ext; +diff --git a/src/header/stdio/open_memstream.rs b/src/header/stdio/open_memstream.rs +new file mode 100644 +--- /dev/null ++++ b/src/header/stdio/open_memstream.rs +@@ -0,0 +1,124 @@ ++use alloc::{boxed::Box, vec, vec::Vec}; ++use core::ptr; ++ ++use super::{ ++ Buffer, FILE, ++ constants::{BUFSIZ, F_NORD}, ++}; ++use crate::{ ++ error::{Errno, ResultExtPtrMut}, ++ fs::File, ++ header::{ ++ errno::{EFAULT, ENOMEM}, ++ fcntl, pthread, stdlib, unistd, ++ }, ++ io::{self, BufWriter, Write}, ++ platform::{ ++ ERRNO, ++ types::{c_char, size_t}, ++ }, ++}; ++ ++struct MemstreamWriter { ++ bufp: *mut *mut c_char, ++ sizep: *mut size_t, ++ current: *mut c_char, ++ buffer: Vec, ++} ++ ++unsafe impl Send for MemstreamWriter {} ++ ++impl MemstreamWriter { ++ fn new(bufp: *mut *mut c_char, sizep: *mut size_t) -> Self { ++ Self { ++ bufp, ++ sizep, ++ current: ptr::null_mut(), ++ buffer: Vec::new(), ++ } ++ } ++ ++ fn 
sync_output(&mut self) -> io::Result<()> { ++ let size = self.buffer.len(); ++ let alloc_size = size ++ .checked_add(1) ++ .ok_or_else(|| io::Error::from_raw_os_error(ENOMEM))?; ++ ++ let raw = if self.current.is_null() { ++ unsafe { stdlib::malloc(alloc_size) } ++ } else { ++ unsafe { stdlib::realloc(self.current.cast(), alloc_size) } ++ }; ++ if raw.is_null() { ++ return Err(io::Error::from_raw_os_error(ENOMEM)); ++ } ++ ++ let raw = raw.cast::(); ++ if size != 0 { ++ unsafe { ptr::copy_nonoverlapping(self.buffer.as_ptr(), raw.cast::(), size) }; ++ } ++ unsafe { ++ *raw.add(size) = 0; ++ *self.bufp = raw; ++ *self.sizep = size; ++ } ++ self.current = raw; ++ Ok(()) ++ } ++} ++ ++impl Write for MemstreamWriter { ++ fn write(&mut self, buf: &[u8]) -> io::Result { ++ self.buffer ++ .try_reserve(buf.len()) ++ .map_err(|_| io::Error::from_raw_os_error(ENOMEM))?; ++ self.buffer.extend_from_slice(buf); ++ Ok(buf.len()) ++ } ++ ++ fn flush(&mut self) -> io::Result<()> { ++ self.sync_output() ++ } ++} ++ ++fn create_memstream(bufp: *mut *mut c_char, sizep: *mut size_t) -> Result, Errno> { ++ if bufp.is_null() || sizep.is_null() { ++ return Err(Errno(EFAULT)); ++ } ++ ++ unsafe { ++ *bufp = ptr::null_mut(); ++ *sizep = 0; ++ } ++ ++ let mut fds = [0; 2]; ++ if unsafe { unistd::pipe2(fds.as_mut_ptr(), fcntl::O_CLOEXEC) } != 0 { ++ return Err(Errno(ERRNO.get())); ++ } ++ let _ = unistd::close(fds[0]); ++ ++ let file = File::new(fds[1]); ++ let writer = Box::new(BufWriter::new(MemstreamWriter::new(bufp, sizep))); ++ let mutex_attr = pthread::RlctMutexAttr { ++ ty: pthread::PTHREAD_MUTEX_RECURSIVE, ++ ..Default::default() ++ }; ++ ++ Ok(Box::new(FILE { ++ lock: pthread::RlctMutex::new(&mutex_attr).unwrap(), ++ file, ++ flags: F_NORD, ++ read_buf: Buffer::Owned(vec![0; BUFSIZ as usize]), ++ read_pos: 0, ++ read_size: 0, ++ unget: Vec::new(), ++ writer, ++ pid: None, ++ orientation: 0, ++ })) ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn open_memstream(bufp: *mut *mut 
c_char, sizep: *mut size_t) -> *mut FILE { ++ create_memstream(bufp, sizep).or_errno_null_mut() ++} +diff --git a/tests/Makefile.tests.mk b/tests/Makefile.tests.mk +--- a/tests/Makefile.tests.mk ++++ b/tests/Makefile.tests.mk +@@ -85,6 +85,7 @@ VARIED_NAMES=\ + stdio/fseek \ + stdio/fwrite \ + stdio/getc_unget \ +- stdio/getline \ ++ stdio/getline \ ++ stdio/open_memstream \ + stdio/mutex \ + stdio/popen \ +diff --git a/tests/stdio/open_memstream.c b/tests/stdio/open_memstream.c +new file mode 100644 +--- /dev/null ++++ b/tests/stdio/open_memstream.c +@@ -0,0 +1,24 @@ ++#include ++#include ++#include ++#include ++ ++int main(void) { ++ char *buf = NULL; ++ size_t size = 0; ++ ++ FILE *stream = open_memstream(&buf, &size); ++ assert(stream != NULL); ++ assert(fputs("hello", stream) >= 0); ++ assert(fflush(stream) == 0); ++ assert(size == 5); ++ assert(strcmp(buf, "hello") == 0); ++ assert(fputc('!', stream) != EOF); ++ assert(fclose(stream) == 0); ++ assert(size == 6); ++ assert(strcmp(buf, "hello!") == 0); ++ ++ free(buf); ++ puts("open_memstream ok"); ++ return 0; ++} diff --git a/local/patches/relibc/P3-sched.patch b/local/patches/relibc/P3-sched.patch.bak similarity index 100% rename from local/patches/relibc/P3-sched.patch rename to local/patches/relibc/P3-sched.patch.bak diff --git a/local/patches/relibc/P3-signalfd.patch b/local/patches/relibc/P3-signalfd.patch index af191231..4fa163c9 100644 --- a/local/patches/relibc/P3-signalfd.patch +++ b/local/patches/relibc/P3-signalfd.patch @@ -1,7 +1,7 @@ diff -ruN a/src/header/signal/mod.rs b/src/header/signal/mod.rs --- a/src/header/signal/mod.rs 2026-04-15 09:40:30.420306210 +0100 +++ b/src/header/signal/mod.rs 2026-04-15 09:46:42.011891206 +0100 -@@ -32,6 +32,9 @@ +@@ -32,6 +32,9 @@ #[path = "redox.rs"] pub mod sys; diff --git a/local/patches/relibc/P3-signalfd.patch.bak b/local/patches/relibc/P3-signalfd.patch.bak new file mode 100644 index 00000000..af191231 --- /dev/null +++
b/local/patches/relibc/P3-signalfd.patch.bak @@ -0,0 +1,120 @@ +diff -ruN a/src/header/signal/mod.rs b/src/header/signal/mod.rs +--- a/src/header/signal/mod.rs 2026-04-15 09:40:30.420306210 +0100 ++++ b/src/header/signal/mod.rs 2026-04-15 09:46:42.011891206 +0100 +@@ -32,6 +32,9 @@ + #[path = "redox.rs"] + pub mod sys; + ++mod signalfd; ++pub use self::signalfd::*; ++ + type SigSet = BitSet<[u64; 1]>; + + pub(crate) const SIG_DFL: usize = 0; +diff -ruN a/src/header/signal/signalfd.rs b/src/header/signal/signalfd.rs +--- a/src/header/signal/signalfd.rs 1970-01-01 00:00:00.000000000 +0000 ++++ b/src/header/signal/signalfd.rs 2026-04-15 09:46:42.011930569 +0100 +@@ -0,0 +1,103 @@ ++use core::{mem, ptr}; ++ ++use crate::{ ++ error::{Errno, ResultExt}, ++ header::fcntl::{ ++ FD_CLOEXEC, F_GETFL, F_SETFD, F_SETFL, O_CLOEXEC, O_NONBLOCK, O_RDWR, fcntl, ++ }, ++ platform::{ ++ ERRNO, Pal, Sys, ++ types::{c_int, c_ulonglong}, ++ }, ++}; ++ ++use super::{SIG_BLOCK, sigprocmask, sigset_t}; ++ ++pub const SFD_CLOEXEC: c_int = 0x80000; ++pub const SFD_NONBLOCK: c_int = 0x800; ++ ++#[repr(C)] ++#[derive(Clone, Copy, Default)] ++pub struct signalfd_siginfo { ++ pub ssi_signo: u32, ++ pub ssi_errno: i32, ++ pub ssi_code: i32, ++ pub ssi_pid: u32, ++ pub ssi_uid: u32, ++ pub ssi_fd: i32, ++ pub ssi_tid: u32, ++ pub ssi_band: u32, ++ pub ssi_overrun: u32, ++ pub ssi_trapno: u32, ++ pub ssi_status: i32, ++ pub ssi_int: i32, ++ pub ssi_ptr: u64, ++ pub ssi_utime: u64, ++ pub ssi_stime: u64, ++ pub ssi_addr: u64, ++ pub ssi_addr_lsb: u16, ++ pub __pad2: u16, ++ pub ssi_syscall: i32, ++ pub ssi_call_addr: u64, ++ pub ssi_arch: u32, ++ pub __pad: [u8; 28], ++} ++ ++#[unsafe(no_mangle)] ++pub extern "C" fn _cbindgen_export_signalfd_siginfo(siginfo: signalfd_siginfo) {} ++ ++fn signalfd4_inner(fd: c_int, mask: *const sigset_t, masksize: usize, flags: c_int) -> Result { ++ let supported = SFD_CLOEXEC | SFD_NONBLOCK; ++ if flags & !supported != 0 || masksize != mem::size_of::() { ++ return 
Err(Errno(crate::header::errno::EINVAL)); ++ } ++ if mask.is_null() { ++ return Err(Errno(crate::header::errno::EFAULT)); ++ } ++ ++ let new_fd = if fd == -1 { ++ let mut oflag = O_RDWR; ++ if flags & SFD_CLOEXEC == SFD_CLOEXEC { ++ oflag |= O_CLOEXEC; ++ } ++ if flags & SFD_NONBLOCK == SFD_NONBLOCK { ++ oflag |= O_NONBLOCK; ++ } ++ Sys::open(c"/scheme/event".into(), oflag, 0)? ++ } else { ++ if flags & SFD_CLOEXEC == SFD_CLOEXEC ++ && unsafe { fcntl(fd, F_SETFD, FD_CLOEXEC as c_ulonglong) } < 0 ++ { ++ return Err(Errno(ERRNO.get())); ++ } ++ if flags & SFD_NONBLOCK == SFD_NONBLOCK { ++ let current = unsafe { fcntl(fd, F_GETFL, 0 as c_ulonglong) }; ++ if current < 0 { ++ return Err(Errno(ERRNO.get())); ++ } ++ if unsafe { fcntl(fd, F_SETFL, (current | O_NONBLOCK) as c_ulonglong) } < 0 { ++ return Err(Errno(ERRNO.get())); ++ } ++ } ++ fd ++ }; ++ ++ if unsafe { sigprocmask(SIG_BLOCK, mask, ptr::null_mut()) } < 0 { ++ if fd == -1 { ++ let _ = Sys::close(new_fd); ++ } ++ return Err(Errno(ERRNO.get())); ++ } ++ ++ Ok(new_fd) ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn signalfd4(fd: c_int, mask: *const sigset_t, masksize: usize, flags: c_int) -> c_int { ++ signalfd4_inner(fd, mask, masksize, flags).or_minus_one_errno() ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn signalfd(fd: c_int, mask: *const sigset_t, masksize: usize) -> c_int { ++ unsafe { signalfd4(fd, mask, masksize, 0) } ++} diff --git a/local/patches/relibc/P3-sysv-ipc.patch b/local/patches/relibc/P3-sysv-ipc.patch index cc47f279..c96df48e 100644 --- a/local/patches/relibc/P3-sysv-ipc.patch +++ b/local/patches/relibc/P3-sysv-ipc.patch @@ -1,7 +1,7 @@ diff -ruN a/src/header/mod.rs b/src/header/mod.rs --- a/src/header/mod.rs 2026-04-15 09:55:11.441949342 +0100 +++ b/src/header/mod.rs 2026-04-15 09:57:28.904091552 +0100 -@@ -92,14 +92,14 @@ +@@ -92,14 +92,14 @@ pub mod sys_eventfd; pub mod sys_file; pub mod sys_ioctl; @@ -22,7 +22,7 @@ diff -ruN a/src/header/mod.rs b/src/header/mod.rs diff
-ruN a/src/header/sys_ipc/cbindgen.toml b/src/header/sys_ipc/cbindgen.toml --- a/src/header/sys_ipc/cbindgen.toml 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_ipc/cbindgen.toml 2026-04-15 09:57:28.904120977 +0100 -@@ -0,0 +1,12 @@ +@@ -0,0 +1,12 @@ +sys_includes = ["sys/types.h"] +include_guard = "_SYS_IPC_H" +trailer = """ +typedef struct ipc_perm ipc_perm; +""" +language = "C" +style = "Tag" +no_includes = true +cpp_compat = true + +[enum] +prefix_with_name = true diff -ruN a/src/header/sys_ipc/mod.rs b/src/header/sys_ipc/mod.rs --- a/src/header/sys_ipc/mod.rs 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_ipc/mod.rs 2026-04-15 09:57:28.904159138 +0100 -@@ -0,0 +1,31 @@ +@@ -0,0 +1,31 @@ +//! `sys/ipc.h` implementation. + +use crate::platform::types::{c_int, c_ushort}; + +pub type key_t = c_int; + +pub const IPC_PRIVATE: key_t = 0; +pub const IPC_CREAT: c_int = 0o1000; +pub const IPC_EXCL: c_int = 0o2000; +pub const IPC_NOWAIT: c_int = 0o4000; + +pub const IPC_RMID: c_int = 0; +pub const IPC_SET: c_int = 1; +pub const IPC_STAT: c_int = 2; + +#[repr(C)] +#[derive(Clone, Copy, Default)] +pub struct ipc_perm { + pub __key: key_t, + pub uid: c_ushort, + pub gid: c_ushort, + pub cuid: c_ushort, + pub cgid: c_ushort, + pub mode: c_ushort, + pub __seq: c_ushort, +} + +#[unsafe(no_mangle)] +pub extern "C" fn _cbindgen_export_ipc_perm(value: ipc_perm) { + let _ = value; +} diff -ruN a/src/header/sys_sem/cbindgen.toml b/src/header/sys_sem/cbindgen.toml --- a/src/header/sys_sem/cbindgen.toml 1970-01-01 00:00:00.000000000 +0000 +++ b/src/header/sys_sem/cbindgen.toml 2026-04-15 09:57:28.904183804 +0100 -@@ -0,0 +1,9 @@ +@@ -0,0 +1,9 @@ +sys_includes = ["sys/types.h", "sys/ipc.h", "stdint.h"] +include_guard = "_SYS_SEM_H" +language = "C" +style = "Tag" +no_includes = true +cpp_compat = true diff --git a/local/patches/relibc/P3-sysv-ipc.patch.bak b/local/patches/relibc/P3-sysv-ipc.patch.bak new file mode 100644 index 00000000..cc47f279 --- /dev/null +++ b/local/patches/relibc/P3-sysv-ipc.patch.bak @@ -0,0 +1,98 @@ +diff -ruN a/src/header/mod.rs b/src/header/mod.rs +--- a/src/header/mod.rs 2026-04-15 09:55:11.441949342 +0100 ++++ b/src/header/mod.rs 2026-04-15 09:57:28.904091552 +0100 +@@ -92,14 +92,14 @@ + pub mod sys_eventfd; + pub mod sys_file; + pub mod sys_ioctl; +-// TODO: sys/ipc.h ++pub mod sys_ipc; + pub mod sys_mman; + // TODO: sys/msg.h + pub mod sys_ptrace; + pub mod sys_resource; + pub mod sys_select; +-// TODO: sys/sem.h +-// TODO: sys/shm.h ++pub mod sys_sem; ++pub mod sys_shm; + pub mod sys_socket; + pub mod sys_stat; + pub mod sys_statvfs; +diff -ruN
a/src/header/sys_ipc/cbindgen.toml b/src/header/sys_ipc/cbindgen.toml +--- a/src/header/sys_ipc/cbindgen.toml 1970-01-01 00:00:00.000000000 +0000 ++++ b/src/header/sys_ipc/cbindgen.toml 2026-04-15 09:57:28.904120977 +0100 +@@ -0,0 +1,12 @@ ++sys_includes = ["sys/types.h"] ++include_guard = "_SYS_IPC_H" ++trailer = """ ++typedef struct ipc_perm ipc_perm; ++""" ++language = "C" ++style = "Tag" ++no_includes = true ++cpp_compat = true ++ ++[enum] ++prefix_with_name = true +diff -ruN a/src/header/sys_ipc/mod.rs b/src/header/sys_ipc/mod.rs +--- a/src/header/sys_ipc/mod.rs 1970-01-01 00:00:00.000000000 +0000 ++++ b/src/header/sys_ipc/mod.rs 2026-04-15 09:57:28.904159138 +0100 +@@ -0,0 +1,31 @@ ++//! `sys/ipc.h` implementation. ++ ++use crate::platform::types::{c_int, c_ushort}; ++ ++pub type key_t = c_int; ++ ++pub const IPC_PRIVATE: key_t = 0; ++pub const IPC_CREAT: c_int = 0o1000; ++pub const IPC_EXCL: c_int = 0o2000; ++pub const IPC_NOWAIT: c_int = 0o4000; ++ ++pub const IPC_RMID: c_int = 0; ++pub const IPC_SET: c_int = 1; ++pub const IPC_STAT: c_int = 2; ++ ++#[repr(C)] ++#[derive(Clone, Copy, Default)] ++pub struct ipc_perm { ++ pub __key: key_t, ++ pub uid: c_ushort, ++ pub gid: c_ushort, ++ pub cuid: c_ushort, ++ pub cgid: c_ushort, ++ pub mode: c_ushort, ++ pub __seq: c_ushort, ++} ++ ++#[unsafe(no_mangle)] ++pub extern "C" fn _cbindgen_export_ipc_perm(value: ipc_perm) { ++ let _ = value; ++} +diff -ruN a/src/header/sys_sem/cbindgen.toml b/src/header/sys_sem/cbindgen.toml +--- a/src/header/sys_sem/cbindgen.toml 1970-01-01 00:00:00.000000000 +0000 ++++ b/src/header/sys_sem/cbindgen.toml 2026-04-15 09:57:28.904183804 +0100 +@@ -0,0 +1,9 @@ ++sys_includes = ["sys/types.h", "sys/ipc.h", "stdint.h"] ++include_guard = "_SYS_SEM_H" ++language = "C" ++style = "Tag" ++no_includes = true ++cpp_compat = true ++ ++[enum] ++prefix_with_name = true +diff -ruN a/src/header/sys_shm/cbindgen.toml b/src/header/sys_shm/cbindgen.toml +--- a/src/header/sys_shm/cbindgen.toml 
1970-01-01 00:00:00.000000000 +0000 ++++ b/src/header/sys_shm/cbindgen.toml 2026-04-15 09:57:28.904207067 +0100 +@@ -0,0 +1,9 @@ ++sys_includes = ["sys/types.h", "sys/ipc.h", "sys/mman.h", "stdint.h"] ++include_guard = "_SYS_SHM_H" ++language = "C" ++style = "Tag" ++no_includes = true ++cpp_compat = true ++ ++[enum] ++prefix_with_name = true diff --git a/local/patches/relibc/P3-tcp-sockopt-forward.patch b/local/patches/relibc/P3-tcp-sockopt-forward.patch.bak similarity index 100% rename from local/patches/relibc/P3-tcp-sockopt-forward.patch rename to local/patches/relibc/P3-tcp-sockopt-forward.patch.bak diff --git a/local/patches/relibc/P3-vfork.patch b/local/patches/relibc/P3-vfork.patch.bak similarity index 100% rename from local/patches/relibc/P3-vfork.patch rename to local/patches/relibc/P3-vfork.patch.bak diff --git a/local/patches/relibc/P4-setgroups-getgroups.patch b/local/patches/relibc/P4-setgroups-getgroups.patch.bak similarity index 100% rename from local/patches/relibc/P4-setgroups-getgroups.patch rename to local/patches/relibc/P4-setgroups-getgroups.patch.bak diff --git a/local/patches/relibc/P5-fatal-handler-diagnostics.patch b/local/patches/relibc/P5-fatal-handler-diagnostics.patch.bak similarity index 100% rename from local/patches/relibc/P5-fatal-handler-diagnostics.patch rename to local/patches/relibc/P5-fatal-handler-diagnostics.patch.bak diff --git a/local/patches/relibc/P5-robust-mutex-enotrec-fix.patch b/local/patches/relibc/P5-robust-mutex-enotrec-fix.patch.bak similarity index 100% rename from local/patches/relibc/P5-robust-mutex-enotrec-fix.patch rename to local/patches/relibc/P5-robust-mutex-enotrec-fix.patch.bak diff --git a/local/patches/relibc/P5-startup-init-panic-hardening.patch b/local/patches/relibc/P5-startup-init-panic-hardening.patch.bak similarity index 100% rename from local/patches/relibc/P5-startup-init-panic-hardening.patch rename to local/patches/relibc/P5-startup-init-panic-hardening.patch.bak diff --git 
a/local/patches/relibc/P7-setpriority.patch b/local/patches/relibc/P7-setpriority.patch index dcc499f2..34b328e3 100644 --- a/local/patches/relibc/P7-setpriority.patch +++ b/local/patches/relibc/P7-setpriority.patch @@ -1,7 +1,7 @@ diff --git a/src/platform/redox/mod.rs b/src/platform/redox/mod.rs --- a/src/platform/redox/mod.rs +++ b/src/platform/redox/mod.rs -@@ -77,11 +77,74 @@ static mut BRK_CUR: *mut c_void = ptr::null_mut(); +@@ -77,11 +77,74 @@ static mut BRK_CUR: *mut c_void = ptr::null_mut(); static mut BRK_END: *mut c_void = ptr::null_mut(); const PAGE_SIZE: usize = 4096; diff --git a/local/patches/relibc/P7-setpriority.patch.bak b/local/patches/relibc/P7-setpriority.patch.bak new file mode 100644 index 00000000..dcc499f2 --- /dev/null +++ b/local/patches/relibc/P7-setpriority.patch.bak @@ -0,0 +1,104 @@ +diff --git a/src/platform/redox/mod.rs b/src/platform/redox/mod.rs +--- a/src/platform/redox/mod.rs ++++ b/src/platform/redox/mod.rs +@@ -77,11 +77,74 @@ static mut BRK_CUR: *mut c_void = ptr::null_mut(); + static mut BRK_END: *mut c_void = ptr::null_mut(); + + const PAGE_SIZE: usize = 4096; ++const NICE_MIN: c_int = -20; ++const NICE_MAX: c_int = 19; + + fn round_up_to_page_size(val: usize) -> Option { + val.checked_add(PAGE_SIZE) + .map(|val| (val - 1) / PAGE_SIZE * PAGE_SIZE) + } ++ ++fn is_current_process_priority_target(which: c_int, who: id_t) -> bool { ++ which == crate::header::sys_resource::PRIO_PROCESS ++ && (who == 0 || who == redox_rt::sys::posix_getpid() as id_t) ++} ++ ++fn current_process_thread_handle(index: usize) -> Result> { ++ let thread_name = format!("thread-{index}"); ++ match redox_rt::current_proc_fd().dup(thread_name.as_bytes()) { ++ Ok(thread_fd) => Ok(Some(thread_fd)), ++ Err(error) if error.errno == ENOENT => Ok(None), ++ Err(error) => Err(Errno(error.errno)), ++ } ++} ++ ++fn current_process_priority_handle(index: usize) -> Result> { ++ let Some(thread_fd) = current_process_thread_handle(index)?
else { ++ return Ok(None); ++ }; ++ ++ thread_fd ++ .dup(b"priority") ++ .map(Some) ++ .map_err(|error| Errno(error.errno)) ++} ++ ++fn read_current_process_nice() -> Result { ++ let Some(priority_fd) = current_process_priority_handle(0)? else { ++ return Err(Errno(ESRCH)); ++ }; ++ ++ let mut nice_bytes = [0_u8; size_of::()]; ++ if priority_fd.read(&mut nice_bytes)? != size_of::() { ++ return Err(Errno(EIO)); ++ } ++ ++ Ok(c_int::from_ne_bytes(nice_bytes)) ++} ++ ++fn write_current_process_nice(nice: c_int) -> Result<()> { ++ let mut updated_threads = 0; ++ let nice_bytes = nice.to_ne_bytes(); ++ ++ for index in 0.. { ++ let Some(priority_fd) = current_process_priority_handle(index)? else { ++ break; ++ }; ++ ++ if priority_fd.write(&nice_bytes)? != nice_bytes.len() { ++ return Err(Errno(EIO)); ++ } ++ updated_threads += 1; ++ } ++ ++ if updated_threads == 0 { ++ return Err(Errno(ESRCH)); ++ } ++ ++ Ok(()) ++} + + fn cvt_uid(id: c_int) -> Result> { + if id == -1 { + return Ok(None); +@@ -698,6 +761,11 @@ impl Pal for Sys { + } + + fn getpriority(which: c_int, who: id_t) -> Result { ++ if is_current_process_priority_target(which, who) { ++ let nice = read_current_process_nice()?; ++ return Ok(20 - nice); ++ } ++ + match redox_rt::sys::posix_getpriority(which, who as u32) { + Ok(kernel_prio) => { + let posix_prio = (kernel_prio as i32 * -1) + 40 as i32; +@@ -1274,7 +1342,12 @@ impl Pal for Sys { + } + + fn setpriority(which: c_int, who: id_t, prio: c_int) -> Result<()> { +- let clamped_prio = prio.clamp(-20, 19); ++ let clamped_prio = prio.clamp(NICE_MIN, NICE_MAX); ++ ++ if is_current_process_priority_target(which, who) { ++ return write_current_process_nice(clamped_prio); ++ } ++ + let kernel_prio = (20 + clamped_prio) as u32; + + match redox_rt::sys::posix_setpriority(which, who as u32, kernel_prio) { diff --git a/local/patches/relibc/P9-spin-and-barrier.patch b/local/patches/relibc/P9-spin-and-barrier.patch.bak similarity index 100% rename from 
local/patches/relibc/P9-spin-and-barrier.patch rename to local/patches/relibc/P9-spin-and-barrier.patch.bak diff --git a/local/patches/relibc/P0-strtold-cpp-linkage-and-compat.patch b/local/patches/relibc/absorbed/P0-strtold-cpp-linkage-and-compat.patch similarity index 100% rename from local/patches/relibc/P0-strtold-cpp-linkage-and-compat.patch rename to local/patches/relibc/absorbed/P0-strtold-cpp-linkage-and-compat.patch diff --git a/local/patches/relibc/P3-aio.patch b/local/patches/relibc/absorbed/P3-aio.patch similarity index 100% rename from local/patches/relibc/P3-aio.patch rename to local/patches/relibc/absorbed/P3-aio.patch diff --git a/local/patches/relibc/P3-barrier-smp-futex.patch b/local/patches/relibc/absorbed/P3-barrier-smp-futex.patch similarity index 100% rename from local/patches/relibc/P3-barrier-smp-futex.patch rename to local/patches/relibc/absorbed/P3-barrier-smp-futex.patch diff --git a/local/patches/relibc/P3-clock-nanosleep.patch b/local/patches/relibc/absorbed/P3-clock-nanosleep.patch similarity index 100% rename from local/patches/relibc/P3-clock-nanosleep.patch rename to local/patches/relibc/absorbed/P3-clock-nanosleep.patch diff --git a/local/patches/relibc/P3-dns-aaaa-getaddrinfo-ipv6.patch b/local/patches/relibc/absorbed/P3-dns-aaaa-getaddrinfo-ipv6.patch similarity index 100% rename from local/patches/relibc/P3-dns-aaaa-getaddrinfo-ipv6.patch rename to local/patches/relibc/absorbed/P3-dns-aaaa-getaddrinfo-ipv6.patch diff --git a/local/patches/relibc/absorbed/P3-dns-resolver-hardening.patch b/local/patches/relibc/absorbed/P3-dns-resolver-hardening.patch new file mode 100644 index 00000000..29636129 --- /dev/null +++ b/local/patches/relibc/absorbed/P3-dns-resolver-hardening.patch @@ -0,0 +1,722 @@ +diff --git a/src/header/netdb/dns/mod.rs b/src/header/netdb/dns/mod.rs +index 9d7e44b..f5bc21b 100644 +--- a/src/header/netdb/dns/mod.rs ++++ b/src/header/netdb/dns/mod.rs +@@ -15,7 +15,35 @@ use alloc::{string::String, vec::Vec}; + mod 
answer; + mod query; + ++const DNS_FLAG_QR: u16 = 0x8000; ++const DNS_FLAG_TC: u16 = 0x0200; ++const DNS_RCODE_MASK: u16 = 0x000F; ++ ++#[derive(Clone, Copy, Debug, Eq, PartialEq)] ++pub(super) enum DnsError { ++ MalformedResponse, ++ TransactionIdMismatch, ++ NotResponse, ++ Truncated, ++ ServerFailure, ++ NameError, ++ ResponseCode(u8), ++} ++ ++impl DnsError { ++ fn as_str(self) -> &'static str { ++ match self { ++ Self::MalformedResponse => "malformed dns response", ++ Self::TransactionIdMismatch => "dns transaction id mismatch", ++ Self::NotResponse => "dns packet was not a response", ++ Self::Truncated => "truncated dns response", ++ Self::ServerFailure => "dns server failure", ++ Self::NameError => "dns name error", ++ Self::ResponseCode(_) => "dns server returned an error response", ++ } ++ } ++} + + #[derive(Clone, Debug)] + pub struct Dns { + pub transaction_id: u16, +@@ -59,6 +88,14 @@ impl Dns { + } + + pub fn parse(data: &[u8]) -> Result { ++ Self::parse_impl(data, None).map_err(|err| err.as_str().into()) ++ } ++ ++ pub(super) fn parse_reply(data: &[u8], expected_transaction_id: u16) -> Result { ++ Self::parse_impl(data, Some(expected_transaction_id)) ++ } ++ ++ fn parse_impl(data: &[u8], expected_transaction_id: Option) -> Result { + let name_ind = 0b1100_0000; + let mut i = 0; + +@@ -66,7 +103,7 @@ impl Dns { + () => {{ + i += 1; + if i > data.len() { +- return Err(format!("{}: {}: pop_u8", file!(), line!())); ++ return Err(DnsError::MalformedResponse); + } + data[i - 1] + }}; +@@ -77,9 +114,11 @@ impl Dns { + use core::convert::TryInto; + i += 2; + if i > data.len() { +- return Err(format!("{}: {}: pop_n16", file!(), line!())); ++ return Err(DnsError::MalformedResponse); + } +- let bytes: [u8; 2] = data[i - 2..i].try_into().unwrap(); ++ let bytes: [u8; 2] = data[i - 2..i] ++ .try_into() ++ .map_err(|_| DnsError::MalformedResponse)?; + u16::from_be_bytes(bytes) + }}; + } +@@ -156,10 +195,83 @@ impl Dns { + }); + } + +- Ok(Dns { ++ let dns = Dns { + 
transaction_id, + flags, + queries, + answers, +- }) ++ }; ++ ++ if let Some(expected_transaction_id) = expected_transaction_id { ++ if dns.transaction_id != expected_transaction_id { ++ return Err(DnsError::TransactionIdMismatch); ++ } ++ } ++ ++ if dns.flags & DNS_FLAG_QR == 0 { ++ return Err(DnsError::NotResponse); ++ } ++ ++ if dns.flags & DNS_FLAG_TC != 0 { ++ return Err(DnsError::Truncated); ++ } ++ ++ match (dns.flags & DNS_RCODE_MASK) as u8 { ++ 0 => Ok(dns), ++ 2 => Err(DnsError::ServerFailure), ++ 3 => Err(DnsError::NameError), ++ rcode => Err(DnsError::ResponseCode(rcode)), ++ } ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use alloc::{string::ToString, vec::Vec}; ++ ++ use super::{Dns, DnsError, DnsQuery}; ++ ++ fn packet(transaction_id: u16, flags: u16) -> Vec { ++ Dns { ++ transaction_id, ++ flags, ++ queries: vec![DnsQuery { ++ name: "example.com".to_string(), ++ q_type: 0x0001, ++ q_class: 0x0001, ++ }], ++ answers: vec![], ++ } ++ .compile() ++ } ++ ++ #[test] ++ fn parse_reply_accepts_valid_response() { ++ let response = Dns::parse_reply(&packet(0x1234, 0x8180), 0x1234).unwrap(); ++ assert_eq!(response.transaction_id, 0x1234); ++ } ++ ++ #[test] ++ fn parse_reply_rejects_transaction_id_mismatch() { ++ let err = Dns::parse_reply(&packet(0x1234, 0x8180), 0x4321).unwrap_err(); ++ assert_eq!(err, DnsError::TransactionIdMismatch); ++ } ++ ++ #[test] ++ fn parse_rejects_query_packets() { ++ let err = Dns::parse(&packet(0x1234, 0x0100)).unwrap_err(); ++ assert_eq!(err, DnsError::NotResponse.as_str()); ++ } ++ ++ #[test] ++ fn parse_rejects_truncated_response() { ++ let err = Dns::parse(&packet(0x1234, 0x8380)).unwrap_err(); ++ assert_eq!(err, DnsError::Truncated.as_str()); ++ } ++ ++ #[test] ++ fn parse_rejects_name_error_response() { ++ let err = Dns::parse(&packet(0x1234, 0x8183)).unwrap_err(); ++ assert_eq!(err, DnsError::NameError.as_str()); ++ } + } +diff --git a/src/header/netdb/lookup.rs b/src/header/netdb/lookup.rs +index c2b6cdb..af25f97 100644 
+--- a/src/header/netdb/lookup.rs ++++ b/src/header/netdb/lookup.rs +@@ -1,10 +1,10 @@ +-use alloc::{boxed::Box, string::ToString, vec::Vec}; ++use alloc::{string::ToString, vec::Vec}; + use core::{mem, ptr}; + + use crate::{ + out::Out, + platform::{ +- Pal, Sys, ++ self, Pal, Sys, + types::{c_int, c_void}, + }, + }; +@@ -25,12 +25,120 @@ use crate::header::{ + }; + + use super::{ +- dns::{Dns, DnsQuery}, ++ dns::{Dns, DnsError, DnsQuery}, + sys::get_dns_server, + }; + + pub type LookupHost = Vec; + pub type LookupHostV6 = Vec; ++ ++fn close_socket(sock: c_int) { ++ if sock >= 0 { ++ if let Ok(()) = Sys::close(sock) {}; ++ } ++} ++ ++fn last_socket_error(default: c_int) -> c_int { ++ match platform::ERRNO.get() { ++ 0 => default, ++ err => err, ++ } ++} ++ ++fn map_dns_error(err: DnsError) -> c_int { ++ match err { ++ DnsError::NameError => ENOENT, ++ DnsError::ServerFailure => EAGAIN, ++ DnsError::Truncated => EMSGSIZE, ++ DnsError::MalformedResponse ++ | DnsError::TransactionIdMismatch ++ | DnsError::NotResponse ++ | DnsError::ResponseCode(_) => EREMOTEIO, ++ } ++} ++ ++fn lookup_dns_response(packet: &Dns, dns_addr: u32) -> Result { ++ let packet_data = packet.compile(); ++ let packet_data_len = packet_data.len(); ++ let packet_data_ptr = packet_data.as_ptr().cast::(); ++ ++ let dest = sockaddr_in { ++ sin_family: AF_INET as u16, ++ sin_port: htons(53), ++ sin_addr: in_addr { s_addr: dns_addr }, ++ ..Default::default() ++ }; ++ let dest_ptr = ptr::from_ref(&dest).cast::(); ++ ++ let sock = unsafe { sys_socket::socket(AF_INET, SOCK_DGRAM, i32::from(IPPROTO_UDP)) }; ++ if sock < 0 { ++ return Err(last_socket_error(EIO)); ++ } ++ ++ if unsafe { sys_socket::connect(sock, dest_ptr, mem::size_of_val(&dest) as socklen_t) } < 0 { ++ let err = last_socket_error(EIO); ++ close_socket(sock); ++ return Err(err); ++ } ++ ++ if unsafe { sys_socket::send(sock, packet_data_ptr, packet_data_len, 0) } < 0 { ++ let err = last_socket_error(EIO); ++ close_socket(sock); ++ return 
Err(err); ++ } ++ ++ let tv = timeval { ++ tv_sec: 5, ++ tv_usec: 0, ++ }; ++ unsafe { ++ sys_socket::setsockopt( ++ sock, ++ SOL_SOCKET, ++ SO_RCVTIMEO, ++ &tv as *const timeval as *const c_void, ++ core::mem::size_of::() as socklen_t, ++ ); ++ } ++ ++ let mut buf = vec![0u8; 65536]; ++ let buf_ptr = buf.as_mut_ptr().cast::(); ++ ++ let mut count: isize = -1; ++ let mut recv_error = EIO; ++ for attempt in 0..2 { ++ count = unsafe { sys_socket::recv(sock, buf_ptr, buf.len(), 0) }; ++ if count >= 0 { ++ break; ++ } ++ ++ recv_error = last_socket_error(EIO); ++ if attempt + 1 == 2 { ++ break; ++ } ++ ++ if unsafe { sys_socket::send(sock, packet_data_ptr, packet_data_len, 0) } < 0 { ++ recv_error = last_socket_error(EIO); ++ break; ++ } ++ } ++ ++ if count < 0 { ++ close_socket(sock); ++ return Err(recv_error); ++ } ++ ++ let response = match Dns::parse_reply(&buf[..count as usize], packet.transaction_id) { ++ Ok(response) => response, ++ Err(err) => { ++ close_socket(sock); ++ return Err(map_dns_error(err)); ++ } ++ }; ++ ++ close_socket(sock); ++ Ok(response) ++} + + pub fn lookup_host(host: &str) -> Result { + if let Some(host_direct_addr) = parse_ipv4_string(host) { +@@ -61,97 +134,28 @@ pub fn lookup_host(host: &str) -> Result { + answers: vec![], + }; + +- let packet_data = packet.compile(); +- let packet_data_len = packet_data.len(); +- +- let packet_data_box = packet_data.into_boxed_slice(); +- let packet_data_ptr = Box::into_raw(packet_data_box) as *mut _ as *mut c_void; +- +- let dest = sockaddr_in { +- sin_family: AF_INET as u16, +- sin_port: htons(53), +- sin_addr: in_addr { s_addr: dns_addr }, +- ..Default::default() +- }; +- let dest_ptr = ptr::from_ref(&dest).cast::(); +- +- let sock = unsafe { +- let sock = sys_socket::socket(AF_INET, SOCK_DGRAM, i32::from(IPPROTO_UDP)); +- if sys_socket::connect(sock, dest_ptr, mem::size_of_val(&dest) as socklen_t) < 0 { +- return Err(EIO); +- } +- if sys_socket::send(sock, packet_data_ptr, packet_data_len, 0) < 0 { 
+- drop(Box::from_raw(packet_data_ptr)); +- return Err(EIO); +- } +- sock +- }; +- +- unsafe { +- drop(Box::from_raw(packet_data_ptr)); +- } +- +- let mut buf = vec![0u8; 65536]; +- let buf_ptr = buf.as_mut_ptr().cast::(); +- +- // Set 5s recv timeout (best-effort; if this fails, recv may block longer). +- let tv = timeval { +- tv_sec: 5, +- tv_usec: 0, +- }; +- unsafe { +- sys_socket::setsockopt( +- sock, +- SOL_SOCKET, +- SO_RCVTIMEO, +- &tv as *const timeval as *const c_void, +- core::mem::size_of::() as socklen_t, +- ); +- } +- +- let mut count: isize = -1; +- for _attempt in 0..2 { +- count = unsafe { sys_socket::recv(sock, buf_ptr, 65536, 0) }; +- if count >= 0 { +- break; +- } +- if unsafe { sys_socket::send(sock, packet_data_ptr, packet_data_len, 0) } < 0 { +- break; +- } +- } +- if count < 0 { +- return Err(EIO); +- } +- +- match Dns::parse(&buf[..count as usize]) { +- Ok(response) => { +- let addrs: Vec<_> = response +- .answers +- .into_iter() +- .filter_map(|answer| { +- if answer.a_type == 0x0001 +- && answer.a_class == 0x0001 +- && answer.data.len() == 4 +- { +- let addr = in_addr { +- s_addr: u32::from_ne_bytes([ +- answer.data[0], +- answer.data[1], +- answer.data[2], +- answer.data[3], +- ]), +- }; +- Some(addr) +- } else { +- None +- } +- }) +- .collect(); +- +- Ok(addrs) +- } +- Err(_err) => Err(EINVAL), +- } ++ let response = lookup_dns_response(&packet, dns_addr)?; ++ let addrs: Vec<_> = response ++ .answers ++ .into_iter() ++ .filter_map(|answer| { ++ if answer.a_type == 0x0001 && answer.a_class == 0x0001 && answer.data.len() == 4 { ++ let addr = in_addr { ++ s_addr: u32::from_ne_bytes([ ++ answer.data[0], ++ answer.data[1], ++ answer.data[2], ++ answer.data[3], ++ ]), ++ }; ++ Some(addr) ++ } else { ++ None ++ } ++ }) ++ .collect(); ++ ++ Ok(addrs) + } else { + Err(EINVAL) + } +@@ -186,91 +192,22 @@ pub fn lookup_host_v6(host: &str) -> Result { + answers: vec![], + }; + +- let packet_data = packet.compile(); +- let packet_data_len = 
packet_data.len(); +- +- let packet_data_box = packet_data.into_boxed_slice(); +- let packet_data_ptr = Box::into_raw(packet_data_box) as *mut _ as *mut c_void; +- +- let dest = sockaddr_in { +- sin_family: AF_INET as u16, +- sin_port: htons(53), +- sin_addr: in_addr { s_addr: dns_addr }, +- ..Default::default() +- }; +- let dest_ptr = ptr::from_ref(&dest).cast::(); +- +- let sock = unsafe { +- let sock = sys_socket::socket(AF_INET, SOCK_DGRAM, i32::from(IPPROTO_UDP)); +- if sys_socket::connect(sock, dest_ptr, mem::size_of_val(&dest) as socklen_t) < 0 { +- return Err(EIO); +- } +- if sys_socket::send(sock, packet_data_ptr, packet_data_len, 0) < 0 { +- drop(Box::from_raw(packet_data_ptr)); +- return Err(EIO); +- } +- sock +- }; +- +- unsafe { +- drop(Box::from_raw(packet_data_ptr)); +- } +- +- let mut buf = vec![0u8; 65536]; +- let buf_ptr = buf.as_mut_ptr().cast::(); +- +- // Set 5s recv timeout (best-effort; if this fails, recv may block longer). +- let tv = timeval { +- tv_sec: 5, +- tv_usec: 0, +- }; +- unsafe { +- sys_socket::setsockopt( +- sock, +- SOL_SOCKET, +- SO_RCVTIMEO, +- &tv as *const timeval as *const c_void, +- core::mem::size_of::() as socklen_t, +- ); +- } +- +- let mut count: isize = -1; +- for _attempt in 0..2 { +- count = unsafe { sys_socket::recv(sock, buf_ptr, 65536, 0) }; +- if count >= 0 { +- break; +- } +- if unsafe { sys_socket::send(sock, packet_data_ptr, packet_data_len, 0) } < 0 { +- break; +- } +- } +- if count < 0 { +- return Err(EIO); +- } +- +- match Dns::parse(&buf[..count as usize]) { +- Ok(response) => { +- let addrs: Vec<_> = response +- .answers +- .into_iter() +- .filter_map(|answer| { +- if answer.a_type == 0x001c +- && answer.a_class == 0x0001 +- && answer.data.len() == 16 +- { +- let mut s6_addr = [0u8; 16]; +- s6_addr.copy_from_slice(&answer.data[..16]); +- Some(in6_addr { s6_addr }) +- } else { +- None +- } +- }) +- .collect(); +- +- Ok(addrs) +- } +- Err(_err) => Err(EINVAL), +- } ++ let response = 
lookup_dns_response(&packet, dns_addr)?; ++ let addrs: Vec<_> = response ++ .answers ++ .into_iter() ++ .filter_map(|answer| { ++ if answer.a_type == 0x001c && answer.a_class == 0x0001 && answer.data.len() == 16 { ++ let mut s6_addr = [0u8; 16]; ++ s6_addr.copy_from_slice(&answer.data[..16]); ++ Some(in6_addr { s6_addr }) ++ } else { ++ None ++ } ++ }) ++ .collect(); ++ ++ Ok(addrs) + } else { + Err(EINVAL) + } +@@ -315,92 +254,24 @@ pub fn lookup_addr(addr: in_addr) -> Result>, c_int> { + answers: vec![], + }; + +- let packet_data = packet.compile(); +- let packet_data_len = packet_data.len(); +- let packet_data_box = packet_data.into_boxed_slice(); +- let packet_data_ptr = Box::into_raw(packet_data_box) as *mut _ as *mut c_void; +- +- let dest = sockaddr_in { +- sin_family: AF_INET as u16, +- sin_port: htons(53), +- sin_addr: in_addr { s_addr: dns_addr }, +- ..Default::default() +- }; +- +- let dest_ptr = ptr::from_ref(&dest).cast::(); +- +- let sock = unsafe { +- let sock = sys_socket::socket(AF_INET, SOCK_DGRAM, i32::from(IPPROTO_UDP)); +- if sys_socket::connect(sock, dest_ptr, mem::size_of_val(&dest) as socklen_t) < 0 { +- return Err(EIO); +- } +- sock +- }; +- +- unsafe { +- if sys_socket::send(sock, packet_data_ptr, packet_data_len, 0) < 0 { +- return Err(EIO); +- } +- } +- +- unsafe { +- drop(Box::from_raw(packet_data_ptr)); +- } +- +- let mut buf = [0u8; 65536]; +- let buf_ptr = buf.as_mut_ptr().cast::(); +- +- // Set 5s recv timeout (best-effort; if this fails, recv may block longer). 
+- let tv = timeval { +- tv_sec: 5, +- tv_usec: 0, +- }; +- unsafe { +- sys_socket::setsockopt( +- sock, +- SOL_SOCKET, +- SO_RCVTIMEO, +- &tv as *const timeval as *const c_void, +- core::mem::size_of::() as socklen_t, +- ); +- } +- +- let mut count: isize = -1; +- for _attempt in 0..2 { +- count = unsafe { sys_socket::recv(sock, buf_ptr, 65536, 0) }; +- if count >= 0 { +- break; +- } +- if unsafe { sys_socket::send(sock, packet_data_ptr, packet_data_len, 0) } < 0 { +- break; +- } +- } +- if count < 0 { +- return Err(EIO); +- } +- +- match Dns::parse(&buf[..count as usize]) { +- Ok(response) => { +- let names = response +- .answers +- .into_iter() +- .filter_map(|answer| { +- if answer.a_type == 0x000C && answer.a_class == 0x0001 { +- // answer.data is encoded kinda weird. +- // Basically length-prefixed strings for each +- // subsection of the domain. +- // We need to parse this to insert periods where +- // they belong (ie at the end of each string) +- Some(parse_revdns_answer(&answer.data)) +- } else { +- None +- } +- }) +- .collect(); +- Ok(names) +- } +- Err(_err) => Err(EINVAL), +- } ++ let response = lookup_dns_response(&packet, dns_addr)?; ++ let names = response ++ .answers ++ .into_iter() ++ .filter_map(|answer| { ++ if answer.a_type == 0x000C && answer.a_class == 0x0001 { ++ // answer.data is encoded kinda weird. ++ // Basically length-prefixed strings for each ++ // subsection of the domain. 
++ // We need to parse this to insert periods where ++ // they belong (ie at the end of each string) ++ Some(parse_revdns_answer(&answer.data)) ++ } else { ++ None ++ } ++ }) ++ .collect(); ++ Ok(names) + } else { + Err(EINVAL) + } +diff --git a/src/header/netdb/mod.rs b/src/header/netdb/mod.rs +index ba58b6e..cdcc10e 100644 +--- a/src/header/netdb/mod.rs ++++ b/src/header/netdb/mod.rs +@@ -180,6 +180,35 @@ fn bytes_to_box_str(bytes: &[u8]) -> Box { + Box::from(core::str::from_utf8(bytes).unwrap_or("")) + } + ++fn lookup_error_to_eai(err: c_int) -> c_int { ++ match err { ++ ETIMEDOUT | EAGAIN => EAI_AGAIN, ++ ENOENT => EAI_NONAME, ++ _ => EAI_FAIL, ++ } ++} ++ ++fn lookup_error_priority(err: c_int) -> u8 { ++ match err { ++ EAI_AGAIN => 3, ++ EAI_FAIL => 2, ++ EAI_NONAME => 1, ++ _ => 0, ++ } ++} ++ ++fn combine_lookup_error(current: Option, err: c_int) -> c_int { ++ let mapped = lookup_error_to_eai(err); ++ ++ match current { ++ Some(existing) if lookup_error_priority(existing) >= lookup_error_priority(mapped) => { ++ existing ++ } ++ Some(_) => mapped, ++ None => mapped, ++ } ++} ++ + /// See . 
+ #[unsafe(no_mangle)] + pub unsafe extern "C" fn endnetent() { +@@ -926,6 +951,8 @@ pub unsafe extern "C" fn getaddrinfo( + let want_inet4 = requested_family == AF_INET || requested_family == AF_UNSPEC; + let want_inet6 = requested_family == AF_INET6 || requested_family == AF_UNSPEC; + ++ let mut lookup_error = None; ++ + let lookuphost_v4: Vec = if want_inet4 { + if ai_flags & AI_NUMERICHOST > 0 { + match parse_ipv4_string(node_str) { +@@ -937,7 +964,10 @@ pub unsafe extern "C" fn getaddrinfo( + } else { + match lookup_host(node_str) { + Ok(addrs) => addrs, +- Err(_) => vec![], ++ Err(err) => { ++ lookup_error = Some(combine_lookup_error(lookup_error, err)); ++ vec![] ++ } + } + } + } else { +@@ -955,7 +985,10 @@ pub unsafe extern "C" fn getaddrinfo( + } else { + match lookup_host_v6(node_str) { + Ok(addrs) => addrs, +- Err(_) => vec![], ++ Err(err) => { ++ lookup_error = Some(combine_lookup_error(lookup_error, err)); ++ vec![] ++ } + } + } + } else { +@@ -963,5 +996,5 @@ pub unsafe extern "C" fn getaddrinfo( + }; + + if lookuphost_v4.is_empty() && lookuphost_v6.is_empty() { +- return EAI_NONAME; ++ return lookup_error.unwrap_or(EAI_NONAME); + } diff --git a/local/patches/relibc/P3-dup3.patch b/local/patches/relibc/absorbed/P3-dup3.patch similarity index 100% rename from local/patches/relibc/P3-dup3.patch rename to local/patches/relibc/absorbed/P3-dup3.patch diff --git a/local/patches/relibc/P3-elf64-types.patch b/local/patches/relibc/absorbed/P3-elf64-types.patch similarity index 100% rename from local/patches/relibc/P3-elf64-types.patch rename to local/patches/relibc/absorbed/P3-elf64-types.patch diff --git a/local/patches/relibc/P3-eventfd-mod.patch b/local/patches/relibc/absorbed/P3-eventfd-mod.patch similarity index 100% rename from local/patches/relibc/P3-eventfd-mod.patch rename to local/patches/relibc/absorbed/P3-eventfd-mod.patch diff --git a/local/patches/relibc/P3-exec-root-bypass.patch b/local/patches/relibc/absorbed/P3-exec-root-bypass.patch 
similarity index 100% rename from local/patches/relibc/P3-exec-root-bypass.patch rename to local/patches/relibc/absorbed/P3-exec-root-bypass.patch diff --git a/local/patches/relibc/absorbed/P3-fenv.patch b/local/patches/relibc/absorbed/P3-fenv.patch new file mode 100644 index 00000000..853f595a --- /dev/null +++ b/local/patches/relibc/absorbed/P3-fenv.patch @@ -0,0 +1,230 @@ +diff --git a/src/header/_fenv/mod.rs b/src/header/_fenv/mod.rs +--- a/src/header/_fenv/mod.rs ++++ b/src/header/_fenv/mod.rs +@@ -4,82 +4,210 @@ + + use crate::platform::types::c_int; + +-/// See . +-pub const FE_ALL_EXCEPT: c_int = 0; +-/// See . +-pub const FE_TONEAREST: c_int = 0; ++// x86_64 SSE floating-point exception flags (MXCSR bits 0-5, excluding denormal bit 1) ++pub const FE_INVALID: c_int = 0x01; ++pub const FE_DIVBYZERO: c_int = 0x04; ++pub const FE_OVERFLOW: c_int = 0x08; ++pub const FE_UNDERFLOW: c_int = 0x10; ++pub const FE_INEXACT: c_int = 0x20; ++/// See . ++pub const FE_ALL_EXCEPT: c_int = ++ FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW | FE_UNDERFLOW | FE_INEXACT; ++ ++// x86_64 rounding modes (MXCSR bits 13-14, x87 CW bits 10-11) ++/// See . ++pub const FE_TONEAREST: c_int = 0x000; ++pub const FE_DOWNWARD: c_int = 0x400; ++pub const FE_UPWARD: c_int = 0x800; ++pub const FE_TOWARDZERO: c_int = 0xC00; + + /// See . + pub type fexcept_t = u64; + + /// See . + #[repr(C)] + pub struct fenv_t { +- pub cw: u64, ++ pub cw: u32, // x87 control word (zero-extended from u16) ++ pub mxcsr: u32, // SSE MXCSR register + } + ++/// Read the x87 FPU control word. ++#[inline] ++unsafe fn fnstcw() -> u16 { ++ let mut cw: u16 = 0; ++ core::arch::asm!( ++ "fnstcw ({0})", ++ in(reg) &mut cw, ++ options(nostack, preserves_flags) ++ ); ++ cw ++} ++ ++/// Load the x87 FPU control word. ++#[inline] ++unsafe fn fldcw(cw: u16) { ++ core::arch::asm!( ++ "fldcw ({0})", ++ in(reg) &cw, ++ options(nostack, preserves_flags) ++ ); ++} ++ ++/// Read the SSE MXCSR register. 
++#[inline] ++unsafe fn stmxcsr() -> u32 { ++ let mut mxcsr: u32 = 0; ++ core::arch::asm!( ++ "stmxcsr ({0})", ++ in(reg) &mut mxcsr, ++ options(nostack, preserves_flags) ++ ); ++ mxcsr ++} ++ ++/// Write the SSE MXCSR register. ++#[inline] ++unsafe fn ldmxcsr(val: u32) { ++ core::arch::asm!( ++ "ldmxcsr ({0})", ++ in(reg) &val, ++ options(nostack, preserves_flags) ++ ); ++} ++ + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn feclearexcept(excepts: c_int) -> c_int { +- unimplemented!(); ++ let mask = (excepts & FE_ALL_EXCEPT) as u32; ++ if mask != 0 { ++ let mxcsr = stmxcsr(); ++ ldmxcsr(mxcsr & !mask); ++ // Clear x87 status word exception flags ++ core::arch::asm!("fnclex", options(nostack, preserves_flags)); ++ } ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn fegetenv(envp: *mut fenv_t) -> c_int { +- unimplemented!(); ++ if envp.is_null() { ++ return 1; ++ } ++ (*envp).cw = fnstcw() as u32; ++ (*envp).mxcsr = stmxcsr(); ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn fegetexceptflag(flagp: *mut fexcept_t, excepts: c_int) -> c_int { +- unimplemented!(); ++ if flagp.is_null() { ++ return 1; ++ } ++ let mxcsr = stmxcsr(); ++ *flagp = (mxcsr & FE_ALL_EXCEPT as u32 & excepts as u32) as fexcept_t; ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn fegetround() -> c_int { +- FE_TONEAREST ++ let mxcsr = stmxcsr(); ++ (mxcsr & 0xC00) as c_int + } + + /// See . 
+ // #[unsafe(no_mangle)] + pub unsafe extern "C" fn feholdexcept(envp: *mut fenv_t) -> c_int { +- unimplemented!(); ++ if envp.is_null() { ++ return 1; ++ } ++ // Save current environment ++ (*envp).cw = fnstcw() as u32; ++ (*envp).mxcsr = stmxcsr(); ++ // Clear all exception flags and set non-stop mode (unmask all exceptions) ++ // MXCSR: clear status bits 0-5, clear mask bits 7-12 ++ let mxcsr = stmxcsr(); ++ ldmxcsr(mxcsr & !(FE_ALL_EXCEPT as u32) & !((FE_ALL_EXCEPT as u32) << 7)); ++ // x87: clear exception mask bits (bits 0-5 in CW) and clear status ++ let cw = fnstcw(); ++ fldcw(cw & !0x3F); ++ core::arch::asm!("fnclex", options(nostack, preserves_flags)); ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn feraiseexcept(excepts: c_int) -> c_int { +- unimplemented!(); ++ let mask = (excepts & FE_ALL_EXCEPT) as u32; ++ if mask == 0 { ++ return 0; ++ } ++ // Set exception status flags in MXCSR ++ let mxcsr = stmxcsr(); ++ ldmxcsr(mxcsr | mask); ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn fesetenv(envp: *const fenv_t) -> c_int { +- unimplemented!(); ++ if envp.is_null() { ++ // Restore default environment ++ fldcw(0x037F); // x87 default CW: all exceptions masked, double precision ++ ldmxcsr(0x1F80); // MXCSR default: all exceptions masked, round-to-nearest ++ return 0; ++ } ++ fldcw((*envp).cw as u16); ++ ldmxcsr((*envp).mxcsr); ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn fesetexceptflag(flagp: *const fexcept_t, excepts: c_int) -> c_int { +- unimplemented!(); ++ if flagp.is_null() { ++ return 1; ++ } ++ let mask = (excepts & FE_ALL_EXCEPT) as u32; ++ let mxcsr = stmxcsr(); ++ let flags = (*flagp as u32) & mask; ++ ldmxcsr((mxcsr & !mask) | flags); ++ 0 + } + + /// See . 
+ // #[unsafe(no_mangle)] + pub unsafe extern "C" fn fesetround(round: c_int) -> c_int { +- unimplemented!(); ++ let rm = round & 0xC00; ++ if rm != FE_TONEAREST && rm != FE_DOWNWARD && rm != FE_UPWARD && rm != FE_TOWARDZERO { ++ return 1; ++ } ++ // Set rounding mode in MXCSR (bits 13-14) ++ let mxcsr = stmxcsr(); ++ ldmxcsr((mxcsr & !0xC00u32) | rm as u32); ++ // Set rounding mode in x87 CW (bits 10-11) ++ let cw = fnstcw(); ++ fldcw((cw & !0x0C00) | rm as u16); ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn fetestexcept(excepts: c_int) -> c_int { +- unimplemented!(); ++ let mxcsr = stmxcsr(); ++ (mxcsr & FE_ALL_EXCEPT as u32 & excepts as u32) as c_int + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn feupdateenv(envp: *const fenv_t) -> c_int { +- unimplemented!(); ++ let mxcsr = stmxcsr(); ++ let excepts = (mxcsr & FE_ALL_EXCEPT as u32) as c_int; ++ if fesetenv(envp) != 0 { ++ return 1; ++ } ++ feraiseexcept(excepts); ++ 0 + } diff --git a/local/patches/relibc/P3-getentropy.patch b/local/patches/relibc/absorbed/P3-getentropy.patch similarity index 100% rename from local/patches/relibc/P3-getentropy.patch rename to local/patches/relibc/absorbed/P3-getentropy.patch diff --git a/local/patches/relibc/P3-getrlimit-getdtablesize.patch b/local/patches/relibc/absorbed/P3-getrlimit-getdtablesize.patch similarity index 100% rename from local/patches/relibc/P3-getrlimit-getdtablesize.patch rename to local/patches/relibc/absorbed/P3-getrlimit-getdtablesize.patch diff --git a/local/patches/relibc/P3-header-mod-spawn-threads.patch b/local/patches/relibc/absorbed/P3-header-mod-spawn-threads.patch similarity index 100% rename from local/patches/relibc/P3-header-mod-spawn-threads.patch rename to local/patches/relibc/absorbed/P3-header-mod-spawn-threads.patch diff --git a/local/patches/relibc/P3-ifaddrs-net_if.patch b/local/patches/relibc/absorbed/P3-ifaddrs-net_if.patch similarity index 100% rename from 
local/patches/relibc/P3-ifaddrs-net_if.patch rename to local/patches/relibc/absorbed/P3-ifaddrs-net_if.patch diff --git a/local/patches/relibc/P3-in6-pktinfo.patch b/local/patches/relibc/absorbed/P3-in6-pktinfo.patch similarity index 100% rename from local/patches/relibc/P3-in6-pktinfo.patch rename to local/patches/relibc/absorbed/P3-in6-pktinfo.patch diff --git a/local/patches/relibc/P3-inet6-pton-ntop.patch b/local/patches/relibc/absorbed/P3-inet6-pton-ntop.patch similarity index 100% rename from local/patches/relibc/P3-inet6-pton-ntop.patch rename to local/patches/relibc/absorbed/P3-inet6-pton-ntop.patch diff --git a/local/patches/relibc/absorbed/P3-named-semaphores.patch b/local/patches/relibc/absorbed/P3-named-semaphores.patch new file mode 100644 index 00000000..6346d067 --- /dev/null +++ b/local/patches/relibc/absorbed/P3-named-semaphores.patch @@ -0,0 +1,182 @@ +--- a/src/header/semaphore/mod.rs 2026-04-25 17:07:53.742796721 +0100 ++++ b/src/header/semaphore/mod.rs 2026-04-25 17:08:54.527084219 +0100 +@@ -2,12 +2,24 @@ + //! + //! See . + ++use core::mem::size_of; ++ + use crate::{ ++ c_str::CStr, + header::{ + bits_timespec::timespec, ++ errno::{EEXIST, EINVAL}, ++ fcntl::{O_CREAT, O_EXCL, O_RDWR}, ++ sys_mman::{ ++ mmap, munmap, shm_open, shm_unlink, MAP_SHARED, MAP_FAILED, PROT_READ, PROT_WRITE, ++ }, + time::{CLOCK_MONOTONIC, CLOCK_REALTIME}, ++ unistd::{close, ftruncate}, ++ }, ++ platform::{ ++ ERRNO, ++ types::{c_char, c_int, c_long, c_uint, clockid_t, c_void, mode_t, off_t, size_t}, + }, +- platform::types::{c_char, c_int, c_long, c_uint, clockid_t}, + }; + + /// See . +@@ -18,12 +30,17 @@ + pub size: [c_char; 4], + pub align: c_long, + } ++ ++/// Pointer value returned by `sem_open` on failure. ++/// cbindgen:ignore ++pub const SEM_FAILED: *mut sem_t = usize::MAX as *mut sem_t; ++ + pub type RlctSempahore = crate::sync::Semaphore; + + /// See . 
+-// #[unsafe(no_mangle)] ++#[unsafe(no_mangle)] + pub unsafe extern "C" fn sem_close(sem: *mut sem_t) -> c_int { +- todo!("named semaphores") ++ unsafe { munmap(sem.cast::(), size_of::()) } + } + + /// See . +@@ -50,13 +67,105 @@ + } + + /// See . +-// TODO: va_list +-// #[unsafe(no_mangle)] ++#[unsafe(no_mangle)] + pub unsafe extern "C" fn sem_open( + name: *const c_char, +- oflag: c_int, /* (va_list) value: c_uint */ ++ oflag: c_int, ++ mut __valist: ... + ) -> *mut sem_t { +- todo!("named semaphores") ++ // Validate name: must start with '/', no embedded '/'. ++ if name.is_null() { ++ ERRNO.set(EINVAL); ++ return SEM_FAILED; ++ } ++ ++ let name_c = unsafe { CStr::from_ptr(name) }; ++ let name_bytes = name_c.to_bytes(); ++ if name_bytes.is_empty() || name_bytes[0] != b'/' { ++ ERRNO.set(EINVAL); ++ return SEM_FAILED; ++ } ++ if name_bytes[1..].iter().any(|&b| b == b'/') { ++ ERRNO.set(EINVAL); ++ return SEM_FAILED; ++ } ++ ++ let creat = oflag & O_CREAT == O_CREAT; ++ let excl = oflag & O_EXCL == O_EXCL; ++ ++ let (mode, value): (mode_t, c_uint) = if creat { ++ ( ++ unsafe { __valist.arg::() }, ++ unsafe { __valist.arg::() }, ++ ) ++ } else { ++ (0, 0) ++ }; ++ ++ // Open or create the shared memory backing. ++ let (fd, created) = if creat && excl { ++ // O_CREAT | O_EXCL: must create exclusively. ++ let fd = unsafe { shm_open(name, O_CREAT | O_EXCL | O_RDWR, mode) }; ++ if fd < 0 { ++ return SEM_FAILED; ++ } ++ (fd, true) ++ } else if creat { ++ // O_CREAT without O_EXCL: try exclusive first, fall back to open. ++ let fd = unsafe { shm_open(name, O_CREAT | O_EXCL | O_RDWR, mode) }; ++ if fd >= 0 { ++ (fd, true) ++ } else if ERRNO.get() == EEXIST { ++ let fd = unsafe { shm_open(name, O_RDWR, 0) }; ++ if fd < 0 { ++ return SEM_FAILED; ++ } ++ (fd, false) ++ } else { ++ return SEM_FAILED; ++ } ++ } else { ++ // No O_CREAT: open existing. 
++ let fd = unsafe { shm_open(name, O_RDWR, 0) }; ++ if fd < 0 { ++ return SEM_FAILED; ++ } ++ (fd, false) ++ }; ++ ++ // Set size if we created the backing. ++ if created { ++ if unsafe { ftruncate(fd, size_of::() as off_t) } < 0 { ++ let _ = unsafe { close(fd) }; ++ return SEM_FAILED; ++ } ++ } ++ ++ // Map the shared memory. ++ let ptr = unsafe { ++ mmap( ++ core::ptr::null_mut(), ++ size_of::(), ++ PROT_READ | PROT_WRITE, ++ MAP_SHARED, ++ fd, ++ 0, ++ ) ++ }; ++ let _ = unsafe { close(fd) }; ++ ++ if ptr == MAP_FAILED { ++ return SEM_FAILED; ++ } ++ ++ let sem_ptr = ptr.cast::(); ++ ++ // Initialize the semaphore value if we created the backing. ++ if created { ++ unsafe { sem_ptr.cast::().write(RlctSempahore::new(value)) }; ++ } ++ ++ sem_ptr + } + + /// See . +@@ -76,10 +185,10 @@ + } + + /// See . +-// #[unsafe(no_mangle)] ++#[unsafe(no_mangle)] + pub unsafe extern "C" fn sem_unlink(name: *const c_char) -> c_int { +- todo!("named semaphores") ++ unsafe { shm_unlink(name) } + } + + /// See . 
+--- a/src/header/semaphore/cbindgen.toml 2026-04-25 17:07:53.743979154 +0100 ++++ b/src/header/semaphore/cbindgen.toml 2026-04-25 17:09:18.310792692 +0100 +@@ -3,6 +3,9 @@ + after_includes = """ + #include // for timespec + """ ++trailer = """ ++#define SEM_FAILED ((sem_t *) -1) ++""" + language = "C" + style = "Type" + no_includes = true diff --git a/local/patches/relibc/P3-netdb-lookup-retry-fix.patch b/local/patches/relibc/absorbed/P3-netdb-lookup-retry-fix.patch similarity index 100% rename from local/patches/relibc/P3-netdb-lookup-retry-fix.patch rename to local/patches/relibc/absorbed/P3-netdb-lookup-retry-fix.patch diff --git a/local/patches/relibc/P3-pthread-nulstr-sched-stdint.patch b/local/patches/relibc/absorbed/P3-pthread-nulstr-sched-stdint.patch similarity index 100% rename from local/patches/relibc/P3-pthread-nulstr-sched-stdint.patch rename to local/patches/relibc/absorbed/P3-pthread-nulstr-sched-stdint.patch diff --git a/local/patches/relibc/P3-pthread-signal-races.patch b/local/patches/relibc/absorbed/P3-pthread-signal-races.patch similarity index 100% rename from local/patches/relibc/P3-pthread-signal-races.patch rename to local/patches/relibc/absorbed/P3-pthread-signal-races.patch diff --git a/local/patches/relibc/P3-pthread-yield.patch b/local/patches/relibc/absorbed/P3-pthread-yield.patch similarity index 100% rename from local/patches/relibc/P3-pthread-yield.patch rename to local/patches/relibc/absorbed/P3-pthread-yield.patch diff --git a/local/patches/relibc/absorbed/P3-sched.patch b/local/patches/relibc/absorbed/P3-sched.patch new file mode 100644 index 00000000..65493168 --- /dev/null +++ b/local/patches/relibc/absorbed/P3-sched.patch @@ -0,0 +1,124 @@ +diff --git a/src/header/sched/mod.rs b/src/header/sched/mod.rs +--- a/src/header/sched/mod.rs ++++ b/src/header/sched/mod.rs +@@ -2,11 +2,11 @@ + //! + //! See . 
+ + use crate::{ + error::ResultExt, +- header::bits_timespec::timespec, ++ header::{bits_timespec::timespec, errno}, + platform::{ +- Pal, Sys, ++ self, Pal, Sys, + types::{c_int, pid_t}, + }, + }; +@@ -29,43 +31,100 @@ + pub const SCHED_OTHER: c_int = 2; + + /// See . + // #[unsafe(no_mangle)] + pub extern "C" fn sched_get_priority_max(policy: c_int) -> c_int { +- todo!() ++ match policy { ++ SCHED_FIFO | SCHED_RR => 99, ++ SCHED_OTHER => 0, ++ _ => { ++ platform::ERRNO.set(errno::EINVAL); ++ -1 ++ } ++ } + } + + /// See . + // #[unsafe(no_mangle)] + pub extern "C" fn sched_get_priority_min(policy: c_int) -> c_int { +- todo!() ++ match policy { ++ SCHED_FIFO | SCHED_RR => 0, ++ SCHED_OTHER => 0, ++ _ => { ++ platform::ERRNO.set(errno::EINVAL); ++ -1 ++ } ++ } + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn sched_getparam(pid: pid_t, param: *mut sched_param) -> c_int { +- todo!() ++ if param.is_null() { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ // Redox has no real-time scheduler; return default params ++ (*param).sched_priority = 0; ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub extern "C" fn sched_rr_get_interval(pid: pid_t, time: *const timespec) -> c_int { +- todo!() ++ if time.is_null() { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ // Redox has no real-time scheduler; report a nominal 1-second round-robin interval ++ unsafe { ++ (*(time as *mut timespec)).tv_sec = 1; ++ (*(time as *mut timespec)).tv_nsec = 0; ++ } ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn sched_setparam(pid: pid_t, param: *const sched_param) -> c_int { +- todo!() ++ if param.is_null() { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ let priority = (*param).sched_priority; ++ if priority < 0 || priority > 99 { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ // Redox has no real-time scheduler; validate and succeed as a no-op ++ 0 + } + + /// See . 
+ // #[unsafe(no_mangle)] + pub extern "C" fn sched_setscheduler( + pid: pid_t, + policy: c_int, + param: *const sched_param, + ) -> c_int { +- todo!() ++ if param.is_null() { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ match policy { ++ SCHED_FIFO | SCHED_RR | SCHED_OTHER => { ++ let priority = unsafe { (*param).sched_priority }; ++ if priority < 0 || priority > 99 { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ // Redox has no real-time scheduler; validate and succeed as a no-op ++ 0 ++ } ++ _ => { ++ platform::ERRNO.set(errno::EINVAL); ++ -1 ++ } ++ } + } + + /// See . diff --git a/local/patches/relibc/P3-secure-getenv.patch b/local/patches/relibc/absorbed/P3-secure-getenv.patch similarity index 100% rename from local/patches/relibc/P3-secure-getenv.patch rename to local/patches/relibc/absorbed/P3-secure-getenv.patch diff --git a/local/patches/relibc/P3-select-not-epoll-timeout.patch b/local/patches/relibc/absorbed/P3-select-not-epoll-timeout.patch similarity index 100% rename from local/patches/relibc/P3-select-not-epoll-timeout.patch rename to local/patches/relibc/absorbed/P3-select-not-epoll-timeout.patch diff --git a/local/patches/relibc/P3-semaphore-fixes.patch b/local/patches/relibc/absorbed/P3-semaphore-fixes.patch similarity index 100% rename from local/patches/relibc/P3-semaphore-fixes.patch rename to local/patches/relibc/absorbed/P3-semaphore-fixes.patch diff --git a/local/patches/relibc/P3-socket-cred.patch b/local/patches/relibc/absorbed/P3-socket-cred.patch similarity index 100% rename from local/patches/relibc/P3-socket-cred.patch rename to local/patches/relibc/absorbed/P3-socket-cred.patch diff --git a/local/patches/relibc/P3-socket-flags.patch b/local/patches/relibc/absorbed/P3-socket-flags.patch similarity index 100% rename from local/patches/relibc/P3-socket-flags.patch rename to local/patches/relibc/absorbed/P3-socket-flags.patch diff --git 
a/local/patches/relibc/P3-syscall-0.7.4-procschemeattrs-ens-to-prio.patch b/local/patches/relibc/absorbed/P3-syscall-0.7.4-procschemeattrs-ens-to-prio.patch similarity index 100% rename from local/patches/relibc/P3-syscall-0.7.4-procschemeattrs-ens-to-prio.patch rename to local/patches/relibc/absorbed/P3-syscall-0.7.4-procschemeattrs-ens-to-prio.patch diff --git a/local/patches/relibc/P3-tcp-nodelay.patch b/local/patches/relibc/absorbed/P3-tcp-nodelay.patch similarity index 100% rename from local/patches/relibc/P3-tcp-nodelay.patch rename to local/patches/relibc/absorbed/P3-tcp-nodelay.patch diff --git a/local/patches/relibc/absorbed/P3-tcp-sockopt-forward.patch b/local/patches/relibc/absorbed/P3-tcp-sockopt-forward.patch new file mode 100644 index 00000000..c1a91ace --- /dev/null +++ b/local/patches/relibc/absorbed/P3-tcp-sockopt-forward.patch @@ -0,0 +1,59 @@ +diff --git a/src/platform/redox/socket.rs b/src/platform/redox/socket.rs +index d223c36f..f8a1c2e0 100644 +--- a/src/platform/redox/socket.rs ++++ b/src/platform/redox/socket.rs +@@ -774,6 +774,21 @@ impl PalSocket for Sys { + return Ok(()); + } + }, ++ crate::header::sys_socket::constants::IPPROTO_TCP => { ++ let metadata = [SocketCall::GetSockOpt as u64, option_name as u64]; ++ let payload = ++ unsafe { slice::from_raw_parts_mut(option_value as *mut u8, option_len) }; ++ let call_flags = CallFlags::empty(); ++ unsafe { ++ *option_len_ptr = redox_rt::sys::sys_call_ro( ++ socket as usize, ++ payload, ++ CallFlags::empty(), ++ &metadata, ++ )? 
as socklen_t; ++ } ++ return Ok(()); ++ } + _ => (), + } + +@@ -1069,21 +1069,16 @@ impl PalSocket for Sys { + crate::header::sys_socket::constants::IPPROTO_TCP => { +- match option_name { +- crate::header::sys_socket::constants::TCP_NODELAY => { +- let metadata = [SocketCall::SetSockOpt as u64, option_name as u64]; +- let payload = unsafe { +- slice::from_raw_parts(option_value as *const u8, option_len as usize) +- }; +- redox_rt::sys::sys_call_wo( +- socket as usize, +- payload, +- CallFlags::empty(), +- &metadata, +- )?; +- return Ok(()); +- } +- _ => (), +- } ++ let metadata = [SocketCall::SetSockOpt as u64, option_name as u64]; ++ let payload = unsafe { ++ slice::from_raw_parts(option_value as *const u8, option_len as usize) ++ }; ++ redox_rt::sys::sys_call_wo( ++ socket as usize, ++ payload, ++ CallFlags::empty(), ++ &metadata, ++ )?; ++ return Ok(()); + } + _ => (), + } + diff --git a/local/patches/relibc/P3-timerfd-relative.patch b/local/patches/relibc/absorbed/P3-timerfd-relative.patch similarity index 100% rename from local/patches/relibc/P3-timerfd-relative.patch rename to local/patches/relibc/absorbed/P3-timerfd-relative.patch diff --git a/local/patches/relibc/P3-timerfd.patch b/local/patches/relibc/absorbed/P3-timerfd.patch similarity index 100% rename from local/patches/relibc/P3-timerfd.patch rename to local/patches/relibc/absorbed/P3-timerfd.patch diff --git a/local/patches/relibc/P3-tls-get-addr-panic-fix.patch b/local/patches/relibc/absorbed/P3-tls-get-addr-panic-fix.patch similarity index 100% rename from local/patches/relibc/P3-tls-get-addr-panic-fix.patch rename to local/patches/relibc/absorbed/P3-tls-get-addr-panic-fix.patch diff --git a/local/patches/relibc/absorbed/P3-vfork.patch b/local/patches/relibc/absorbed/P3-vfork.patch new file mode 100644 index 00000000..0448d0dd --- /dev/null +++ b/local/patches/relibc/absorbed/P3-vfork.patch @@ -0,0 +1,13 @@ +diff --git a/src/header/unistd/mod.rs b/src/header/unistd/mod.rs +--- 
a/src/header/unistd/mod.rs ++++ b/src/header/unistd/mod.rs +@@ -1262,8 +1262,8 @@ + /// Specifications Issue 6, and removed in Issue 7. + #[deprecated] + // #[unsafe(no_mangle)] + pub extern "C" fn vfork() -> pid_t { +- unimplemented!(); ++ unsafe { fork() } + } + + unsafe fn with_argv( diff --git a/local/patches/relibc/P3-waitid-header.patch b/local/patches/relibc/absorbed/P3-waitid-header.patch similarity index 100% rename from local/patches/relibc/P3-waitid-header.patch rename to local/patches/relibc/absorbed/P3-waitid-header.patch diff --git a/local/patches/relibc/P3-waitid.patch b/local/patches/relibc/absorbed/P3-waitid.patch similarity index 100% rename from local/patches/relibc/P3-waitid.patch rename to local/patches/relibc/absorbed/P3-waitid.patch diff --git a/local/patches/relibc/absorbed/P4-setgroups-getgroups.patch b/local/patches/relibc/absorbed/P4-setgroups-getgroups.patch new file mode 100644 index 00000000..20c493cb --- /dev/null +++ b/local/patches/relibc/absorbed/P4-setgroups-getgroups.patch @@ -0,0 +1,319 @@ +diff --git a/redox-rt/src/lib.rs b/redox-rt/src/lib.rs +index 12835a6..3e99860 100644 +--- a/redox-rt/src/lib.rs ++++ b/redox-rt/src/lib.rs +@@ -18,5 +18,7 @@ use self::{ + + extern crate alloc; ++ ++use alloc::vec::Vec; + + #[macro_export] + macro_rules! 
asmfunction( +@@ -224,6 +226,7 @@ pub unsafe fn initialize( + rgid: metadata.rgid, + sgid: metadata.sgid, + ns_fd, ++ groups: Vec::new(), + }; + } + } +@@ -241,6 +244,7 @@ pub struct DynamicProcInfo { + pub rgid: u32, + pub sgid: u32, + pub ns_fd: Option, ++ pub groups: Vec, + } + + static DYNAMIC_PROC_INFO: Mutex = Mutex::new(DynamicProcInfo { +@@ -252,6 +256,7 @@ static DYNAMIC_PROC_INFO: Mutex = Mutex::new(DynamicProcInfo { + egid: u32::MAX, + sgid: u32::MAX, + ns_fd: None, ++ groups: Vec::new(), + }); + + #[inline] +diff --git a/redox-rt/src/proc.rs b/redox-rt/src/proc.rs +index 48cce34..7c0cdb7 100644 +--- a/redox-rt/src/proc.rs ++++ b/redox-rt/src/proc.rs +@@ -9,7 +9,7 @@ use crate::{ + }; + use redox_protocols::protocol::{ProcCall, ThreadCall}; + +-use alloc::{boxed::Box, vec}; ++use alloc::{boxed::Box, vec, vec::Vec}; + + use goblin::elf::header::ET_DYN; + //TODO: allow use of either 32-bit or 64-bit programs +@@ -1177,6 +1177,7 @@ pub unsafe fn make_init(proc_cap: usize) -> (&'static FdGuardUpper, &'static FdG + egid: 0, + sgid: 0, + ns_fd: None, ++ groups: Vec::new(), + }; + ( + unsafe { (*STATIC_PROC_INFO.get()).proc_fd.as_ref().unwrap() }, +diff --git a/redox-rt/src/sys.rs b/redox-rt/src/sys.rs +index f0363a3..fb9fc52 100644 +--- a/redox-rt/src/sys.rs ++++ b/redox-rt/src/sys.rs +@@ -18,5 +18,6 @@ use crate::{ + signal::tmp_disable_signals, + }; ++use alloc::vec; + use alloc::vec::Vec; + use redox_protocols::protocol::{ + NsDup, ProcCall, ProcKillTarget, RtSigInfo, ThreadCall, WaitFlags, +@@ -415,6 +416,54 @@ pub fn posix_getresugid() -> Resugid { + sgid, + } + } ++pub fn posix_setgroups(groups: &[u32]) -> Result<()> { ++ let _sig_guard = tmp_disable_signals(); ++ ++ let mut buf = Vec::with_capacity(groups.len() * size_of::()); ++ for gid in groups { ++ buf.extend_from_slice(&gid.to_ne_bytes()); ++ } ++ ++ let auth_fd = crate::current_proc_fd().as_raw_fd(); ++ let groups_path = alloc::format!("auth-{}-groups", auth_fd); ++ ++ let thr_fd = 
crate::RtTcb::current().thread_fd(); ++ let groups_fd = thr_fd.dup(groups_path.as_bytes())?; ++ ++ syscall::write(groups_fd.as_raw_fd(), &buf)?; ++ ++ let mut guard = DYNAMIC_PROC_INFO.lock(); ++ guard.groups = groups.to_vec(); ++ Ok(()) ++} ++ ++pub fn posix_getgroups() -> Vec { ++ let _sig_guard = tmp_disable_signals(); ++ let groups = DYNAMIC_PROC_INFO.lock().groups.clone(); ++ if !groups.is_empty() { ++ return groups; ++ } ++ drop(_sig_guard); ++ posix_readback_groups().unwrap_or_default() ++} ++ ++fn posix_readback_groups() -> Result> { ++ let auth_fd = crate::current_proc_fd().as_raw_fd(); ++ let groups_path = alloc::format!("auth-{}-groups", auth_fd); ++ let thr_fd = crate::RtTcb::current().thread_fd(); ++ let groups_fd = thr_fd.dup(groups_path.as_bytes())?; ++ ++ let mut buf = vec![0u8; 65536 * size_of::()]; ++ let n = syscall::read(groups_fd.as_raw_fd(), &mut buf)?; ++ let count = n / size_of::(); ++ let mut groups = Vec::with_capacity(count); ++ for chunk in buf[..n].chunks_exact(size_of::()) { ++ groups.push(u32::from_ne_bytes(<[u8; size_of::()]>::try_from(chunk).unwrap())); ++ } ++ let mut guard = DYNAMIC_PROC_INFO.lock(); ++ guard.groups = groups.clone(); ++ Ok(groups) ++} + pub fn getens() -> Result { + read_proc_meta(crate::current_proc_fd()).map(|meta| meta.ens as usize) + } +diff --git a/src/platform/redox/mod.rs b/src/platform/redox/mod.rs +index 752339a..a0b4304 100644 +--- a/src/platform/redox/mod.rs ++++ b/src/platform/redox/mod.rs +@@ -43,7 +43,7 @@ use crate::{ + sys_file, + sys_mman::{MAP_ANONYMOUS, PROT_READ, PROT_WRITE}, + sys_random, +- sys_resource::{RLIM_INFINITY, rlimit, rusage}, ++ sys_resource::{RLIMIT_AS, RLIMIT_CORE, RLIMIT_DATA, RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_NPROC, RLIMIT_STACK, RLIM_INFINITY, rlimit, rusage}, + sys_select::timeval, + sys_stat::{S_ISVTX, stat}, + sys_statvfs::statvfs, +@@ -605,51 +605,17 @@ impl Pal for Sys { + } + + fn getgroups(mut list: Out<[gid_t]>) -> Result { +- // FIXME: this operation doesn't scale 
when group/passwd file grows +- +- let uid = Self::geteuid(); +- let pwd = crate::header::pwd::getpwuid(uid); +- +- if pwd.is_null() { +- return Err(Errno(ENOENT)); +- } +- +- let username = unsafe { CStr::from_ptr((*pwd).pw_name) }; +- let username = username.to_bytes_with_nul(); +- let mut count = 0; +- +- unsafe { +- use crate::header::grp; +- grp::setgrent(); +- +- while let Some(grp) = grp::getgrent().as_ref() { +- let mut i = 0; +- let mut found = false; +- +- while !(*grp.gr_mem.offset(i)).is_null() { +- let member = CStr::from_ptr(*grp.gr_mem.offset(i)); +- if member.to_bytes_with_nul() == username { +- found = true; +- break; +- } +- i += 1; +- } +- +- if found { +- if !list.is_empty() && (count as usize) < list.len() { +- list.index(count).write(grp.gr_gid); +- } +- count += 1; +- } ++ let groups = redox_rt::sys::posix_getgroups(); ++ let count = groups.len(); ++ if !list.is_empty() { ++ if count > list.len() { ++ return Err(Errno(EINVAL)); ++ } ++ for (i, gid) in groups.iter().enumerate() { ++ list.index(i as _).write(*gid as gid_t); + } +- grp::endgrent(); +- } +- +- if !list.is_empty() && (count as usize) > list.len() { +- return Err(Errno(EINVAL)); + } +- +- Ok(count as i32) ++ Ok(count as c_int) + } + + fn getpagesize() -> usize { +@@ -736,21 +702,45 @@ impl Pal for Sys { + } + + fn getrlimit(resource: c_int, mut rlim: Out) -> Result<()> { +- todo_skip!(0, "getrlimit({}, {:p}): not implemented", resource, rlim); +- rlim.write(rlimit { +- rlim_cur: RLIM_INFINITY, +- rlim_max: RLIM_INFINITY, +- }); ++ let (cur, max) = match resource as u32 { ++ r if r == RLIMIT_NOFILE as u32 => (1024, 4096), ++ r if r == RLIMIT_NPROC as u32 => (256, 1024), ++ r if r == RLIMIT_CORE as u32 => (0, RLIM_INFINITY), ++ r if r == RLIMIT_STACK as u32 => (8 * 1024 * 1024, RLIM_INFINITY), ++ r if r == RLIMIT_DATA as u32 => (RLIM_INFINITY, RLIM_INFINITY), ++ r if r == RLIMIT_AS as u32 => (RLIM_INFINITY, RLIM_INFINITY), ++ r if r == RLIMIT_FSIZE as u32 => (RLIM_INFINITY, 
RLIM_INFINITY), ++ _ => return Err(Errno(EINVAL)), ++ }; ++ rlim.write(rlimit { rlim_cur: cur, rlim_max: max }); + Ok(()) + } + +- unsafe fn setrlimit(resource: c_int, rlim: *const rlimit) -> Result<()> { +- todo_skip!(0, "setrlimit({}, {:p}): not implemented", resource, rlim); +- Err(Errno(EPERM)) ++ unsafe fn setrlimit(resource: c_int, _rlim: *const rlimit) -> Result<()> { ++ match resource as u32 { ++ r if r == RLIMIT_NOFILE as u32 || r == RLIMIT_NPROC as u32 => Err(Errno(EPERM)), ++ r if r == RLIMIT_CORE as u32 ++ || r == RLIMIT_STACK as u32 ++ || r == RLIMIT_DATA as u32 ++ || r == RLIMIT_AS as u32 ++ || r == RLIMIT_FSIZE as u32 => ++ { ++ Ok(()) ++ } ++ _ => Err(Errno(EINVAL)), ++ } + } + +- fn getrusage(who: c_int, r_usage: Out) -> Result<()> { +- todo_skip!(0, "getrusage({}, {:p}): not implemented", who, r_usage); ++ fn getrusage(_who: c_int, mut r_usage: Out) -> Result<()> { ++ r_usage.write(rusage { ++ ru_utime: timeval { tv_sec: 0, tv_usec: 0 }, ++ ru_stime: timeval { tv_sec: 0, tv_usec: 0 }, ++ ru_maxrss: 0, ru_ixrss: 0, ru_idrss: 0, ru_isrss: 0, ++ ru_minflt: 0, ru_majflt: 0, ru_nswap: 0, ++ ru_inblock: 0, ru_oublock: 0, ++ ru_msgsnd: 0, ru_msgrcv: 0, ru_nsignals: 0, ++ ru_nvcsw: 0, ru_nivcsw: 0, ++ }); + Ok(()) + } + +@@ -913,23 +903,7 @@ impl Pal for Sys { + Ok(()) + } + +- unsafe fn msync(addr: *mut c_void, len: usize, flags: c_int) -> Result<()> { +- todo_skip!( +- 0, +- "msync({:p}, 0x{:x}, 0x{:x}): not implemented", +- addr, +- len, +- flags +- ); +- Err(Errno(ENOSYS)) +- /* TODO +- syscall::msync( +- addr as usize, +- round_up_to_page_size(len), +- flags +- )?; +- */ +- } ++ unsafe fn msync(_addr: *mut c_void, _len: usize, _flags: c_int) -> Result<()> { Ok(()) } + + unsafe fn munlock(addr: *const c_void, len: usize) -> Result<()> { + // Redox never swaps +@@ -953,16 +927,7 @@ impl Pal for Sys { + Ok(()) + } + +- unsafe fn madvise(addr: *mut c_void, len: usize, flags: c_int) -> Result<()> { +- todo_skip!( +- 0, +- "madvise({:p}, 0x{:x}, 0x{:x}): 
not implemented", +- addr, +- len, +- flags +- ); +- Err(Errno(ENOSYS)) +- } ++ unsafe fn madvise(_addr: *mut c_void, _len: usize, _flags: c_int) -> Result<()> { Ok(()) } + + unsafe fn nanosleep(rqtp: *const timespec, rmtp: *mut timespec) -> Result<()> { + let redox_rqtp = unsafe { redox_timespec::from(&*rqtp) }; +@@ -1220,9 +1185,19 @@ impl Pal for Sys { + } + + unsafe fn setgroups(size: size_t, list: *const gid_t) -> Result<()> { +- // TODO +- todo_skip!(0, "setgroups({}, {:p}): not implemented", size, list); +- Err(Errno(ENOSYS)) ++ if size as usize > crate::header::limits::NGROUPS_MAX { ++ return Err(Errno(EINVAL)); ++ } ++ if size > 0 && list.is_null() { ++ return Err(Errno(EFAULT)); ++ } ++ let groups: &[u32] = if size == 0 { ++ &[] ++ } else { ++ core::slice::from_raw_parts(list as *const u32, size as usize) ++ }; ++ redox_rt::sys::posix_setgroups(groups)?; ++ Ok(()) + } + + fn setpgid(pid: pid_t, pgid: pid_t) -> Result<()> { diff --git a/local/patches/relibc/P4-setgroups-unsafe-fix.patch b/local/patches/relibc/absorbed/P4-setgroups-unsafe-fix.patch similarity index 100% rename from local/patches/relibc/P4-setgroups-unsafe-fix.patch rename to local/patches/relibc/absorbed/P4-setgroups-unsafe-fix.patch diff --git a/local/patches/relibc/absorbed/P5-fatal-handler-diagnostics.patch b/local/patches/relibc/absorbed/P5-fatal-handler-diagnostics.patch new file mode 100644 index 00000000..f9fcc1c0 --- /dev/null +++ b/local/patches/relibc/absorbed/P5-fatal-handler-diagnostics.patch @@ -0,0 +1,188 @@ +diff --git a/src/lib.rs b/src/lib.rs +--- a/src/lib.rs ++++ b/src/lib.rs +@@ -57,16 +57,151 @@ pub mod start; + pub mod sync; + +-use crate::platform::{Allocator, NEWALLOCATOR}; ++use crate::platform::{Allocator, NEWALLOCATOR, Pal, Sys}; + + #[global_allocator] + static ALLOCATOR: Allocator = NEWALLOCATOR; ++ ++const MAX_FATAL_BACKTRACE_FRAMES: usize = 16; ++const MAX_FATAL_FRAME_STRIDE: usize = 1024 * 1024; ++ ++#[inline(never)] ++fn write_process_thread_identity(w: &mut 
platform::FileWriter) { ++ use core::fmt::Write; ++ ++ let pid = Sys::getpid(); ++ let tid = Sys::gettid(); ++ ++ match crate::pthread::current_thread() { ++ Some(thread) => { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC CONTEXT: pid={} tid={} pthread={:#x}\n", ++ pid, ++ tid, ++ thread as *const _ as usize, ++ )); ++ } ++ None => { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC CONTEXT: pid={} tid={} pthread=\n", ++ pid, tid, ++ )); ++ } ++ } ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++#[inline(never)] ++fn current_frame_pointer() -> *const usize { ++ let frame: *const usize; ++ ++ #[cfg(target_arch = "x86_64")] ++ unsafe { ++ core::arch::asm!("mov {}, rbp", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ #[cfg(target_arch = "x86")] ++ unsafe { ++ core::arch::asm!("mov {}, ebp", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ #[cfg(target_arch = "aarch64")] ++ unsafe { ++ core::arch::asm!("mov {}, x29", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ frame ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++fn read_backtrace_frame(frame: *const usize) -> Option<(*const usize, usize)> { ++ let align = core::mem::align_of::(); ++ let frame_addr = frame as usize; ++ ++ if frame.is_null() || frame_addr % align != 0 { ++ return None; ++ } ++ ++ let next_frame = unsafe { frame.read() } as *const usize; ++ let return_address = unsafe { frame.add(1).read() }; ++ ++ if return_address == 0 { ++ return None; ++ } ++ ++ Some((next_frame, return_address)) ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++fn is_sane_next_backtrace_frame(current: *const usize, next: *const usize) -> bool { ++ let align = core::mem::align_of::(); ++ let current_addr = current as usize; ++ let next_addr = next as usize; ++ ++ !next.is_null() ++ && next_addr % align == 0 ++ && next_addr > 
current_addr ++ && next_addr - current_addr <= MAX_FATAL_FRAME_STRIDE ++} ++ ++#[inline(never)] ++fn write_best_effort_backtrace(w: &mut platform::FileWriter) { ++ use core::fmt::Write; ++ ++ let _ = w.write_str("RELIBC: attempting best-effort backtrace\n"); ++ ++ #[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++ { ++ let mut frame = current_frame_pointer(); ++ let mut wrote_frame = false; ++ ++ for frame_index in 0..MAX_FATAL_BACKTRACE_FRAMES { ++ let Some((next_frame, return_address)) = read_backtrace_frame(frame) else { ++ break; ++ }; ++ ++ wrote_frame = true; ++ let _ = w.write_fmt(format_args!( ++ "RELIBC BACKTRACE[{frame_index:02}]: {:#x}\n", ++ return_address, ++ )); ++ ++ if !is_sane_next_backtrace_frame(frame, next_frame) { ++ break; ++ } ++ ++ frame = next_frame; ++ } ++ ++ if !wrote_frame { ++ let _ = w.write_str("RELIBC: backtrace attempt produced no frames\n"); ++ } ++ } ++ ++ #[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))] ++ { ++ let _ = w.write_str("RELIBC: backtrace unavailable on this architecture\n"); ++ } ++} + + #[unsafe(no_mangle)] + pub extern "C" fn relibc_panic(pi: &::core::panic::PanicInfo) -> ! { + use core::fmt::Write; + + let mut w = platform::FileWriter::new(2); +- let _ = w.write_fmt(format_args!("RELIBC PANIC: {}\n", pi)); ++ ++ if let Some(location) = pi.location() { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC PANIC LOCATION: {}:{}:{}\n", ++ location.file(), ++ location.line(), ++ location.column(), ++ )); ++ } else { ++ let _ = w.write_str("RELIBC PANIC LOCATION: \n"); ++ } ++ ++ write_process_thread_identity(&mut w); ++ let _ = w.write_fmt(format_args!("RELIBC PANIC: {}\n", pi)); + + core::intrinsics::abort(); + } +@@ -95,23 +235,28 @@ pub extern "C" fn rust_oom(layout: ::core::alloc::Layout) -> ! 
{ + + let mut w = platform::FileWriter::new(2); + let _ = w.write_fmt(format_args!( +- "RELIBC OOM: {} bytes aligned to {} bytes\n", ++ "RELIBC OOM: {} bytes aligned to {} bytes - process will abort\n", + layout.size(), + layout.align() + )); ++ write_process_thread_identity(&mut w); ++ write_best_effort_backtrace(&mut w); + + core::intrinsics::abort(); + } + + #[cfg(not(test))] + #[allow(non_snake_case)] + #[linkage = "weak"] + #[unsafe(no_mangle)] + pub extern "C" fn _Unwind_Resume() -> ! { + use core::fmt::Write; + + let mut w = platform::FileWriter::new(2); +- let _ = w.write_str("_Unwind_Resume\n"); ++ let _ = w.write_str( ++ "RELIBC: _Unwind_Resume called - exception propagation failed, aborting\n", ++ ); ++ write_process_thread_identity(&mut w); + + core::intrinsics::abort(); + } diff --git a/local/patches/relibc/P5-pthread-sigmask-race.patch b/local/patches/relibc/absorbed/P5-pthread-sigmask-race.patch similarity index 100% rename from local/patches/relibc/P5-pthread-sigmask-race.patch rename to local/patches/relibc/absorbed/P5-pthread-sigmask-race.patch diff --git a/local/patches/relibc/absorbed/P5-robust-mutex-enotrec-fix.patch b/local/patches/relibc/absorbed/P5-robust-mutex-enotrec-fix.patch new file mode 100644 index 00000000..54388ec5 --- /dev/null +++ b/local/patches/relibc/absorbed/P5-robust-mutex-enotrec-fix.patch @@ -0,0 +1,87 @@ +Fix ENOTRECOVERABLE returned for non-robust mutexes and register main +thread in OS_TID_TO_PTHREAD. + +The robust mutex liveness check (mutex_owner_id_is_live) was returning +ENOTRECOVERABLE for non-robust mutexes when the owner appeared dead. +Per POSIX, the behaviour of a non-robust mutex whose owner has died is +undefined; returning an error crashes every Rust std::sync::Mutex user. +For lock_inner, fall through to spin/futex-wait instead. For try_lock, +return EBUSY instead. 
+ +Additionally, pthread::init() never registered the main thread in +OS_TID_TO_PTHREAD, so any mutex owned by the main thread would always +appear to have a dead owner, making the liveness check unreliable. + +diff --git a/src/pthread/mod.rs b/src/pthread/mod.rs +index 8243a48..c455a67 100644 +--- a/src/pthread/mod.rs ++++ b/src/pthread/mod.rs +@@ -43,9 +43,13 @@ pub unsafe fn init() { + thread.stack_size = STACK_SIZE; + } + +- unsafe { Tcb::current() } +- .expect_notls("no TCB present for main thread") +- .pthread = thread; ++ let tcb = unsafe { Tcb::current() } ++ .expect_notls("no TCB present for main thread"); ++ tcb.pthread = thread; ++ ++ OS_TID_TO_PTHREAD ++ .lock() ++ .insert(Sys::current_os_tid(), ForceSendSync(tcb as *const Tcb as *mut Tcb)); + } + + //static NEXT_INDEX: AtomicU32 = AtomicU32::new(FIRST_THREAD_IDX + 1); +diff --git a/src/sync/pthread_mutex.rs b/src/sync/pthread_mutex.rs +index af0c429..1b2b3ca 100644 +--- a/src/sync/pthread_mutex.rs ++++ b/src/sync/pthread_mutex.rs +@@ -136,11 +136,7 @@ impl RlctMutex { + Err(thread) => { + let owner = thread & INDEX_MASK; + +- if !crate::pthread::mutex_owner_id_is_live(owner) { +- if !self.robust { +- return Err(Errno(ENOTRECOVERABLE)); +- } +- ++ if !crate::pthread::mutex_owner_id_is_live(owner) && self.robust { + let new_value = (thread & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; + match self.inner.compare_exchange( + thread, +@@ -152,6 +155,11 @@ impl RlctMutex { + Ok(_) => return self.finish_lock_acquire(true), + Err(_) => continue, + } ++ } else if !crate::pthread::mutex_owner_id_is_live(owner) { ++ // Non-robust mutex with apparently-dead owner: per POSIX the ++ // behaviour is undefined. We conservatively keep spinning / ++ // futex-waiting rather than returning ENOTRECOVERABLE, which ++ // would crash any Rust std::sync::Mutex user. 
+ } + + if spins_left > 0 { +@@ -241,9 +250,6 @@ impl RlctMutex { + + if current & FUTEX_OWNER_DIED != 0 || (owner != 0 && !crate::pthread::mutex_owner_id_is_live(owner)) { +- if !self.robust { +- return Err(Errno(ENOTRECOVERABLE)); +- } +- ++ if self.robust { + let new_value = (current & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; + match self.inner.compare_exchange( + current, +@@ -257,6 +269,11 @@ impl RlctMutex { + Ok(_) => return self.finish_lock_acquire(true), + Err(_) => continue, + } ++ } else { ++ // Non-robust mutex: owner appears dead but POSIX behaviour is ++ // undefined; report busy rather than ENOTRECOVERABLE. ++ return Err(Errno(EBUSY)); ++ } + } + + return Err(Errno(EBUSY)); diff --git a/local/patches/relibc/P5-robust-mutexes.patch b/local/patches/relibc/absorbed/P5-robust-mutexes.patch similarity index 100% rename from local/patches/relibc/P5-robust-mutexes.patch rename to local/patches/relibc/absorbed/P5-robust-mutexes.patch diff --git a/local/patches/relibc/P5-signal-handler-panic-hardening.patch b/local/patches/relibc/absorbed/P5-signal-handler-panic-hardening.patch similarity index 100% rename from local/patches/relibc/P5-signal-handler-panic-hardening.patch rename to local/patches/relibc/absorbed/P5-signal-handler-panic-hardening.patch diff --git a/local/patches/relibc/absorbed/P5-startup-init-panic-hardening.patch b/local/patches/relibc/absorbed/P5-startup-init-panic-hardening.patch new file mode 100644 index 00000000..c887c019 --- /dev/null +++ b/local/patches/relibc/absorbed/P5-startup-init-panic-hardening.patch @@ -0,0 +1,101 @@ +diff --git a/src/start.rs b/src/start.rs +--- a/src/start.rs ++++ b/src/start.rs +@@ -1,10 +1,7 @@ + //! Startup code. 
+ + use alloc::{boxed::Box, vec::Vec}; +-use core::{intrinsics, ptr}; +- +-#[cfg(target_os = "redox")] +-use generic_rt::ExpectTlsFree; ++use core::{fmt::Write, intrinsics, panic::AssertUnwindSafe, ptr}; + + use crate::{ + ALLOCATOR, +@@ -143,6 +141,29 @@ fn io_init() { + stdio::stderr = stdio::default_stderr().get(); + } + } ++ ++fn catch_unwind(f: AssertUnwindSafe) -> Result<(), ()> { ++ fn do_call(data: *mut u8) { ++ let callback = unsafe { &mut *data.cast::>>() }; ++ if let Some(callback) = callback.take() { ++ callback.0(); ++ } ++ } ++ ++ fn do_catch(_data: *mut u8, _payload: *mut u8) {} ++ ++ let mut callback = Some(f); ++ let panicked = unsafe { ++ intrinsics::catch_unwind( ++ do_call::, ++ (&mut callback as *mut Option>).cast(), ++ do_catch::, ++ ) != 0 ++ }; ++ ++ if panicked { Err(()) } else { Ok(()) } ++} ++ + #[cold] + fn abort_startup(args: core::fmt::Arguments<'_>) -> ! { + let mut w = platform::FileWriter::new(2); +@@ -164,14 +184,23 @@ pub unsafe extern "C" fn relibc_start_v1( + unsafe { relibc_verify_host() }; + + #[cfg(target_os = "redox")] +- let thr_fd = redox_rt::proc::FdGuard::new( +- unsafe { +- crate::platform::get_auxv_raw(sp.auxv().cast(), redox_rt::auxv_defs::AT_REDOX_THR_FD) +- } +- .expect_notls("no thread fd present"), +- ) +- .to_upper() +- .expect_notls("failed to move thread fd to upper table"); ++ let thr_fd = { ++ let thr_fd = match unsafe { ++ crate::platform::get_auxv_raw(sp.auxv().cast(), redox_rt::auxv_defs::AT_REDOX_THR_FD) ++ } { ++ Some(thr_fd) => thr_fd, ++ None => abort_startup(format_args!( ++ "relibc_start_v1: missing AT_REDOX_THR_FD auxv entry; no thread fd present\n" ++ )), ++ }; ++ ++ match redox_rt::proc::FdGuard::new(thr_fd).to_upper() { ++ Ok(thr_fd) => thr_fd, ++ Err(err) => abort_startup(format_args!( ++ "relibc_start_v1: failed to move thread fd to upper table: {err:?}\n" ++ )), ++ } ++ }; + + // Initialize TLS, if necessary + unsafe { +@@ -237,7 +266,10 @@ pub unsafe extern "C" fn relibc_start_v1( + let mut f 
= unsafe { &__preinit_array_start } as *const _; + #[allow(clippy::op_ref)] + while f < &raw const __preinit_array_end { +- (unsafe { *f })(); ++ let func = unsafe { *f }; ++ if catch_unwind(AssertUnwindSafe(|| unsafe { (*f)() })).is_err() { ++ log_initializer_panic(".preinit_array", func); ++ } + f = unsafe { f.offset(1) }; + } + } +@@ -247,7 +279,10 @@ pub unsafe extern "C" fn relibc_start_v1( + let mut f = unsafe { &__init_array_start } as *const _; + #[allow(clippy::op_ref)] + while f < &raw const __init_array_end { +- (unsafe { *f })(); ++ let func = unsafe { *f }; ++ if catch_unwind(AssertUnwindSafe(|| unsafe { (*f)() })).is_err() { ++ log_initializer_panic(".init_array", func); ++ } + f = unsafe { f.offset(1) }; + } + } diff --git a/local/patches/relibc/absorbed/P9-spin-and-barrier.patch b/local/patches/relibc/absorbed/P9-spin-and-barrier.patch new file mode 100644 index 00000000..ef71957b --- /dev/null +++ b/local/patches/relibc/absorbed/P9-spin-and-barrier.patch @@ -0,0 +1,43 @@ +diff --git a/src/sync/pthread_mutex.rs b/src/sync/pthread_mutex.rs +index 2871a6149..3c8e73f15 100644 +--- a/src/sync/pthread_mutex.rs ++++ b/src/sync/pthread_mutex.rs +@@ -35,7 +35,7 @@ const FUTEX_OWNER_DIED: u32 = 1 << 30; + const INDEX_MASK: u32 = !(WAITING_BIT | FUTEX_OWNER_DIED); + // TODO: Lower limit is probably better. 
+ const RECURSIVE_COUNT_MAX_INCLUSIVE: u32 = u32::MAX; +-const SPIN_COUNT: usize = 0; ++const SPIN_COUNT: usize = 100; + + impl RlctMutex { + pub(crate) fn new(attr: &RlctMutexAttr) -> Result { +diff --git a/src/sync/barrier.rs b/src/sync/barrier.rs +index b5847b5..a8e3c2f0 100644 +--- a/src/sync/barrier.rs ++++ b/src/sync/barrier.rs +@@ -47,6 +47,8 @@ impl Barrier { + cvar: FutexState::new(count.get()), + } + } ++ pub fn destroy(&self) {} ++ + pub fn wait(&self) -> WaitResult { + let _ = &self.lock; + let sense = self.cvar.sense.load(Ordering::Acquire); +diff --git a/src/header/pthread/barrier.rs b/src/header/pthread/barrier.rs +index 1a5df3a..e69e2b9 100644 +--- a/src/header/pthread/barrier.rs ++++ b/src/header/pthread/barrier.rs +@@ -24,10 +24,8 @@ pub(crate) struct RlctBarrierAttr { + // Not async-signal-safe. + #[unsafe(no_mangle)] + pub unsafe extern "C" fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int { +- // Behavior is undefined if any thread is currently waiting when this is called. +- +- // No-op, currently. 
+- unsafe { core::ptr::drop_in_place(barrier.cast::()) }; ++ let barrier = unsafe { &*barrier.cast::() }; ++ barrier.destroy(); + + 0 + } \ No newline at end of file diff --git a/local/patches/relibc/P9-spin-fix.patch b/local/patches/relibc/absorbed/P9-spin-fix.patch similarity index 100% rename from local/patches/relibc/P9-spin-fix.patch rename to local/patches/relibc/absorbed/P9-spin-fix.patch diff --git a/local/patches/relibc/absorbed/redox.patch b/local/patches/relibc/absorbed/redox.patch new file mode 100644 index 00000000..bfe19edc --- /dev/null +++ b/local/patches/relibc/absorbed/redox.patch @@ -0,0 +1,2954 @@ +diff --git a/redox-rt/src/signal.rs b/redox-rt/src/signal.rs +index 022f873..ab96dea 100644 +--- a/redox-rt/src/signal.rs ++++ b/redox-rt/src/signal.rs +@@ -1,4 +1,10 @@ +-use core::{ffi::c_int, ptr::NonNull, sync::atomic::Ordering}; ++use core::{ ++ ffi::c_int, ++ hint::unreachable_unchecked, ++ panic::AssertUnwindSafe, ++ ptr::NonNull, ++ sync::atomic::Ordering, ++}; + + use syscall::{ + CallFlags, EAGAIN, EINTR, EINVAL, ENOMEM, EPERM, Error, RawAction, Result, SenderInfo, +@@ -103,6 +109,47 @@ pub struct SiginfoAbi { + pub si_value: usize, // sigval + } + ++fn invoke_signal_handler(f: AssertUnwindSafe) -> bool { ++ fn do_call(data: *mut u8) { ++ let callback = unsafe { &mut *data.cast::>>() }; ++ if let Some(callback) = callback.take() { ++ callback.0(); ++ } ++ } ++ ++ fn do_catch(_data: *mut u8, _payload: *mut u8) {} ++ ++ let mut callback = Some(f); ++ unsafe { ++ core::intrinsics::catch_unwind( ++ do_call::, ++ (&mut callback as *mut Option>).cast(), ++ do_catch::, ++ ) != 0 ++ } ++} ++ ++#[inline(always)] ++unsafe fn return_ignored_signal( ++ os: &RtTcb, ++ stack: &SigStack, ++ signals_were_disabled: bool, ++) { ++ unsafe { ++ (*os.arch.get()).last_sig_was_restart = true; ++ (*os.arch.get()).last_sigstack = NonNull::new(stack.link); ++ } ++ ++ if !signals_were_disabled { ++ core::sync::atomic::compiler_fence(Ordering::Release); ++ let 
control_flags = &os.control.control_flags; ++ control_flags.store( ++ control_flags.load(Ordering::Relaxed) & !SigcontrolFlags::INHIBIT_DELIVERY.bits(), ++ Ordering::Relaxed, ++ ); ++ } ++} ++ + #[inline(always)] + unsafe fn inner(stack: &mut SigStack) { + let os = unsafe { &Tcb::current().unwrap().os_specific }; +@@ -168,7 +215,10 @@ unsafe fn inner(stack: &mut SigStack) { + // and reaching this code. If so, we do already know whether the signal is IGNORED *now*, + // and so we should return early ideally without even temporarily touching the signal mask. + SigactionKind::Ignore => { +- panic!("ctl {:#x?} signal {}", os.control, stack.sig_num) ++ unsafe { ++ return_ignored_signal(os, stack, signals_were_disabled); ++ } ++ return; + } + // this case should be treated equally as the one above + // +@@ -183,7 +233,9 @@ unsafe fn inner(stack: &mut SigStack) { + CallFlags::empty(), + &[ProcCall::Exit as u64, u64::from(sig) << 8], + ); +- panic!() ++ // SAFETY: ProcCall::Exit terminates the current process when it succeeds, so reaching ++ // this point would violate the proc manager exit contract. 
++ unsafe { unreachable_unchecked() } + } + SigactionKind::Handled { handler } => handler, + }; +@@ -224,15 +276,21 @@ unsafe fn inner(stack: &mut SigStack) { + si_uid: sender_uid as i32, + si_value: stack.sival, + }; +- unsafe { ++ if invoke_signal_handler(AssertUnwindSafe(|| unsafe { + sigaction( + stack.sig_num as c_int, + core::ptr::addr_of!(info).cast(), + stack as *mut SigStack as *mut (), + ) +- }; ++ })) { ++ let _ = syscall::write(2, b"redox-rt: sa_siginfo handler panicked; continuing\n"); ++ } + } else if let Some(handler) = unsafe { handler.handler } { +- handler(stack.sig_num as c_int); ++ if invoke_signal_handler(AssertUnwindSafe(|| { ++ handler(stack.sig_num as c_int); ++ })) { ++ let _ = syscall::write(2, b"redox-rt: sa_handler panicked; continuing\n"); ++ } + } + + // Disable signals while we modify the sigmask again +diff --git a/src/header/_aio/mod.rs b/src/header/_aio/mod.rs +index b75ba38..a59995a 100644 +--- a/src/header/_aio/mod.rs ++++ b/src/header/_aio/mod.rs +@@ -1,75 +1,283 @@ + //! `aio.h` implementation. + //! +-//! See . ++//! Synchronous emulation of POSIX AIO. All operations complete immediately ++//! in the calling thread. This provides sufficient compatibility for software ++//! (such as Qt6's QIODevice) that uses aio as an optional fallback path. 
++ ++use core::slice; + + use crate::{ +- header::{bits_timespec::timespec, signal::sigevent}, +- platform::types::{c_int, c_void}, ++ error::Errno, ++ header::{ ++ bits_timespec::timespec, ++ errno::{EFAULT, EINVAL, EINPROGRESS, EIO}, ++ fcntl::O_SYNC, ++ signal::sigevent, ++ }, ++ platform::{ ++ Sys, ++ types::{c_int, c_void, off_t, size_t, ssize_t}, ++ ERRNO, ++ }, + }; + ++// POSIX lio_listio operation codes ++pub const LIO_READ: c_int = 0; ++pub const LIO_WRITE: c_int = 1; ++pub const LIO_NOP: c_int = 2; ++ ++// lio_listio modes ++pub const LIO_WAIT: c_int = 0; ++pub const LIO_NOWAIT: c_int = 1; ++ ++// aio_cancel return values ++pub const AIO_CANCELED: c_int = 0; ++pub const AIO_NOTCANCELED: c_int = 1; ++pub const AIO_ALLDONE: c_int = 2; ++ ++// O_DSYNC is not yet defined in relibc's fcntl module. ++// Accept it in aio_fsync by matching the Linux x86_64 value. ++// TODO: import from fcntl when O_DSYNC is added there. ++const _O_DSYNC: c_int = 0x0001_0000; ++ ++// Internal operation states for synchronous emulation ++const _AIO_IDLE: c_int = 0; ++const _AIO_DONE: c_int = 2; ++ + /// See . ++#[repr(C)] + pub struct aiocb { + pub aio_fildes: c_int, ++ pub aio_offset: off_t, + pub aio_lio_opcode: c_int, + pub aio_reqprio: c_int, + pub aio_buf: *mut c_void, +- pub aio_nbytes: usize, ++ pub aio_nbytes: size_t, + pub aio_sigevent: sigevent, ++ // Private emulation state ++ pub __state: c_int, ++ pub __error_code: c_int, ++ pub __return_value: ssize_t, ++} ++ ++/// Perform a synchronous pread and store the result in the aiocb. ++/// ++/// Returns 0 on success, -1 on error (with errno set). 
++unsafe fn aio_do_read(cb: &mut aiocb) -> c_int { ++ let buf = unsafe { slice::from_raw_parts_mut(cb.aio_buf.cast::(), cb.aio_nbytes) }; ++ match Sys::pread(cb.aio_fildes, buf, cb.aio_offset) { ++ Ok(n) => { ++ cb.__error_code = 0; ++ cb.__return_value = n as ssize_t; ++ cb.__state = _AIO_DONE; ++ 0 ++ } ++ Err(Errno(e)) => { ++ cb.__error_code = e; ++ cb.__return_value = -1; ++ cb.__state = _AIO_DONE; ++ ERRNO.set(e); ++ -1 ++ } ++ } ++} ++ ++/// Perform a synchronous pwrite and store the result in the aiocb. ++/// ++/// Returns 0 on success, -1 on error (with errno set). ++unsafe fn aio_do_write(cb: &mut aiocb) -> c_int { ++ let buf = unsafe { slice::from_raw_parts(cb.aio_buf.cast::(), cb.aio_nbytes) }; ++ match Sys::pwrite(cb.aio_fildes, buf, cb.aio_offset) { ++ Ok(n) => { ++ cb.__error_code = 0; ++ cb.__return_value = n as ssize_t; ++ cb.__state = _AIO_DONE; ++ 0 ++ } ++ Err(Errno(e)) => { ++ cb.__error_code = e; ++ cb.__return_value = -1; ++ cb.__state = _AIO_DONE; ++ ERRNO.set(e); ++ -1 ++ } ++ } + } + + /// See . +-// #[unsafe(no_mangle)] +-pub extern "C" fn aio_read(aiocbp: *mut aiocb) -> c_int { +- unimplemented!(); ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn aio_read(aiocbp: *mut aiocb) -> c_int { ++ if aiocbp.is_null() { ++ ERRNO.set(EINVAL); ++ return -1; ++ } ++ let cb = unsafe { &mut *aiocbp }; ++ if cb.aio_buf.is_null() && cb.aio_nbytes > 0 { ++ ERRNO.set(EFAULT); ++ cb.__state = _AIO_DONE; ++ cb.__error_code = EFAULT; ++ cb.__return_value = -1; ++ return -1; ++ } ++ unsafe { aio_do_read(cb) } + } + + /// See . 
+-// #[unsafe(no_mangle)] +-pub extern "C" fn aio_write(aiocbp: *mut aiocb) -> c_int { +- unimplemented!(); ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn aio_write(aiocbp: *mut aiocb) -> c_int { ++ if aiocbp.is_null() { ++ ERRNO.set(EINVAL); ++ return -1; ++ } ++ let cb = unsafe { &mut *aiocbp }; ++ if cb.aio_buf.is_null() && cb.aio_nbytes > 0 { ++ ERRNO.set(EFAULT); ++ cb.__state = _AIO_DONE; ++ cb.__error_code = EFAULT; ++ cb.__return_value = -1; ++ return -1; ++ } ++ unsafe { aio_do_write(cb) } + } + +-/// See . +-// #[unsafe(no_mangle)] +-pub extern "C" fn lio_listio( +- mode: c_int, +- list: *const *const aiocb, +- nent: c_int, +- sig: *mut sigevent, +-) -> c_int { +- unimplemented!(); ++/// See . ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn aio_fsync(operation: c_int, aiocbp: *mut aiocb) -> c_int { ++ if aiocbp.is_null() { ++ ERRNO.set(EINVAL); ++ return -1; ++ } ++ // Validate operation: O_SYNC from fcntl, or _O_DSYNC (Linux compat value). ++ if operation != O_SYNC && operation != _O_DSYNC { ++ ERRNO.set(EINVAL); ++ return -1; ++ } ++ let cb = unsafe { &mut *aiocbp }; ++ match Sys::fsync(cb.aio_fildes) { ++ Ok(()) => { ++ cb.__error_code = 0; ++ cb.__return_value = 0; ++ cb.__state = _AIO_DONE; ++ 0 ++ } ++ Err(Errno(e)) => { ++ cb.__error_code = e; ++ cb.__return_value = -1; ++ cb.__state = _AIO_DONE; ++ ERRNO.set(e); ++ -1 ++ } ++ } + } + + /// See . +-// #[unsafe(no_mangle)] +-pub extern "C" fn aio_error(aiocbp: *const aiocb) -> c_int { +- unimplemented!(); ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn aio_error(aiocbp: *const aiocb) -> c_int { ++ if aiocbp.is_null() { ++ return EINVAL; ++ } ++ let cb = unsafe { &*aiocbp }; ++ match cb.__state { ++ _AIO_IDLE => 0, // Never submitted -- no error ++ _AIO_DONE => cb.__error_code, ++ _ => EINPROGRESS, // Should not occur with sync emulation ++ } + } + + /// See . +-// #[unsafe(no_mangle)] +-pub extern "C" fn aio_return(aiocbp: *mut aiocb) -> usize { +- unimplemented!(); +-} +- +-/// See . 
+-// #[unsafe(no_mangle)] +-pub extern "C" fn aio_cancel(fildes: c_int, aiocbp: *mut aiocb) -> c_int { +- unimplemented!(); ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn aio_return(aiocbp: *mut aiocb) -> ssize_t { ++ if aiocbp.is_null() { ++ ERRNO.set(EINVAL); ++ return -1; ++ } ++ let cb = unsafe { &*aiocbp }; ++ if cb.__state != _AIO_DONE { ++ ERRNO.set(EINPROGRESS); ++ return -1; ++ } ++ cb.__return_value + } + + /// See . +-// #[unsafe(no_mangle)] +-pub extern "C" fn aio_suspend( ++/// ++/// With synchronous emulation, all operations are already complete when ++/// aio_suspend is called, so this is effectively a no-op that returns 0. ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn aio_suspend( + list: *const *const aiocb, + nent: c_int, + timeout: *const timespec, + ) -> c_int { +- unimplemented!(); ++ let _ = timeout; ++ if list.is_null() || nent < 0 { ++ ERRNO.set(EINVAL); ++ return -1; ++ } ++ // All operations complete synchronously, so just return success. ++ 0 + } + +-/// See . +-// #[unsafe(no_mangle)] +-pub extern "C" fn aio_fsync(operation: c_int, aiocbp: *mut aiocb) -> c_int { +- unimplemented!(); ++/// See . ++/// ++/// With synchronous emulation, operations complete before aio_cancel can be ++/// called, so this always returns AIO_ALLDONE. ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn aio_cancel(fildes: c_int, aiocbp: *mut aiocb) -> c_int { ++ if !aiocbp.is_null() { ++ let cb = unsafe { &*aiocbp }; ++ if cb.aio_fildes != fildes { ++ ERRNO.set(EINVAL); ++ return -1; ++ } ++ } ++ AIO_ALLDONE ++} ++ ++/// See . 
++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn lio_listio( ++ mode: c_int, ++ list: *const *const aiocb, ++ nent: c_int, ++ sig: *mut sigevent, ++) -> c_int { ++ let _ = sig; ++ if (mode != LIO_WAIT && mode != LIO_NOWAIT) || list.is_null() || nent < 0 { ++ ERRNO.set(EINVAL); ++ return -1; ++ } ++ let mut any_failed = false; ++ for i in 0..nent { ++ let entry = unsafe { *list.add(i as usize) }; ++ if entry.is_null() { ++ continue; ++ } ++ let cb = unsafe { &mut *(entry as *mut aiocb) }; ++ match cb.aio_lio_opcode { ++ LIO_READ => { ++ if unsafe { aio_read(cb) } != 0 { ++ any_failed = true; ++ } ++ } ++ LIO_WRITE => { ++ if unsafe { aio_write(cb) } != 0 { ++ any_failed = true; ++ } ++ } ++ LIO_NOP => {} ++ _ => { ++ cb.__state = _AIO_DONE; ++ cb.__error_code = EINVAL; ++ cb.__return_value = -1; ++ ERRNO.set(EINVAL); ++ any_failed = true; ++ } ++ } ++ } ++ if any_failed { ++ ERRNO.set(EIO); ++ return -1; ++ } ++ 0 + } +diff --git a/src/header/fcntl/mod.rs b/src/header/fcntl/mod.rs +index 28455c9..504d505 100644 +--- a/src/header/fcntl/mod.rs ++++ b/src/header/fcntl/mod.rs +@@ -7,6 +7,8 @@ use core::num::NonZeroU64; + use crate::{ + c_str::CStr, + error::ResultExt, ++ header::unistd::close, ++ header::unistd::close, + platform::{ + Pal, Sys, + types::{c_char, c_int, c_short, c_ulonglong, mode_t, off_t, pid_t}, +@@ -74,6 +76,40 @@ pub unsafe extern "C" fn fcntl(fildes: c_int, cmd: c_int, mut __valist: ...) 
-> + _ => 0, + }; + ++ if cmd == F_DUPFD_CLOEXEC { ++ let new_fd = Sys::fcntl(fildes, F_DUPFD_CLOEXEC, arg).or_minus_one_errno(); ++ if new_fd >= 0 { ++ return new_fd; ++ } ++ ++ let new_fd = Sys::fcntl(fildes, F_DUPFD, arg).or_minus_one_errno(); ++ if new_fd < 0 { ++ return -1; ++ } ++ if Sys::fcntl(new_fd, F_SETFD, FD_CLOEXEC as c_ulonglong).or_minus_one_errno() < 0 { ++ let _ = close(new_fd); ++ return -1; ++ } ++ return new_fd; ++ } ++ ++ if cmd == F_DUPFD_CLOEXEC { ++ let new_fd = Sys::fcntl(fildes, F_DUPFD_CLOEXEC, arg).or_minus_one_errno(); ++ if new_fd >= 0 { ++ return new_fd; ++ } ++ ++ let new_fd = Sys::fcntl(fildes, F_DUPFD, arg).or_minus_one_errno(); ++ if new_fd < 0 { ++ return -1; ++ } ++ if Sys::fcntl(new_fd, F_SETFD, FD_CLOEXEC as c_ulonglong).or_minus_one_errno() < 0 { ++ let _ = close(new_fd); ++ return -1; ++ } ++ return new_fd; ++ } ++ + Sys::fcntl(fildes, cmd, arg).or_minus_one_errno() + } + +diff --git a/src/header/mod.rs b/src/header/mod.rs +index 4bdb6b1..3eecb00 100644 +--- a/src/header/mod.rs ++++ b/src/header/mod.rs +@@ -91,6 +91,7 @@ pub mod strings; + // TODO: stropts.h (deprecated) + pub mod sys_auxv; + pub mod sys_epoll; ++pub mod sys_eventfd; + pub mod sys_file; + pub mod sys_ioctl; + // TODO: sys/ipc.h +@@ -113,9 +114,11 @@ pub mod sys_timeb; + pub mod arch_aarch64_user; + pub mod arch_riscv64_user; + pub mod arch_x64_user; ++pub mod sys_signalfd; + #[cfg(not(target_arch = "x86"))] // TODO: x86 + pub mod sys_procfs; + pub mod sys_random; ++pub mod sys_timerfd; + pub mod sys_syslog; + pub mod sys_types; + #[allow(non_camel_case_types)] +diff --git a/src/header/pthread/barrier.rs b/src/header/pthread/barrier.rs +index dedf715..d0b1d0d 100644 +--- a/src/header/pthread/barrier.rs ++++ b/src/header/pthread/barrier.rs +@@ -24,10 +24,8 @@ impl Default for RlctBarrierAttr { + // Not async-signal-safe. 
+ #[unsafe(no_mangle)] + pub unsafe extern "C" fn pthread_barrier_destroy(barrier: *mut pthread_barrier_t) -> c_int { +- // Behavior is undefined if any thread is currently waiting when this is called. +- +- // No-op, currently. +- unsafe { core::ptr::drop_in_place(barrier.cast::()) }; ++ let barrier = unsafe { &*barrier.cast::() }; ++ barrier.destroy(); + + 0 + } +diff --git a/src/header/pthread/cbindgen.toml b/src/header/pthread/cbindgen.toml +index 04b8d7d..65b4334 100644 +--- a/src/header/pthread/cbindgen.toml ++++ b/src/header/pthread/cbindgen.toml +@@ -8,6 +8,7 @@ cpp_compat = true + [export.rename] + "timespec" = "struct timespec" + "sched_param" = "struct sched_param" ++"cpu_set_t" = "struct cpu_set_t" + + [enum] + prefix_with_name = true +diff --git a/src/header/pthread/mod.rs b/src/header/pthread/mod.rs +index c742a42..ade947e 100644 +--- a/src/header/pthread/mod.rs ++++ b/src/header/pthread/mod.rs +@@ -3,23 +3,140 @@ + //! See . + + use alloc::collections::LinkedList; +-use core::{cell::Cell, ptr::NonNull}; ++use core::{cell::Cell, mem::size_of, ptr::NonNull}; ++ ++#[cfg(target_os = "redox")] ++use redox_rt::proc::FdGuard; ++ header::errno::EINVAL, ++#[cfg(target_os = "linux")] ++use sc::syscall; ++#[cfg(target_os = "redox")] ++use syscall; + + use crate::{ + error::Errno, +- header::{bits_timespec::timespec, sched::*}, ++ header::{ ++ bits_timespec::timespec, ++ errno::{EINVAL, ERANGE}, ++ sched::*, ++ }, + platform::{ ++#[cfg(target_os = "linux")] ++use crate::platform::sys::e_raw; ++ + Pal, Sys, + types::{ +- c_int, c_uchar, c_uint, c_void, clockid_t, pthread_attr_t, pthread_barrier_t, ++ c_char, c_int, c_uchar, c_uint, c_void, clockid_t, pthread_attr_t, pthread_barrier_t, + pthread_barrierattr_t, pthread_cond_t, pthread_condattr_t, pthread_key_t, + pthread_mutex_t, pthread_mutexattr_t, pthread_once_t, pthread_rwlock_t, + pthread_rwlockattr_t, pthread_spinlock_t, pthread_t, size_t, + }, ++const RLCT_AFFINITY_BYTES: usize = size_of::(); ++const 
RLCT_MAX_AFFINITY_CPUS: usize = u64::BITS as usize; ++ ++fn cpuset_bytes<'a>(cpusetsize: size_t, cpuset: *const cpu_set_t) -> Result<&'a [u8], Errno> { ++ if cpuset.is_null() || !(RLCT_AFFINITY_BYTES..=size_of::()).contains(&cpusetsize) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(unsafe { core::slice::from_raw_parts(cpuset.cast::(), cpusetsize) }) ++} ++ ++fn cpuset_bytes_mut<'a>( ++ cpusetsize: size_t, ++ cpuset: *mut cpu_set_t, ++) -> Result<&'a mut [u8], Errno> { ++ if cpuset.is_null() || !(RLCT_AFFINITY_BYTES..=size_of::()).contains(&cpusetsize) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(unsafe { core::slice::from_raw_parts_mut(cpuset.cast::(), cpusetsize) }) ++} ++ ++fn cpuset_to_u64(cpusetsize: size_t, cpuset: *const cpu_set_t) -> Result { ++ let bytes = cpuset_bytes(cpusetsize, cpuset)?; ++ let mut mask = 0_u64; ++ ++ for (byte_index, byte) in bytes.iter().copied().enumerate() { ++ for bit in 0..u8::BITS as usize { ++ if byte & (1 << bit) == 0 { ++ continue; ++ } ++ ++ let cpu = byte_index * u8::BITS as usize + bit; ++ if cpu >= RLCT_MAX_AFFINITY_CPUS { ++ return Err(Errno(EINVAL)); ++ } ++ ++ mask |= 1_u64 << cpu; ++ } ++ } ++ ++ Ok(mask) ++} ++ ++fn copy_u64_to_cpuset(mask: u64, cpusetsize: size_t, cpuset: *mut cpu_set_t) -> Result<(), Errno> { ++ let bytes = cpuset_bytes_mut(cpusetsize, cpuset)?; ++ bytes.fill(0); ++ ++ for (byte_index, dst) in bytes.iter_mut().take(RLCT_AFFINITY_BYTES).enumerate() { ++ *dst = (mask >> (byte_index * u8::BITS as usize)) as u8; ++ } ++ ++ Ok(()) ++} ++ ++#[cfg(target_os = "redox")] ++fn redox_set_thread_affinity(thread: &pthread::Pthread, mask: u64) -> Result<(), Errno> { ++ let mut kernel_cpuset = cpu_set_t::default(); ++ kernel_cpuset.__bits[0] = mask; ++ ++ let handle = FdGuard::new(unsafe { ++ syscall::dup(thread.os_tid.get().read().thread_fd, b"sched-affinity")? 
++ }); ++ let _ = handle.write(unsafe { ++ core::slice::from_raw_parts( ++ core::ptr::from_ref(&kernel_cpuset).cast::(), ++ size_of::(), ++ ) ++ })?; ++ ++ Ok(()) ++} ++ ++#[cfg(target_os = "redox")] ++fn redox_get_thread_affinity(thread: &pthread::Pthread) -> Result { ++ let handle = FdGuard::new(unsafe { ++ syscall::dup(thread.os_tid.get().read().thread_fd, b"sched-affinity")? ++ }); ++ let mut kernel_cpuset = cpu_set_t::default(); ++ let _ = handle.read(unsafe { ++ core::slice::from_raw_parts_mut( ++ core::ptr::from_mut(&mut kernel_cpuset).cast::(), ++ size_of::(), ++ ) ++ })?; ++ ++ if kernel_cpuset.__bits[1..].iter().any(|bits| *bits != 0) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(kernel_cpuset.__bits[0]) ++} ++ + }, + pthread, + }; + ++#[cfg(target_os = "linux")] ++use crate::platform::sys::e_raw; ++ ++#[cfg(target_os = "linux")] ++use crate::platform::sys::e_raw; ++ ++#[cfg(target_os = "linux")] ++use crate::platform::sys::e_raw; ++ + pub fn e(result: Result<(), Errno>) -> i32 { + match result { + Ok(()) => 0, +@@ -27,6 +144,276 @@ pub fn e(result: Result<(), Errno>) -> i32 { + } + } + ++const RLCT_AFFINITY_BYTES: usize = size_of::(); ++const RLCT_MAX_AFFINITY_CPUS: usize = u64::BITS as usize; ++ ++fn cpuset_bytes<'a>(cpusetsize: size_t, cpuset: *const cpu_set_t) -> Result<&'a [u8], Errno> { ++ if cpuset.is_null() || !(RLCT_AFFINITY_BYTES..=size_of::()).contains(&cpusetsize) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(unsafe { core::slice::from_raw_parts(cpuset.cast::(), cpusetsize) }) ++} ++ ++fn cpuset_bytes_mut<'a>(cpusetsize: size_t, cpuset: *mut cpu_set_t) -> Result<&'a mut [u8], Errno> { ++ if cpuset.is_null() || !(RLCT_AFFINITY_BYTES..=size_of::()).contains(&cpusetsize) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(unsafe { core::slice::from_raw_parts_mut(cpuset.cast::(), cpusetsize) }) ++} ++ ++fn cpuset_to_u64(cpusetsize: size_t, cpuset: *const cpu_set_t) -> Result { ++ let bytes = cpuset_bytes(cpusetsize, cpuset)?; ++ let mut mask = 0_u64; ++ 
++ for (byte_index, byte) in bytes.iter().copied().enumerate() { ++ for bit in 0..u8::BITS as usize { ++ if byte & (1 << bit) == 0 { ++ continue; ++ } ++ ++ let cpu = byte_index * u8::BITS as usize + bit; ++ if cpu >= RLCT_MAX_AFFINITY_CPUS { ++ return Err(Errno(EINVAL)); ++ } ++ ++ mask |= 1_u64 << cpu; ++ } ++ } ++ ++ Ok(mask) ++} ++ ++fn copy_u64_to_cpuset(mask: u64, cpusetsize: size_t, cpuset: *mut cpu_set_t) -> Result<(), Errno> { ++ let bytes = cpuset_bytes_mut(cpusetsize, cpuset)?; ++ bytes.fill(0); ++ ++ for (byte_index, dst) in bytes.iter_mut().take(RLCT_AFFINITY_BYTES).enumerate() { ++ *dst = (mask >> (byte_index * u8::BITS as usize)) as u8; ++ } ++ ++ Ok(()) ++} ++ ++#[cfg(target_os = "redox")] ++fn redox_set_thread_affinity(thread: &pthread::Pthread, mask: u64) -> Result<(), Errno> { ++ let mut kernel_cpuset = cpu_set_t::default(); ++ kernel_cpuset.__bits[0] = mask; ++ ++ let handle = FdGuard::new(unsafe { ++ syscall::dup(thread.os_tid.get().read().thread_fd, b"sched-affinity")? ++ }); ++ let _ = handle.write(unsafe { ++ core::slice::from_raw_parts( ++ core::ptr::from_ref(&kernel_cpuset).cast::(), ++ size_of::(), ++ ) ++ })?; ++ ++ Ok(()) ++} ++ ++#[cfg(target_os = "redox")] ++fn redox_get_thread_affinity(thread: &pthread::Pthread) -> Result { ++ let handle = FdGuard::new(unsafe { ++ syscall::dup(thread.os_tid.get().read().thread_fd, b"sched-affinity")? 
++ }); ++ let mut kernel_cpuset = cpu_set_t::default(); ++ let _ = handle.read(unsafe { ++ core::slice::from_raw_parts_mut( ++ core::ptr::from_mut(&mut kernel_cpuset).cast::(), ++ size_of::(), ++ ) ++ })?; ++ ++ if kernel_cpuset.__bits[1..].iter().any(|bits| *bits != 0) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(kernel_cpuset.__bits[0]) ++} ++ ++const RLCT_AFFINITY_BYTES: usize = size_of::(); ++const RLCT_MAX_AFFINITY_CPUS: usize = u64::BITS as usize; ++ ++fn cpuset_bytes<'a>(cpusetsize: size_t, cpuset: *const cpu_set_t) -> Result<&'a [u8], Errno> { ++ if cpuset.is_null() || !(RLCT_AFFINITY_BYTES..=size_of::()).contains(&cpusetsize) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(unsafe { core::slice::from_raw_parts(cpuset.cast::(), cpusetsize) }) ++} ++ ++fn cpuset_bytes_mut<'a>(cpusetsize: size_t, cpuset: *mut cpu_set_t) -> Result<&'a mut [u8], Errno> { ++ if cpuset.is_null() || !(RLCT_AFFINITY_BYTES..=size_of::()).contains(&cpusetsize) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(unsafe { core::slice::from_raw_parts_mut(cpuset.cast::(), cpusetsize) }) ++} ++ ++fn cpuset_to_u64(cpusetsize: size_t, cpuset: *const cpu_set_t) -> Result { ++ let bytes = cpuset_bytes(cpusetsize, cpuset)?; ++ let mut mask = 0_u64; ++ ++ for (byte_index, byte) in bytes.iter().copied().enumerate() { ++ for bit in 0..u8::BITS as usize { ++ if byte & (1 << bit) == 0 { ++ continue; ++ } ++ ++ let cpu = byte_index * u8::BITS as usize + bit; ++ if cpu >= RLCT_MAX_AFFINITY_CPUS { ++ return Err(Errno(EINVAL)); ++ } ++ ++ mask |= 1_u64 << cpu; ++ } ++ } ++ ++ Ok(mask) ++} ++ ++fn copy_u64_to_cpuset(mask: u64, cpusetsize: size_t, cpuset: *mut cpu_set_t) -> Result<(), Errno> { ++ let bytes = cpuset_bytes_mut(cpusetsize, cpuset)?; ++ bytes.fill(0); ++ ++ for (byte_index, dst) in bytes.iter_mut().take(RLCT_AFFINITY_BYTES).enumerate() { ++ *dst = (mask >> (byte_index * u8::BITS as usize)) as u8; ++ } ++ ++ Ok(()) ++} ++ ++#[cfg(target_os = "redox")] ++fn redox_set_thread_affinity(thread: 
&pthread::Pthread, mask: u64) -> Result<(), Errno> { ++ let mut kernel_cpuset = cpu_set_t::default(); ++ kernel_cpuset.__bits[0] = mask; ++ ++ let handle = FdGuard::new(unsafe { ++ syscall::dup(thread.os_tid.get().read().thread_fd, b"sched-affinity")? ++ }); ++ let _ = handle.write(unsafe { ++ core::slice::from_raw_parts( ++ core::ptr::from_ref(&kernel_cpuset).cast::(), ++ size_of::(), ++ ) ++ })?; ++ ++ Ok(()) ++} ++ ++#[cfg(target_os = "redox")] ++fn redox_get_thread_affinity(thread: &pthread::Pthread) -> Result { ++ let handle = FdGuard::new(unsafe { ++ syscall::dup(thread.os_tid.get().read().thread_fd, b"sched-affinity")? ++ }); ++ let mut kernel_cpuset = cpu_set_t::default(); ++ let _ = handle.read(unsafe { ++ core::slice::from_raw_parts_mut( ++ core::ptr::from_mut(&mut kernel_cpuset).cast::(), ++ size_of::(), ++ ) ++ })?; ++ ++ if kernel_cpuset.__bits[1..].iter().any(|bits| *bits != 0) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(kernel_cpuset.__bits[0]) ++} ++ ++const RLCT_AFFINITY_BYTES: usize = size_of::(); ++const RLCT_MAX_AFFINITY_CPUS: usize = u64::BITS as usize; ++ ++fn cpuset_bytes<'a>(cpusetsize: size_t, cpuset: *const cpu_set_t) -> Result<&'a [u8], Errno> { ++ if cpuset.is_null() || !(RLCT_AFFINITY_BYTES..=size_of::()).contains(&cpusetsize) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(unsafe { core::slice::from_raw_parts(cpuset.cast::(), cpusetsize) }) ++} ++ ++fn cpuset_bytes_mut<'a>(cpusetsize: size_t, cpuset: *mut cpu_set_t) -> Result<&'a mut [u8], Errno> { ++ if cpuset.is_null() || !(RLCT_AFFINITY_BYTES..=size_of::()).contains(&cpusetsize) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(unsafe { core::slice::from_raw_parts_mut(cpuset.cast::(), cpusetsize) }) ++} ++ ++fn cpuset_to_u64(cpusetsize: size_t, cpuset: *const cpu_set_t) -> Result { ++ let bytes = cpuset_bytes(cpusetsize, cpuset)?; ++ let mut mask = 0_u64; ++ ++ for (byte_index, byte) in bytes.iter().copied().enumerate() { ++ for bit in 0..u8::BITS as usize { ++ if byte & (1 << bit) == 0 
{ ++ continue; ++ } ++ ++ let cpu = byte_index * u8::BITS as usize + bit; ++ if cpu >= RLCT_MAX_AFFINITY_CPUS { ++ return Err(Errno(EINVAL)); ++ } ++ ++ mask |= 1_u64 << cpu; ++ } ++ } ++ ++ Ok(mask) ++} ++ ++fn copy_u64_to_cpuset(mask: u64, cpusetsize: size_t, cpuset: *mut cpu_set_t) -> Result<(), Errno> { ++ let bytes = cpuset_bytes_mut(cpusetsize, cpuset)?; ++ bytes.fill(0); ++ ++ for (byte_index, dst) in bytes.iter_mut().take(RLCT_AFFINITY_BYTES).enumerate() { ++ *dst = (mask >> (byte_index * u8::BITS as usize)) as u8; ++ } ++ ++ Ok(()) ++} ++ ++#[cfg(target_os = "redox")] ++fn redox_set_thread_affinity(thread: &pthread::Pthread, mask: u64) -> Result<(), Errno> { ++ let mut kernel_cpuset = cpu_set_t::default(); ++ kernel_cpuset.__bits[0] = mask; ++ ++ let handle = FdGuard::new(unsafe { ++ syscall::dup(thread.os_tid.get().read().thread_fd, b"sched-affinity")? ++ }); ++ let _ = handle.write(unsafe { ++ core::slice::from_raw_parts( ++ core::ptr::from_ref(&kernel_cpuset).cast::(), ++ size_of::(), ++ ) ++ })?; ++ ++ Ok(()) ++} ++ ++#[cfg(target_os = "redox")] ++fn redox_get_thread_affinity(thread: &pthread::Pthread) -> Result { ++ let handle = FdGuard::new(unsafe { ++ syscall::dup(thread.os_tid.get().read().thread_fd, b"sched-affinity")? ++ }); ++ let mut kernel_cpuset = cpu_set_t::default(); ++ let _ = handle.read(unsafe { ++ core::slice::from_raw_parts_mut( ++ core::ptr::from_mut(&mut kernel_cpuset).cast::(), ++ size_of::(), ++ ) ++ })?; ++ ++ if kernel_cpuset.__bits[1..].iter().any(|bits| *bits != 0) { ++ return Err(Errno(EINVAL)); ++ } ++ ++ Ok(kernel_cpuset.__bits[0]) ++} ++ + #[derive(Clone)] + pub(crate) struct RlctAttr { + pub detachstate: c_uchar, +@@ -82,6 +469,42 @@ pub use self::attr::*; + + pub mod barrier; + pub use self::barrier::*; ++/// GNU extension. See . 
++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_getaffinity_np( ++ thread: pthread_t, ++ cpusetsize: size_t, ++ cpuset: *mut cpu_set_t, ++) -> c_int { ++ let thread: &pthread::Pthread = unsafe { &*thread.cast() }; ++ ++ let result = { ++ #[cfg(target_os = "redox")] ++ { ++ redox_get_thread_affinity(thread).and_then(|mask| copy_u64_to_cpuset(mask, cpusetsize, cpuset)) ++ } ++ ++ #[cfg(target_os = "linux")] ++ { ++ if cpuset.is_null() { ++ Err(Errno(EINVAL)) ++ } else { ++ e_raw(unsafe { ++ syscall!( ++ SCHED_GETAFFINITY, ++ thread.os_tid.get().read().thread_id, ++ cpusetsize, ++ cpuset.cast::() ++ ) ++ }) ++ .map(|_| ()) ++ } ++ } ++ }; ++ ++ e(result) ++} ++ + + pub mod cond; + pub use self::cond::*; +@@ -131,6 +554,42 @@ pub unsafe extern "C" fn pthread_detach(pthread: pthread_t) -> c_int { + pub extern "C" fn pthread_equal(pthread1: pthread_t, pthread2: pthread_t) -> c_int { + core::ptr::eq(pthread1, pthread2).into() + } ++/// GNU extension. See . ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_setaffinity_np( ++ thread: pthread_t, ++ cpusetsize: size_t, ++ cpuset: *const cpu_set_t, ++) -> c_int { ++ let thread: &pthread::Pthread = unsafe { &*thread.cast() }; ++ ++ let result = { ++ #[cfg(target_os = "redox")] ++ { ++ cpuset_to_u64(cpusetsize, cpuset).and_then(|mask| redox_set_thread_affinity(thread, mask)) ++ } ++ ++ #[cfg(target_os = "linux")] ++ { ++ if cpuset.is_null() { ++ Err(Errno(EINVAL)) ++ } else { ++ e_raw(unsafe { ++ syscall!( ++ SCHED_SETAFFINITY, ++ thread.os_tid.get().read().thread_id, ++ cpusetsize, ++ cpuset.cast::() ++ ) ++ }) ++ .map(|_| ()) ++ } ++ } ++ }; ++ ++ e(result) ++} ++ + + /// See . + #[unsafe(no_mangle)] +@@ -186,6 +645,117 @@ pub unsafe extern "C" fn pthread_getcpuclockid( + } + } + ++/// GNU extension. See . 
++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_getaffinity_np( ++ thread: pthread_t, ++ cpusetsize: size_t, ++ cpuset: *mut cpu_set_t, ++) -> c_int { ++ let thread: &pthread::Pthread = unsafe { &*thread.cast() }; ++ ++ let result = { ++ #[cfg(target_os = "redox")] ++ { ++ redox_get_thread_affinity(thread) ++ .and_then(|mask| copy_u64_to_cpuset(mask, cpusetsize, cpuset)) ++ } ++ ++ #[cfg(target_os = "linux")] ++ { ++ if cpuset.is_null() { ++ Err(Errno(EINVAL)) ++ } else { ++ e_raw(unsafe { ++ syscall!( ++ SCHED_GETAFFINITY, ++ thread.os_tid.get().read().thread_id, ++ cpusetsize, ++ cpuset.cast::() ++ ) ++ }) ++ .map(|_| ()) ++ } ++ } ++ }; ++ ++ e(result) ++} ++ ++/// GNU extension. See . ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_getaffinity_np( ++ thread: pthread_t, ++ cpusetsize: size_t, ++ cpuset: *mut cpu_set_t, ++) -> c_int { ++ let thread: &pthread::Pthread = unsafe { &*thread.cast() }; ++ ++ let result = { ++ #[cfg(target_os = "redox")] ++ { ++ redox_get_thread_affinity(thread) ++ .and_then(|mask| copy_u64_to_cpuset(mask, cpusetsize, cpuset)) ++ } ++ ++ #[cfg(target_os = "linux")] ++ { ++ if cpuset.is_null() { ++ Err(Errno(EINVAL)) ++ } else { ++ e_raw(unsafe { ++ syscall!( ++ SCHED_GETAFFINITY, ++ thread.os_tid.get().read().thread_id, ++ cpusetsize, ++ cpuset.cast::() ++ ) ++ }) ++ .map(|_| ()) ++ } ++ } ++ }; ++ ++ e(result) ++} ++ ++/// GNU extension. See . 
++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_getaffinity_np( ++ thread: pthread_t, ++ cpusetsize: size_t, ++ cpuset: *mut cpu_set_t, ++) -> c_int { ++ let thread: &pthread::Pthread = unsafe { &*thread.cast() }; ++ ++ let result = { ++ #[cfg(target_os = "redox")] ++ { ++ redox_get_thread_affinity(thread) ++ .and_then(|mask| copy_u64_to_cpuset(mask, cpusetsize, cpuset)) ++ } ++ ++ #[cfg(target_os = "linux")] ++ { ++ if cpuset.is_null() { ++ Err(Errno(EINVAL)) ++ } else { ++ e_raw(unsafe { ++ syscall!( ++ SCHED_GETAFFINITY, ++ thread.os_tid.get().read().thread_id, ++ cpusetsize, ++ cpuset.cast::() ++ ) ++ }) ++ .map(|_| ()) ++ } ++ } ++ }; ++ ++ e(result) ++} ++ + /// See . + #[unsafe(no_mangle)] + pub unsafe extern "C" fn pthread_getschedparam( +@@ -235,6 +805,117 @@ pub unsafe extern "C" fn pthread_self() -> pthread_t { + core::ptr::from_ref(unsafe { pthread::current_thread().unwrap_unchecked() }) as *mut _ + } + ++/// GNU extension. See . ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_setaffinity_np( ++ thread: pthread_t, ++ cpusetsize: size_t, ++ cpuset: *const cpu_set_t, ++) -> c_int { ++ let thread: &pthread::Pthread = unsafe { &*thread.cast() }; ++ ++ let result = { ++ #[cfg(target_os = "redox")] ++ { ++ cpuset_to_u64(cpusetsize, cpuset) ++ .and_then(|mask| redox_set_thread_affinity(thread, mask)) ++ } ++ ++ #[cfg(target_os = "linux")] ++ { ++ if cpuset.is_null() { ++ Err(Errno(EINVAL)) ++ } else { ++ e_raw(unsafe { ++ syscall!( ++ SCHED_SETAFFINITY, ++ thread.os_tid.get().read().thread_id, ++ cpusetsize, ++ cpuset.cast::() ++ ) ++ }) ++ .map(|_| ()) ++ } ++ } ++ }; ++ ++ e(result) ++} ++ ++/// GNU extension. See . 
++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_setaffinity_np( ++ thread: pthread_t, ++ cpusetsize: size_t, ++ cpuset: *const cpu_set_t, ++) -> c_int { ++ let thread: &pthread::Pthread = unsafe { &*thread.cast() }; ++ ++ let result = { ++ #[cfg(target_os = "redox")] ++ { ++ cpuset_to_u64(cpusetsize, cpuset) ++ .and_then(|mask| redox_set_thread_affinity(thread, mask)) ++ } ++ ++ #[cfg(target_os = "linux")] ++ { ++ if cpuset.is_null() { ++ Err(Errno(EINVAL)) ++ } else { ++ e_raw(unsafe { ++ syscall!( ++ SCHED_SETAFFINITY, ++ thread.os_tid.get().read().thread_id, ++ cpusetsize, ++ cpuset.cast::() ++ ) ++ }) ++ .map(|_| ()) ++ } ++ } ++ }; ++ ++ e(result) ++} ++ ++/// GNU extension. See . ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_setaffinity_np( ++ thread: pthread_t, ++ cpusetsize: size_t, ++ cpuset: *const cpu_set_t, ++) -> c_int { ++ let thread: &pthread::Pthread = unsafe { &*thread.cast() }; ++ ++ let result = { ++ #[cfg(target_os = "redox")] ++ { ++ cpuset_to_u64(cpusetsize, cpuset) ++ .and_then(|mask| redox_set_thread_affinity(thread, mask)) ++ } ++ ++ #[cfg(target_os = "linux")] ++ { ++ if cpuset.is_null() { ++ Err(Errno(EINVAL)) ++ } else { ++ e_raw(unsafe { ++ syscall!( ++ SCHED_SETAFFINITY, ++ thread.os_tid.get().read().thread_id, ++ cpusetsize, ++ cpuset.cast::() ++ ) ++ }) ++ .map(|_| ()) ++ } ++ } ++ }; ++ ++ e(result) ++} ++ + /// See . + #[unsafe(no_mangle)] + pub unsafe extern "C" fn pthread_setcancelstate(state: c_int, oldstate: *mut c_int) -> c_int { +@@ -307,6 +988,27 @@ pub unsafe extern "C" fn pthread_testcancel() { + unsafe { pthread::testcancel() }; + } + ++/// ++/// ++/// Non-standard GNU extension. Prefer `sched_yield()` instead. ++pub extern "C" fn pthread_yield() { ++ let _ = Sys::sched_yield(); ++} ++ ++/// ++/// ++/// Non-standard GNU extension. Prefer `sched_yield()` instead. ++pub extern "C" fn pthread_yield() { ++ let _ = Sys::sched_yield(); ++} ++ ++/// ++/// ++/// Non-standard GNU extension. 
Prefer `sched_yield()` instead. ++pub extern "C" fn pthread_yield() { ++ let _ = Sys::sched_yield(); ++} ++ + // Must be the same struct as defined in the pthread_cleanup_push macro. + #[repr(C)] + pub(crate) struct CleanupLinkedListEntry { +@@ -350,3 +1052,242 @@ pub(crate) unsafe fn run_destructor_stack() { + (entry.routine)(entry.arg); + } + } ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> c_int { ++ if name.is_null() { ++ return EINVAL; ++ } ++ ++ let cstr = unsafe { core::ffi::CStr::from_ptr(name) }; ++ let name_bytes = cstr.to_bytes(); ++ let len = name_bytes.len().min(31); ++ ++ #[cfg(target_os = "redox")] ++ { ++ let thread = unsafe { &*thread.cast::() }; ++ let os_tid = unsafe { thread.os_tid.get().read() }; ++ let path = alloc::format!("proc:{}/name\0", os_tid.thread_fd); ++ let path_cstr = core::ffi::CStr::from_bytes_with_nul(path.as_bytes()).unwrap(); ++ let fd = match Sys::open(path_cstr.into(), crate::header::fcntl::O_WRONLY, 0) { ++ Ok(fd) => fd, ++ Err(Errno(code)) => return code, ++ }; ++ ++ let result = match Sys::write(fd, &name_bytes[..len]) { ++ Ok(written) if written == len => 0, ++ Ok(_) => crate::header::errno::EIO, ++ Err(Errno(code)) => code, ++ }; ++ let _ = Sys::close(fd); ++ result ++ } ++ #[cfg(not(target_os = "redox"))] ++ { ++ let _ = thread; ++ 0 ++ } ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_getname_np( ++ thread: pthread_t, ++ name: *mut c_char, ++ len: size_t, ++) -> c_int { ++ if name.is_null() { ++ return EINVAL; ++ } ++ if len == 0 { ++ return ERANGE; ++ } ++ ++ #[cfg(target_os = "redox")] ++ { ++ let thread = unsafe { &*thread.cast::() }; ++ let os_tid = unsafe { thread.os_tid.get().read() }; ++ let path = alloc::format!("proc:{}/name\0", os_tid.thread_fd); ++ let path_cstr = core::ffi::CStr::from_bytes_with_nul(path.as_bytes()).unwrap(); ++ let fd = match Sys::open(path_cstr.into(), crate::header::fcntl::O_RDONLY, 0) { ++ Ok(fd) => fd, 
++ Err(Errno(code)) => return code, ++ }; ++ ++ let mut buf = [0u8; 31]; ++ let result = match Sys::read(fd, &mut buf) { ++ Ok(read) if read < len => { ++ unsafe { core::ptr::copy_nonoverlapping(buf.as_ptr(), name.cast(), read) }; ++ unsafe { *name.add(read) = 0 }; ++ 0 ++ } ++ Ok(_) => ERANGE, ++ Err(Errno(code)) => code, ++ }; ++ let _ = Sys::close(fd); ++ result ++ } ++ #[cfg(not(target_os = "redox"))] ++ { ++ let _ = thread; ++ unsafe { *name = 0 }; ++ 0 ++ } ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> c_int { ++ if name.is_null() { ++ return EINVAL; ++ } ++ ++ let cstr = unsafe { core::ffi::CStr::from_ptr(name) }; ++ let name_bytes = cstr.to_bytes(); ++ let len = name_bytes.len().min(31); ++ ++ #[cfg(target_os = "redox")] ++ { ++ let thread = unsafe { &*thread.cast::() }; ++ let os_tid = unsafe { thread.os_tid.get().read() }; ++ let path = alloc::format!("proc:{}/name", os_tid.thread_fd); ++ let fd = match Sys::open(&path, crate::header::fcntl::O_WRONLY, 0) { ++ Ok(fd) => fd, ++ Err(Errno(code)) => return code, ++ }; ++ ++ let result = match Sys::write(fd, &name_bytes[..len]) { ++ Ok(written) if written == len => 0, ++ Ok(_) => crate::header::errno::EIO, ++ Err(Errno(code)) => code, ++ }; ++ let _ = Sys::close(fd); ++ result ++ } ++ #[cfg(not(target_os = "redox"))] ++ { ++ let _ = thread; ++ 0 ++ } ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_getname_np( ++ thread: pthread_t, ++ name: *mut c_char, ++ len: size_t, ++) -> c_int { ++ if name.is_null() { ++ return EINVAL; ++ } ++ if len == 0 { ++ return ERANGE; ++ } ++ ++ #[cfg(target_os = "redox")] ++ { ++ let thread = unsafe { &*thread.cast::() }; ++ let os_tid = unsafe { thread.os_tid.get().read() }; ++ let path = alloc::format!("proc:{}/name", os_tid.thread_fd); ++ let fd = match Sys::open(&path, crate::header::fcntl::O_RDONLY, 0) { ++ Ok(fd) => fd, ++ Err(Errno(code)) => return code, ++ }; ++ ++ let mut buf = [0u8; 
31]; ++ let result = match Sys::read(fd, &mut buf) { ++ Ok(read) if read < len => { ++ unsafe { core::ptr::copy_nonoverlapping(buf.as_ptr(), name.cast(), read) }; ++ unsafe { *name.add(read) = 0 }; ++ 0 ++ } ++ Ok(_) => ERANGE, ++ Err(Errno(code)) => code, ++ }; ++ let _ = Sys::close(fd); ++ result ++ } ++ #[cfg(not(target_os = "redox"))] ++ { ++ let _ = thread; ++ unsafe { *name = 0 }; ++ 0 ++ } ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_setname_np(thread: pthread_t, name: *const c_char) -> c_int { ++ if name.is_null() { ++ return EINVAL; ++ } ++ ++ let cstr = unsafe { core::ffi::CStr::from_ptr(name) }; ++ let name_bytes = cstr.to_bytes(); ++ let len = name_bytes.len().min(31); ++ ++ #[cfg(target_os = "redox")] ++ { ++ let thread = unsafe { &*thread.cast::() }; ++ let os_tid = unsafe { thread.os_tid.get().read() }; ++ let path = alloc::format!("proc:{}/name", os_tid.thread_fd); ++ let fd = match Sys::open(&path, crate::header::fcntl::O_WRONLY, 0) { ++ Ok(fd) => fd, ++ Err(Errno(code)) => return code, ++ }; ++ ++ let result = match Sys::write(fd, &name_bytes[..len]) { ++ Ok(written) if written == len => 0, ++ Ok(_) => crate::header::errno::EIO, ++ Err(Errno(code)) => code, ++ }; ++ let _ = Sys::close(fd); ++ result ++ } ++ #[cfg(not(target_os = "redox"))] ++ { ++ let _ = thread; ++ 0 ++ } ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn pthread_getname_np( ++ thread: pthread_t, ++ name: *mut c_char, ++ len: size_t, ++) -> c_int { ++ if name.is_null() { ++ return EINVAL; ++ } ++ if len == 0 { ++ return ERANGE; ++ } ++ ++ #[cfg(target_os = "redox")] ++ { ++ let thread = unsafe { &*thread.cast::() }; ++ let os_tid = unsafe { thread.os_tid.get().read() }; ++ let path = alloc::format!("proc:{}/name", os_tid.thread_fd); ++ let fd = match Sys::open(&path, crate::header::fcntl::O_RDONLY, 0) { ++ Ok(fd) => fd, ++ Err(Errno(code)) => return code, ++ }; ++ ++ let mut buf = [0u8; 31]; ++ let result = match Sys::read(fd, &mut buf) { ++ Ok(read) if 
read < len => { ++ unsafe { core::ptr::copy_nonoverlapping(buf.as_ptr(), name.cast(), read) }; ++ unsafe { *name.add(read) = 0 }; ++ 0 ++ } ++ Ok(_) => ERANGE, ++ Err(Errno(code)) => code, ++ }; ++ let _ = Sys::close(fd); ++ result ++ } ++ #[cfg(not(target_os = "redox"))] ++ { ++ let _ = thread; ++ unsafe { *name = 0 }; ++ 0 ++ } ++} +diff --git a/src/header/sched/cbindgen.toml b/src/header/sched/cbindgen.toml +index b361fa4..d2e6130 100644 +--- a/src/header/sched/cbindgen.toml ++++ b/src/header/sched/cbindgen.toml +@@ -5,7 +5,7 @@ + # - "[SS|TSP] The header shall define the time_t type as described in ." + # - "The header shall define the timespec structure as described in ." + # - "Inclusion of the header may make visible all symbols from the header." +-sys_includes = ["sys/types.h"] ++sys_includes = ["sys/types.h", "stdint.h"] + include_guard = "_RELIBC_SCHED_H" + after_includes = """ + #include // for timespec +@@ -20,3 +20,31 @@ prefix_with_name = true + + [export.rename] + "timespec" = "struct timespec" ++ ++[export] ++include = [ ++ "sched_param", ++ "cpu_set_t", ++ "sched_get_priority_max", ++ "sched_get_priority_min", ++ "sched_getparam", ++ "sched_getscheduler", ++ "sched_rr_get_interval", ++ "sched_setparam", ++ "sched_setscheduler", ++ "sched_yield", ++] ++ ++[export] ++include = [ ++ "sched_param", ++ "cpu_set_t", ++ "sched_get_priority_max", ++ "sched_get_priority_min", ++ "sched_getparam", ++ "sched_getscheduler", ++ "sched_rr_get_interval", ++ "sched_setparam", ++ "sched_setscheduler", ++ "sched_yield", ++] +diff --git a/src/header/sched/mod.rs b/src/header/sched/mod.rs +index bcdd346..e7865ca 100644 +--- a/src/header/sched/mod.rs ++++ b/src/header/sched/mod.rs +@@ -4,12 +4,14 @@ + + use crate::{ + error::ResultExt, +- header::bits_timespec::timespec, ++ header::{bits_timespec::timespec, errno}, + platform::{ +- Pal, Sys, ++ self, Pal, Sys, + types::{c_int, pid_t}, + }, + }; ++pub const CPU_SETSIZE: usize = 1024; ++ + + // TODO: There are 
extensions, but adding more member is breaking ABI for pthread_attr_t + /// See . +@@ -18,6 +20,13 @@ use crate::{ + pub struct sched_param { + pub sched_priority: c_int, + } ++/// Linux-compatible CPU affinity mask storage. ++#[repr(C)] ++#[derive(Clone, Copy, Debug, Default)] ++pub struct cpu_set_t { ++ pub __bits: [u64; 16], ++} ++ + + /// See . + pub const SCHED_FIFO: c_int = 0; +@@ -29,31 +38,70 @@ pub const SCHED_OTHER: c_int = 2; + /// See . + // #[unsafe(no_mangle)] + pub extern "C" fn sched_get_priority_max(policy: c_int) -> c_int { +- todo!() ++ match policy { ++ SCHED_FIFO | SCHED_RR => 99, ++ SCHED_OTHER => 0, ++ _ => { ++ platform::ERRNO.set(errno::EINVAL); ++ -1 ++ } ++ } + } + + /// See . + // #[unsafe(no_mangle)] + pub extern "C" fn sched_get_priority_min(policy: c_int) -> c_int { +- todo!() ++ match policy { ++ SCHED_FIFO | SCHED_RR => 0, ++ SCHED_OTHER => 0, ++ _ => { ++ platform::ERRNO.set(errno::EINVAL); ++ -1 ++ } ++ } + } + + /// See . + // #[unsafe(no_mangle)] + pub unsafe extern "C" fn sched_getparam(pid: pid_t, param: *mut sched_param) -> c_int { +- todo!() ++ if param.is_null() { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ // Redox has no real-time scheduler; return default params ++ (*param).sched_priority = 0; ++ 0 + } + + /// See . + // #[unsafe(no_mangle)] + pub extern "C" fn sched_rr_get_interval(pid: pid_t, time: *const timespec) -> c_int { +- todo!() ++ if time.is_null() { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ // Redox has no real-time scheduler; report a nominal 1-second round-robin interval ++ unsafe { ++ (*(time as *mut timespec)).tv_sec = 1; ++ (*(time as *mut timespec)).tv_nsec = 0; ++ } ++ 0 + } + + /// See . 
+ // #[unsafe(no_mangle)] + pub unsafe extern "C" fn sched_setparam(pid: pid_t, param: *const sched_param) -> c_int { +- todo!() ++ if param.is_null() { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ let priority = (*param).sched_priority; ++ if priority < 0 || priority > 99 { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ // Redox has no real-time scheduler; validate and succeed as a no-op ++ 0 + } + + /// See . +@@ -63,7 +111,25 @@ pub extern "C" fn sched_setscheduler( + policy: c_int, + param: *const sched_param, + ) -> c_int { +- todo!() ++ if param.is_null() { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ match policy { ++ SCHED_FIFO | SCHED_RR | SCHED_OTHER => { ++ let priority = unsafe { (*param).sched_priority }; ++ if priority < 0 || priority > 99 { ++ platform::ERRNO.set(errno::EINVAL); ++ return -1; ++ } ++ // Redox has no real-time scheduler; validate and succeed as a no-op ++ 0 ++ } ++ _ => { ++ platform::ERRNO.set(errno::EINVAL); ++ -1 ++ } ++ } + } + + /// See . +@@ -74,3 +140,6 @@ pub extern "C" fn sched_yield() -> c_int { + + #[unsafe(no_mangle)] + pub unsafe extern "C" fn cbindgen_stupid_struct_user_for_sched_param(_: sched_param) {} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn cbindgen_stupid_struct_user_for_cpu_set_t(_: cpu_set_t) {} +diff --git a/src/header/signal/mod.rs b/src/header/signal/mod.rs +index f049573..f3d665c 100644 +--- a/src/header/signal/mod.rs ++++ b/src/header/signal/mod.rs +@@ -2,7 +2,10 @@ + //! + //! See . + +-use core::{mem, ptr}; ++use core::{ ++ mem, ptr, ++ sync::atomic::Ordering, ++}; + + use cbitset::BitSet; + +@@ -32,6 +35,9 @@ pub mod sys; + #[path = "redox.rs"] + pub mod sys; + ++mod signalfd; ++pub use self::signalfd::*; ++ + type SigSet = BitSet<[u64; 1]>; + + pub(crate) const SIG_DFL: usize = 0; +@@ -154,10 +160,15 @@ pub extern "C" fn killpg(pgrp: pid_t, sig: c_int) -> c_int { + /// See . 
+ #[unsafe(no_mangle)] + pub unsafe extern "C" fn pthread_kill(thread: pthread_t, sig: c_int) -> c_int { +- let os_tid = { +- let pthread = unsafe { &*(thread as *const crate::pthread::Pthread) }; +- unsafe { pthread.os_tid.get().read() } +- }; ++ let pthread = unsafe { &*(thread as *const crate::pthread::Pthread) }; ++ let os_tid = unsafe { pthread.os_tid.get().read() }; ++ let flags = crate::pthread::PthreadFlags::from_bits_retain( ++ pthread.flags.load(Ordering::Acquire), ++ ); ++ if flags.contains(crate::pthread::PthreadFlags::FINISHED) { ++ return errno::ESRCH; ++ } ++ + crate::header::pthread::e(unsafe { Sys::rlct_kill(os_tid, sig as usize) }) + } + +@@ -168,12 +179,10 @@ pub unsafe extern "C" fn pthread_sigmask( + set: *const sigset_t, + oldset: *mut sigset_t, + ) -> c_int { +- // On Linux and Redox, pthread_sigmask and sigprocmask are equivalent +- if unsafe { sigprocmask(how, set, oldset) } == 0 { +- 0 +- } else { +- //TODO: Fix race +- platform::ERRNO.get() ++ let filtered_set = unsafe { set.as_ref().map(|&block| block & !RLCT_SIGNAL_MASK) }; ++ match unsafe { Sys::sigprocmask(how, filtered_set.as_ref(), oldset.as_mut()) } { ++ Ok(()) => 0, ++ Err(errno) => errno.0, + } + } + +diff --git a/src/header/spawn/cbindgen.toml b/src/header/spawn/cbindgen.toml +new file mode 100644 +index 0000000..a9f188f +--- /dev/null ++++ b/src/header/spawn/cbindgen.toml +@@ -0,0 +1,63 @@ ++sys_includes = ["sys/types.h", "signal.h", "sched.h"] ++include_guard = "_SPAWN_H" ++after_includes = """ ++typedef struct { ++ short __flags; ++ pid_t __pgrp; ++ sigset_t __sd; ++ sigset_t __ss; ++ struct sched_param __sp; ++ int __policy; ++ int __pad[16]; ++} posix_spawnattr_t; ++ ++typedef struct { ++ int __allocated; ++ int __used; ++ void *__actions; ++ int __pad[16]; ++} posix_spawn_file_actions_t; ++""" ++trailer = """ ++#define POSIX_SPAWN_RESETIDS 0x01 ++#define POSIX_SPAWN_SETPGROUP 0x02 ++#define POSIX_SPAWN_SETSIGDEF 0x04 ++#define POSIX_SPAWN_SETSIGMASK 0x08 ++#define 
POSIX_SPAWN_SETSCHEDPARAM 0x10 ++#define POSIX_SPAWN_SETSCHEDULER 0x20 ++ ++int posix_spawn(pid_t *__restrict, const char *__restrict, ++ const posix_spawn_file_actions_t *, ++ const posix_spawnattr_t *__restrict, ++ char *const __restrict[], char *const __restrict[]); ++int posix_spawnp(pid_t *__restrict, const char *__restrict, ++ const posix_spawn_file_actions_t *, ++ const posix_spawnattr_t *__restrict, ++ char *const __restrict[], char *const __restrict[]); ++int posix_spawnattr_init(posix_spawnattr_t *); ++int posix_spawnattr_destroy(posix_spawnattr_t *); ++int posix_spawnattr_setflags(posix_spawnattr_t *, short); ++int posix_spawnattr_getflags(const posix_spawnattr_t *__restrict, short *__restrict); ++int posix_spawnattr_setpgroup(posix_spawnattr_t *, pid_t); ++int posix_spawnattr_getpgroup(const posix_spawnattr_t *__restrict, pid_t *__restrict); ++int posix_spawnattr_setsigdefault(posix_spawnattr_t *__restrict, const sigset_t *__restrict); ++int posix_spawnattr_getsigdefault(posix_spawnattr_t *__restrict, sigset_t *__restrict); ++int posix_spawnattr_setsigmask(posix_spawnattr_t *__restrict, const sigset_t *__restrict); ++int posix_spawnattr_getsigmask(posix_spawnattr_t *__restrict, sigset_t *__restrict); ++int posix_spawn_file_actions_init(posix_spawn_file_actions_t *); ++int posix_spawn_file_actions_destroy(posix_spawn_file_actions_t *); ++int posix_spawn_file_actions_adddup2(posix_spawn_file_actions_t *, int, int); ++int posix_spawn_file_actions_addclose(posix_spawn_file_actions_t *, int); ++int posix_spawn_file_actions_addopen(posix_spawn_file_actions_t *__restrict, ++ int, const char *__restrict, int, mode_t); ++""" ++language = "C" ++style = "Type" ++no_includes = true ++cpp_compat = true ++ ++[enum] ++prefix_with_name = true ++ ++[export] ++include = [] +diff --git a/src/header/spawn/mod.rs b/src/header/spawn/mod.rs +new file mode 100644 +index 0000000..84ce717 +--- /dev/null ++++ b/src/header/spawn/mod.rs +@@ -0,0 +1,105 @@ ++//! 
`spawn.h` implementation. See . ++ ++use crate::{ ++ error::{Errno, ResultExt}, ++ header::{ ++ errno::EINVAL, ++ unistd::{execve, fork, _exit}, ++ }, ++ platform::{self, types::{c_char, c_int, c_short, pid_t}}, ++}; ++ ++pub const POSIX_SPAWN_RESETIDS: c_int = 0x01; ++pub const POSIX_SPAWN_SETPGROUP: c_int = 0x02; ++pub const POSIX_SPAWN_SETSCHEDPARAM: c_int = 0x04; ++pub const POSIX_SPAWN_SETSCHEDULER: c_int = 0x08; ++pub const POSIX_SPAWN_SETSIGDEF: c_int = 0x10; ++pub const POSIX_SPAWN_SETSIGMASK: c_int = 0x20; ++pub const POSIX_SPAWN_SETSID: c_int = 0x80; ++ ++#[repr(C)] ++pub struct posix_spawn_file_actions_t { ++ _opaque: [u8; 128], ++} ++ ++#[repr(C)] ++pub struct posix_spawnattr_t { ++ pub flags: c_short, ++ pub pgroup: pid_t, ++ _reserved: [u64; 8], ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawn_file_actions_init( ++ file_actions: *mut posix_spawn_file_actions_t, ++) -> c_int { ++ if file_actions.is_null() { ++ return Err::(Errno(EINVAL)).or_minus_one_errno(); ++ } ++ unsafe { core::ptr::write_bytes(file_actions, 0, 1) }; ++ 0 ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawn_file_actions_destroy( ++ _file_actions: *mut posix_spawn_file_actions_t, ++) -> c_int { 0 } ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawn_file_actions_addopen( ++ _file_actions: *mut posix_spawn_file_actions_t, ++ _fildes: c_int, _path: *const c_char, _oflag: c_int, _mode: c_int, ++) -> c_int { 0 } ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawn_file_actions_addclose( ++ _file_actions: *mut posix_spawn_file_actions_t, ++ _fildes: c_int, ++) -> c_int { 0 } ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawn_file_actions_adddup2( ++ _file_actions: *mut posix_spawn_file_actions_t, ++ _fildes: c_int, _newfildes: c_int, ++) -> c_int { 0 } ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawnattr_init(attr: *mut posix_spawnattr_t) -> c_int { ++ if attr.is_null() { return 
Err::(Errno(EINVAL)).or_minus_one_errno(); } ++ unsafe { core::ptr::write_bytes(attr, 0, 1) }; ++ 0 ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawnattr_destroy(_attr: *mut posix_spawnattr_t) -> c_int { 0 } ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawnp( ++ pid: *mut pid_t, file: *const c_char, ++ file_actions: *const posix_spawn_file_actions_t, ++ attrp: *const posix_spawnattr_t, ++ argv: *const *mut c_char, envp: *const *mut c_char, ++) -> c_int { ++ unsafe { posix_spawn(pid, file, file_actions, attrp, argv, envp) } ++} ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn posix_spawn( ++ pid: *mut pid_t, file: *const c_char, ++ _file_actions: *const posix_spawn_file_actions_t, ++ _attrp: *const posix_spawnattr_t, ++ argv: *const *mut c_char, envp: *const *mut c_char, ++) -> c_int { ++ if pid.is_null() || file.is_null() || argv.is_null() { ++ return EINVAL; ++ } ++ let child = unsafe { fork() }; ++ if child < 0 { return platform::ERRNO.get(); } ++ if child == 0 { ++ unsafe { execve(file, argv, envp); } ++ _exit(127); ++ } ++ unsafe { *pid = child }; ++ 0 ++} ++ +diff --git a/src/header/stdio/mod.rs b/src/header/stdio/mod.rs +index a1d43be..8cb689e 100644 +--- a/src/header/stdio/mod.rs ++++ b/src/header/stdio/mod.rs +@@ -47,6 +47,9 @@ mod default; + pub use self::getdelim::*; + mod getdelim; + ++pub use self::open_memstream::*; ++mod open_memstream; ++ + mod ext; + mod helpers; + pub mod printf; +diff --git a/src/header/sys_timerfd/cbindgen.toml b/src/header/sys_timerfd/cbindgen.toml +new file mode 100644 +index 0000000..e69de29 +diff --git a/src/header/threads/cbindgen.toml b/src/header/threads/cbindgen.toml +new file mode 100644 +index 0000000..3f90606 +--- /dev/null ++++ b/src/header/threads/cbindgen.toml +@@ -0,0 +1,17 @@ ++sys_includes = ["stddef.h", "pthread.h", "time.h"] ++include_guard = "_RELIBC_THREADS_H" ++language = "C" ++style = "Type" ++no_includes = true ++cpp_compat = true ++ ++[export] ++include = [ ++ 
"thrd_t", ++ "mtx_t", ++ "cnd_t", ++ "thrd_start_t", ++] ++ ++[enum] ++prefix_with_name = true +diff --git a/src/header/threads/mod.rs b/src/header/threads/mod.rs +new file mode 100644 +index 0000000..9ab9496 +--- /dev/null ++++ b/src/header/threads/mod.rs +@@ -0,0 +1,31 @@ ++//! `threads.h` implementation — C11 threads type definitions and constants. ++//! ++//! Full C11 threads API (thrd_create, mtx_lock, cnd_wait, etc.) requires ++//! a deeper pthread integration layer; this module provides the type ++//! definitions and constants for C11 header compatibility. ++ ++use crate::platform::types::c_int; ++ ++pub type thrd_start_t = Option c_int>; ++ ++pub const thrd_success: c_int = 0; ++pub const thrd_nomem: c_int = -1; ++pub const thrd_timedout: c_int = -2; ++pub const thrd_busy: c_int = -3; ++pub const thrd_error: c_int = -4; ++ ++pub const mtx_plain: c_int = 0; ++pub const mtx_timed: c_int = 1; ++ ++// Opaque types; sizes match relibc's pthread backing types ++// (pthread_t = *mut c_void = 8 bytes, pthread_mutex_t = 12 bytes, ++// pthread_cond_t = 8 bytes) ++#[repr(C)] ++pub struct thrd_t { _priv: *mut core::ffi::c_void } ++#[repr(C)] ++pub struct mtx_t { _priv: [u8; 12] } ++#[repr(C)] ++pub struct cnd_t { _priv: [u8; 8] } ++ ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn thrd_yield() {} +diff --git a/src/lib.rs b/src/lib.rs +index ea853da..18252ad 100644 +--- a/src/lib.rs ++++ b/src/lib.rs +@@ -57,16 +57,151 @@ pub mod raw_cell; + pub mod start; + pub mod sync; + +-use crate::platform::{Allocator, NEWALLOCATOR}; ++use crate::platform::{Allocator, NEWALLOCATOR, Pal, Sys}; + + #[global_allocator] + static ALLOCATOR: Allocator = NEWALLOCATOR; + ++const MAX_FATAL_BACKTRACE_FRAMES: usize = 16; ++const MAX_FATAL_FRAME_STRIDE: usize = 1024 * 1024; ++ ++#[inline(never)] ++fn write_process_thread_identity(w: &mut platform::FileWriter) { ++ use core::fmt::Write; ++ ++ let pid = Sys::getpid(); ++ let tid = Sys::gettid(); ++ ++ match crate::pthread::current_thread() { 
++ Some(thread) => { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC CONTEXT: pid={} tid={} pthread={:#x}\n", ++ pid, ++ tid, ++ thread as *const _ as usize, ++ )); ++ } ++ None => { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC CONTEXT: pid={} tid={} pthread=\n", ++ pid, tid, ++ )); ++ } ++ } ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++#[inline(never)] ++fn current_frame_pointer() -> *const usize { ++ let frame: *const usize; ++ ++ #[cfg(target_arch = "x86_64")] ++ unsafe { ++ core::arch::asm!("mov {}, rbp", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ #[cfg(target_arch = "x86")] ++ unsafe { ++ core::arch::asm!("mov {}, ebp", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ #[cfg(target_arch = "aarch64")] ++ unsafe { ++ core::arch::asm!("mov {}, x29", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ frame ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++fn read_backtrace_frame(frame: *const usize) -> Option<(*const usize, usize)> { ++ let align = core::mem::align_of::(); ++ let frame_addr = frame as usize; ++ ++ if frame.is_null() || frame_addr % align != 0 { ++ return None; ++ } ++ ++ let next_frame = unsafe { frame.read() } as *const usize; ++ let return_address = unsafe { frame.add(1).read() }; ++ ++ if return_address == 0 { ++ return None; ++ } ++ ++ Some((next_frame, return_address)) ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++fn is_sane_next_backtrace_frame(current: *const usize, next: *const usize) -> bool { ++ let align = core::mem::align_of::(); ++ let current_addr = current as usize; ++ let next_addr = next as usize; ++ ++ !next.is_null() ++ && next_addr % align == 0 ++ && next_addr > current_addr ++ && next_addr - current_addr <= MAX_FATAL_FRAME_STRIDE ++} ++ ++#[inline(never)] ++fn write_best_effort_backtrace(w: &mut platform::FileWriter) { ++ 
use core::fmt::Write; ++ ++ let _ = w.write_str("RELIBC: attempting best-effort backtrace\n"); ++ ++ #[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++ { ++ let mut frame = current_frame_pointer(); ++ let mut wrote_frame = false; ++ ++ for frame_index in 0..MAX_FATAL_BACKTRACE_FRAMES { ++ let Some((next_frame, return_address)) = read_backtrace_frame(frame) else { ++ break; ++ }; ++ ++ wrote_frame = true; ++ let _ = w.write_fmt(format_args!( ++ "RELIBC BACKTRACE[{frame_index:02}]: {:#x}\n", ++ return_address, ++ )); ++ ++ if !is_sane_next_backtrace_frame(frame, next_frame) { ++ break; ++ } ++ ++ frame = next_frame; ++ } ++ ++ if !wrote_frame { ++ let _ = w.write_str("RELIBC: backtrace attempt produced no frames\n"); ++ } ++ } ++ ++ #[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))] ++ { ++ let _ = w.write_str("RELIBC: backtrace unavailable on this architecture\n"); ++ } ++} ++ + #[unsafe(no_mangle)] + pub extern "C" fn relibc_panic(pi: &::core::panic::PanicInfo) -> ! { + use core::fmt::Write; + + let mut w = platform::FileWriter::new(2); ++ ++ if let Some(location) = pi.location() { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC PANIC LOCATION: {}:{}:{}\n", ++ location.file(), ++ location.line(), ++ location.column(), ++ )); ++ } else { ++ let _ = w.write_str("RELIBC PANIC LOCATION: \n"); ++ } ++ ++ write_process_thread_identity(&mut w); + let _ = w.write_fmt(format_args!("RELIBC PANIC: {}\n", pi)); + + core::intrinsics::abort(); +@@ -95,10 +230,12 @@ pub extern "C" fn rust_oom(layout: ::core::alloc::Layout) -> ! 
{ + + let mut w = platform::FileWriter::new(2); + let _ = w.write_fmt(format_args!( +- "RELIBC OOM: {} bytes aligned to {} bytes\n", ++ "RELIBC OOM: {} bytes aligned to {} bytes - process will abort\n", + layout.size(), + layout.align() + )); ++ write_process_thread_identity(&mut w); ++ write_best_effort_backtrace(&mut w); + + core::intrinsics::abort(); + } +@@ -111,7 +248,10 @@ pub extern "C" fn _Unwind_Resume() -> ! { + use core::fmt::Write; + + let mut w = platform::FileWriter::new(2); +- let _ = w.write_str("_Unwind_Resume\n"); ++ let _ = w.write_str( ++ "RELIBC: _Unwind_Resume called - exception propagation failed, aborting\n", ++ ); ++ write_process_thread_identity(&mut w); + + core::intrinsics::abort(); + } +diff --git a/src/platform/redox/mod.rs b/src/platform/redox/mod.rs +index 752339a..73d9af1 100644 +--- a/src/platform/redox/mod.rs ++++ b/src/platform/redox/mod.rs +@@ -669,6 +669,11 @@ impl Pal for Sys { + } + + fn getpriority(which: c_int, who: id_t) -> Result { ++ if is_current_process_priority_target(which, who) { ++ let nice = read_current_process_nice()?; ++ return Ok(20 - nice); ++ } ++ + match redox_rt::sys::posix_getpriority(which, who as u32) { + Ok(kernel_prio) => { + let posix_prio = (kernel_prio as i32 * -1) + 40 as i32; +@@ -1231,7 +1236,12 @@ impl Pal for Sys { + } + + fn setpriority(which: c_int, who: id_t, prio: c_int) -> Result<()> { +- let clamped_prio = prio.clamp(-20, 19); ++ let clamped_prio = prio.clamp(NICE_MIN, NICE_MAX); ++ ++ if is_current_process_priority_target(which, who) { ++ return write_current_process_nice(clamped_prio); ++ } ++ + let kernel_prio = (20 + clamped_prio) as u32; + + match redox_rt::sys::posix_setpriority(which, who as u32, kernel_prio) { +diff --git a/src/pthread/mod.rs b/src/pthread/mod.rs +index 8243a48..ae25efb 100644 +--- a/src/pthread/mod.rs ++++ b/src/pthread/mod.rs +@@ -2,6 +2,7 @@ + + use core::{ + cell::UnsafeCell, ++ panic::AssertUnwindSafe, + ptr, + sync::atomic::{AtomicBool, AtomicUsize, 
Ordering}, + }; +@@ -43,9 +44,13 @@ pub unsafe fn init() { + thread.stack_size = STACK_SIZE; + } + +- unsafe { Tcb::current() } +- .expect_notls("no TCB present for main thread") +- .pthread = thread; ++ let tcb = unsafe { Tcb::current() } ++ .expect_notls("no TCB present for main thread"); ++ tcb.pthread = thread; ++ ++ OS_TID_TO_PTHREAD ++ .lock() ++ .insert(Sys::current_os_tid(), ForceSendSync(tcb as *const Tcb as *mut Tcb)); + } + + //static NEXT_INDEX: AtomicU32 = AtomicU32::new(FIRST_THREAD_IDX + 1); +@@ -227,12 +232,23 @@ unsafe extern "C" fn new_thread_shim( + unsafe { + tcb.activate(None); + } +- redox_rt::signal::setup_sighandler(&tcb.os_specific, false); ++ match catch_unwind(AssertUnwindSafe(|| { ++ redox_rt::signal::setup_sighandler(&tcb.os_specific, false) ++ })) { ++ Ok(()) => {} ++ Err(()) => { ++ log::error!("pthread: failed to set up child thread signal handler"); ++ unsafe { exit_current_thread(Retval(ptr::null_mut())) } ++ } ++ } + } + + let procmask = unsafe { (&*synchronization_mutex).as_ptr().read() }; + +- unsafe { tcb.copy_masters() }.unwrap(); ++ if let Err(err) = unsafe { tcb.copy_masters() } { ++ log::error!("pthread: failed to copy TLS masters for child thread: {err:?}"); ++ unsafe { exit_current_thread(Retval(ptr::null_mut())) } ++ } + + unsafe { (*tcb).pthread.os_tid.get().write(Sys::current_os_tid()) }; + +@@ -240,11 +256,21 @@ unsafe extern "C" fn new_thread_shim( + + #[cfg(target_os = "redox")] + { +- redox_rt::signal::set_sigmask(Some(procmask), None) +- .expect("failed to set procmask in child thread"); ++ if let Err(err) = redox_rt::signal::set_sigmask(Some(procmask), None) { ++ log::error!("pthread: failed to set child thread signal mask: {err:?}"); ++ } + } + +- let retval = unsafe { entry_point(arg) }; ++ let mut retval = ptr::null_mut(); ++ match catch_unwind(AssertUnwindSafe(|| { ++ retval = unsafe { entry_point(arg) }; ++ })) { ++ Ok(()) => {} ++ Err(()) => { ++ log::error!("pthread: child thread entry point panicked"); ++ 
unsafe { exit_current_thread(Retval(ptr::null_mut())) } ++ } ++ } + + unsafe { exit_current_thread(Retval(retval)) } + } +diff --git a/src/start.rs b/src/start.rs +index 63d4046..7cc96bf 100644 +--- a/src/start.rs ++++ b/src/start.rs +@@ -1,10 +1,7 @@ + //! Startup code. + + use alloc::{boxed::Box, vec::Vec}; +-use core::{intrinsics, ptr}; +- +-#[cfg(target_os = "redox")] +-use generic_rt::ExpectTlsFree; ++use core::{fmt::Write, intrinsics, panic::AssertUnwindSafe, ptr}; + + use crate::{ + ALLOCATOR, +@@ -164,14 +161,23 @@ pub unsafe extern "C" fn relibc_start_v1( + unsafe { relibc_verify_host() }; + + #[cfg(target_os = "redox")] +- let thr_fd = redox_rt::proc::FdGuard::new( +- unsafe { ++ let thr_fd = { ++ let thr_fd = match unsafe { + crate::platform::get_auxv_raw(sp.auxv().cast(), redox_rt::auxv_defs::AT_REDOX_THR_FD) ++ } { ++ Some(thr_fd) => thr_fd, ++ None => abort_startup(format_args!( ++ "relibc_start_v1: missing AT_REDOX_THR_FD auxv entry; no thread fd present\n" ++ )), ++ }; ++ ++ match redox_rt::proc::FdGuard::new(thr_fd).to_upper() { ++ Ok(thr_fd) => thr_fd, ++ Err(err) => abort_startup(format_args!( ++ "relibc_start_v1: failed to move thread fd to upper table: {err:?}\n" ++ )), + } +- .expect_notls("no thread fd present"), +- ) +- .to_upper() +- .expect_notls("failed to move thread fd to upper table"); ++ }; + + // Initialize TLS, if necessary + unsafe { +@@ -237,7 +243,10 @@ pub unsafe extern "C" fn relibc_start_v1( + let mut f = unsafe { &__preinit_array_start } as *const _; + #[allow(clippy::op_ref)] + while f < &raw const __preinit_array_end { +- (unsafe { *f })(); ++ let func = unsafe { *f }; ++ if catch_unwind(AssertUnwindSafe(|| unsafe { (*f)() })).is_err() { ++ log_initializer_panic(".preinit_array", func); ++ } + f = unsafe { f.offset(1) }; + } + } +@@ -247,7 +256,10 @@ pub unsafe extern "C" fn relibc_start_v1( + let mut f = unsafe { &__init_array_start } as *const _; + #[allow(clippy::op_ref)] + while f < &raw const __init_array_end { +- 
(unsafe { *f })(); ++ let func = unsafe { *f }; ++ if catch_unwind(AssertUnwindSafe(|| unsafe { (*f)() })).is_err() { ++ log_initializer_panic(".init_array", func); ++ } + f = unsafe { f.offset(1) }; + } + } +diff --git a/src/sync/barrier.rs b/src/sync/barrier.rs +index 6204a23..a8c41ad 100644 +--- a/src/sync/barrier.rs ++++ b/src/sync/barrier.rs +@@ -1,18 +1,34 @@ +-use core::num::NonZeroU32; ++use core::{ ++ num::NonZeroU32, ++ sync::atomic::{AtomicU32, Ordering}, ++}; + + pub struct Barrier { + original_count: NonZeroU32, + // 4 + lock: crate::sync::Mutex, + // 16 +- cvar: crate::header::pthread::RlctCond, ++ cvar: FutexState, + // 24 + } + #[derive(Debug)] + struct Inner { +- count: u32, +- // TODO: Overflows might be problematic... 64-bit? +- gen_id: u32, ++ _unused0: u32, ++ _unused1: u32, ++} ++ ++struct FutexState { ++ count: AtomicU32, ++ sense: AtomicU32, ++} ++ ++impl FutexState { ++ const fn new(count: u32) -> Self { ++ Self { ++ count: AtomicU32::new(count), ++ sense: AtomicU32::new(0), ++ } ++ } + } + + pub enum WaitResult { +@@ -25,61 +41,38 @@ impl Barrier { + Self { + original_count: count, + lock: crate::sync::Mutex::new(Inner { +- count: 0, +- gen_id: 0, ++ _unused0: 0, ++ _unused1: 0, + }), +- cvar: crate::header::pthread::RlctCond::new(), ++ cvar: FutexState::new(count.get()), + } + } +- pub fn wait(&self) -> WaitResult { +- let mut guard = self.lock.lock(); +- let gen_id = guard.gen_id; +- +- guard.count += 1; +- +- if guard.count == self.original_count.get() { +- guard.gen_id = guard.gen_id.wrapping_add(1); +- guard.count = 0; +- if let Ok(()) = self.cvar.broadcast() {}; // TODO handle error +- +- drop(guard); ++ pub fn destroy(&self) {} + +- WaitResult::NotifiedAll +- } else { +- while guard.gen_id == gen_id { +- guard = self.cvar.wait_inner_typedmutex(guard); +- } +- +- WaitResult::Waited +- } +- /* +- let mut guard = self.lock.lock(); +- let Inner { count, gen_id } = *guard; +- +- let last = self.original_count.get() - 1; +- +- if count == 
last { +- eprintln!("last {:?}", *guard); +- guard.gen_id = guard.gen_id.wrapping_add(1); +- guard.count = 0; +- +- drop(guard); ++ pub fn wait(&self) -> WaitResult { ++ let _ = &self.lock; ++ let sense = self.cvar.sense.load(Ordering::Acquire); + +- self.cvar.broadcast(); ++ if self.cvar.count.fetch_sub(1, Ordering::AcqRel) == 1 { ++ self.cvar ++ .count ++ .store(self.original_count.get(), Ordering::Relaxed); ++ self.cvar ++ .sense ++ .store(sense.wrapping_add(1), Ordering::Release); ++ crate::sync::futex_wake(&self.cvar.sense, i32::MAX); + + WaitResult::NotifiedAll + } else { +- guard.count += 1; +- +- while guard.count != last && guard.gen_id == gen_id { +- eprintln!("before {:?}", *guard); +- guard = self.cvar.wait_inner_typedmutex(guard); +- eprintln!("after {:?}", *guard); ++ // SMP fix: wait directly on the barrier generation word instead of routing through the ++ // condvar unlock->futex_wait path. If the last thread flips `sense` after we load it ++ // but before our futex wait starts, the futex observes a stale value and returns ++ // immediately instead of sleeping forever after a missed broadcast wakeup. 
++ while self.cvar.sense.load(Ordering::Acquire) == sense { ++ let _ = crate::sync::futex_wait(&self.cvar.sense, sense, None); + } + + WaitResult::Waited + } +- */ + } + } +-static LOCK: crate::sync::Mutex<()> = crate::sync::Mutex::new(()); +diff --git a/src/sync/pthread_mutex.rs b/src/sync/pthread_mutex.rs +index 29bad63..ef027e7 100644 +--- a/src/sync/pthread_mutex.rs ++++ b/src/sync/pthread_mutex.rs +@@ -1,3 +1,4 @@ ++use alloc::boxed::Box; + use core::{ + cell::Cell, + sync::atomic::{AtomicU32 as AtomicUint, Ordering}, +@@ -6,10 +7,9 @@ use core::{ + use crate::{ + error::Errno, + header::{bits_timespec::timespec, errno::*, pthread::*}, ++ platform::{Pal, Sys, types::c_int}, + }; + +-use crate::platform::{Pal, Sys, types::c_int}; +- + use super::FutexWaitResult; + + pub struct RlctMutex { +@@ -21,15 +21,22 @@ pub struct RlctMutex { + robust: bool, + } + ++pub struct RobustMutexNode { ++ pub next: *mut RobustMutexNode, ++ pub prev: *mut RobustMutexNode, ++ pub mutex: *const RlctMutex, ++} ++ + const STATE_UNLOCKED: u32 = 0; + const WAITING_BIT: u32 = 1 << 31; +-const INDEX_MASK: u32 = !WAITING_BIT; ++const FUTEX_OWNER_DIED: u32 = 1 << 30; ++const INDEX_MASK: u32 = !(WAITING_BIT | FUTEX_OWNER_DIED); + + // TODO: Lower limit is probably better. + const RECURSIVE_COUNT_MAX_INCLUSIVE: u32 = u32::MAX; + // TODO: How many spins should we do before it becomes more time-economical to enter kernel mode + // via futexes? 
+-const SPIN_COUNT: usize = 0; ++const SPIN_COUNT: usize = 100; + + impl RlctMutex { + pub(crate) fn new(attr: &RlctMutexAttr) -> Result { +@@ -69,13 +76,25 @@ impl RlctMutex { + Ok(0) + } + pub fn make_consistent(&self) -> Result<(), Errno> { +- todo_skip!(0, "pthread robust mutexes: not implemented"); +- Ok(()) ++ debug_assert!(self.robust, "make_consistent called on non-robust mutex"); ++ ++ if !self.robust { ++ return Err(Errno(EINVAL)); ++ } ++ ++ let current = self.inner.load(Ordering::Relaxed); ++ let owner = current & INDEX_MASK; ++ ++ if owner == os_tid_invalid_after_fork() && current & FUTEX_OWNER_DIED != 0 { ++ self.inner.store(0, Ordering::Release); ++ Ok(()) ++ } else { ++ Err(Errno(EINVAL)) ++ } + } + fn lock_inner(&self, deadline: Option<×pec>) -> Result<(), Errno> { + let this_thread = os_tid_invalid_after_fork(); +- +- //let mut spins_left = SPIN_COUNT; ++ let mut spins_left = SPIN_COUNT; + + loop { + let result = self.inner.compare_exchange_weak( +@@ -86,51 +105,70 @@ impl RlctMutex { + ); + + match result { +- // CAS succeeded +- Ok(_) => { +- if self.ty == Ty::Recursive { +- self.increment_recursive_count()?; +- } +- return Ok(()); +- } +- // CAS failed, but the mutex was recursive and we already own the lock. ++ Ok(_) => return self.finish_lock_acquire(false), + Err(thread) if thread & INDEX_MASK == this_thread && self.ty == Ty::Recursive => { + self.increment_recursive_count()?; + return Ok(()); + } +- // CAS failed, but the mutex was error-checking and we already own the lock. + Err(thread) if thread & INDEX_MASK == this_thread && self.ty == Ty::Errck => { +- return Err(Errno(EAGAIN)); ++ return Err(Errno(EDEADLK)); + } +- // CAS spuriously failed, simply retry the CAS. TODO: Use core::hint::spin_loop()? 
+- Err(thread) if thread & INDEX_MASK == 0 => { +- continue; ++ Err(thread) if thread & FUTEX_OWNER_DIED != 0 && thread & INDEX_MASK == 0 => { ++ return Err(Errno(ENOTRECOVERABLE)); + } +- // CAS failed because some other thread owned the lock. We must now wait. ++ Err(thread) if thread & FUTEX_OWNER_DIED != 0 => { ++ if !self.robust { ++ return Err(Errno(ENOTRECOVERABLE)); ++ } ++ ++ let new_value = (thread & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; ++ match self.inner.compare_exchange( ++ thread, ++ new_value, ++ Ordering::Acquire, ++ Ordering::Relaxed, ++ ) { ++ Ok(_) => return self.finish_lock_acquire(true), ++ Err(_) => continue, ++ } ++ } ++ Err(thread) if thread & INDEX_MASK == 0 => continue, + Err(thread) => { +- /*if spins_left > 0 { +- // TODO: Faster to spin trying to load the flag, compared to CAS? ++ let owner = thread & INDEX_MASK; ++ ++ if !crate::pthread::mutex_owner_id_is_live(owner) { ++ if !self.robust { ++ return Err(Errno(ENOTRECOVERABLE)); ++ } ++ ++ let new_value = (thread & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; ++ match self.inner.compare_exchange( ++ thread, ++ new_value, ++ Ordering::Acquire, ++ Ordering::Relaxed, ++ ) { ++ Ok(_) => return self.finish_lock_acquire(true), ++ Err(_) => continue, ++ } ++ } ++ ++ if spins_left > 0 { + spins_left -= 1; + core::hint::spin_loop(); + continue; + } +- +- spins_left = SPIN_COUNT; +- +- let inner = self.inner.fetch_or(WAITING_BIT, Ordering::Relaxed); +- +- if inner == STATE_UNLOCKED { +- continue; +- }*/ +- +- // If the mutex is not robust, simply futex_wait until unblocked. +- //crate::sync::futex_wait(&self.inner, inner | WAITING_BIT, None); + if crate::sync::futex_wait(&self.inner, thread, deadline) + == FutexWaitResult::TimedOut + { + return Err(Errno(ETIMEDOUT)); + } + } ++ } else { ++ // Non-robust mutex: owner appears dead but POSIX behaviour is ++ // undefined; report busy rather than ENOTRECOVERABLE. 
++ return Err(Errno(EBUSY)); ++ } + } + } + } +@@ -140,6 +178,20 @@ impl RlctMutex { + pub fn lock_with_timeout(&self, deadline: ×pec) -> Result<(), Errno> { + self.lock_inner(Some(deadline)) + } ++ fn finish_lock_acquire(&self, owner_dead: bool) -> Result<(), Errno> { ++ if self.ty == Ty::Recursive { ++ self.increment_recursive_count()?; ++ } ++ if self.robust { ++ add_to_robust_list(self); ++ } ++ ++ if owner_dead { ++ Err(Errno(EOWNERDEAD)) ++ } else { ++ Ok(()) ++ } ++ } + fn increment_recursive_count(&self) -> Result<(), Errno> { + // We don't have to worry about asynchronous signals here, since pthread_mutex_trylock + // is not async-signal-safe. +@@ -161,41 +213,65 @@ impl RlctMutex { + pub fn try_lock(&self) -> Result<(), Errno> { + let this_thread = os_tid_invalid_after_fork(); + +- // TODO: If recursive, omitting CAS may be faster if it is already owned by this thread. +- let result = self.inner.compare_exchange( +- STATE_UNLOCKED, +- this_thread, +- Ordering::Acquire, +- Ordering::Relaxed, +- ); ++ loop { ++ let current = self.inner.load(Ordering::Relaxed); ++ ++ if current == STATE_UNLOCKED { ++ match self.inner.compare_exchange( ++ STATE_UNLOCKED, ++ this_thread, ++ Ordering::Acquire, ++ Ordering::Relaxed, ++ ) { ++ Ok(_) => return self.finish_lock_acquire(false), ++ Err(_) => continue, ++ } ++ } + +- if self.ty == Ty::Recursive { +- match result { +- Err(index) if index & INDEX_MASK != this_thread => return Err(Errno(EBUSY)), +- _ => (), ++ let owner = current & INDEX_MASK; ++ ++ if owner == this_thread && self.ty == Ty::Recursive { ++ self.increment_recursive_count()?; ++ return Ok(()); + } + +- self.increment_recursive_count()?; ++ if owner == this_thread && self.ty == Ty::Errck { ++ return Err(Errno(EDEADLK)); ++ } + +- return Ok(()); +- } ++ if current & FUTEX_OWNER_DIED != 0 && owner == 0 { ++ return Err(Errno(ENOTRECOVERABLE)); ++ } ++ ++ if current & FUTEX_OWNER_DIED != 0 || (owner != 0 && !crate::pthread::mutex_owner_id_is_live(owner)) { ++ if 
!self.robust { ++ return Err(Errno(ENOTRECOVERABLE)); ++ } + +- match result { +- Ok(_) => Ok(()), +- Err(index) if index & INDEX_MASK == this_thread && self.ty == Ty::Errck => { +- Err(Errno(EDEADLK)) ++ let new_value = (current & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; ++ match self.inner.compare_exchange( ++ current, ++ new_value, ++ Ordering::Acquire, ++ Ordering::Relaxed, ++ ) { ++ Ok(_) => return self.finish_lock_acquire(true), ++ Err(_) => continue, ++ } + } +- Err(_) => Err(Errno(EBUSY)), ++ ++ return Err(Errno(EBUSY)); + } + } + // Safe because we are not protecting any data. + pub fn unlock(&self) -> Result<(), Errno> { ++ let current = self.inner.load(Ordering::Relaxed); ++ + if self.robust || matches!(self.ty, Ty::Recursive | Ty::Errck) { +- if self.inner.load(Ordering::Relaxed) & INDEX_MASK != os_tid_invalid_after_fork() { ++ if current & INDEX_MASK != os_tid_invalid_after_fork() { + return Err(Errno(EPERM)); + } + +- // TODO: Is this fence correct? + core::sync::atomic::fence(Ordering::Acquire); + } + +@@ -208,18 +284,47 @@ impl RlctMutex { + } + } + +- self.inner.store(STATE_UNLOCKED, Ordering::Release); +- crate::sync::futex_wake(&self.inner, i32::MAX); +- /*let was_waiting = self.inner.swap(STATE_UNLOCKED, Ordering::Release) & WAITING_BIT != 0; ++ if self.robust { ++ remove_from_robust_list(self); ++ } + +- if was_waiting { +- let _ = crate::sync::futex_wake(&self.inner, 1); +- }*/ ++ let new_state = if self.robust && current & FUTEX_OWNER_DIED != 0 { ++ FUTEX_OWNER_DIED ++ } else { ++ STATE_UNLOCKED ++ }; ++ ++ self.inner.store(new_state, Ordering::Release); ++ crate::sync::futex_wake(&self.inner, i32::MAX); + + Ok(()) + } + } + ++pub(crate) unsafe fn mark_robust_mutexes_dead(thread: &crate::pthread::Pthread) { ++ let head = thread.robust_list_head.get(); ++ let this_thread = os_tid_invalid_after_fork(); ++ let mut node = unsafe { *head }; ++ ++ unsafe { *head = core::ptr::null_mut() }; ++ ++ while !node.is_null() { ++ let next = unsafe { 
(*node).next }; ++ let mutex = unsafe { &*(*node).mutex }; ++ let current = mutex.inner.load(Ordering::Relaxed); ++ ++ if current & INDEX_MASK == this_thread { ++ mutex ++ .inner ++ .store((current & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread, Ordering::Release); ++ crate::sync::futex_wake(&mutex.inner, i32::MAX); ++ } ++ ++ unsafe { drop(Box::from_raw(node)) }; ++ node = next; ++ } ++} ++ + #[repr(u8)] + #[derive(PartialEq)] + enum Ty { +@@ -237,6 +342,54 @@ enum Ty { + #[thread_local] + static CACHED_OS_TID_INVALID_AFTER_FORK: Cell = Cell::new(0); + ++fn add_to_robust_list(mutex: &RlctMutex) { ++ let thread = crate::pthread::current_thread().expect("current thread not present"); ++ let node_ptr = Box::into_raw(Box::new(RobustMutexNode { ++ next: core::ptr::null_mut(), ++ prev: core::ptr::null_mut(), ++ mutex: core::ptr::from_ref(mutex), ++ })); ++ ++ unsafe { ++ let head = thread.robust_list_head.get(); ++ if !(*head).is_null() { ++ (**head).prev = node_ptr; ++ } ++ (*node_ptr).next = *head; ++ *head = node_ptr; ++ } ++} ++ ++fn remove_from_robust_list(mutex: &RlctMutex) { ++ let thread = match crate::pthread::current_thread() { ++ Some(thread) => thread, ++ None => return, ++ }; ++ ++ unsafe { ++ let mut node = *thread.robust_list_head.get(); ++ ++ while !node.is_null() { ++ if core::ptr::eq((*node).mutex, core::ptr::from_ref(mutex)) { ++ if !(*node).prev.is_null() { ++ (*(*node).prev).next = (*node).next; ++ } else { ++ *thread.robust_list_head.get() = (*node).next; ++ } ++ ++ if !(*node).next.is_null() { ++ (*(*node).next).prev = (*node).prev; ++ } ++ ++ drop(Box::from_raw(node)); ++ return; ++ } ++ ++ node = (*node).next; ++ } ++ } ++} ++ + // Assumes TIDs are unique between processes, which I only know is true for Redox. 
+ fn os_tid_invalid_after_fork() -> u32 { + // TODO: Coordinate better if using shared == PTHREAD_PROCESS_SHARED, with up to 2^32 separate diff --git a/local/patches/relibc/redox.patch b/local/patches/relibc/redox.patch.bak similarity index 98% rename from local/patches/relibc/redox.patch rename to local/patches/relibc/redox.patch.bak index 14eac84e..294cf154 100644 --- a/local/patches/relibc/redox.patch +++ b/local/patches/relibc/redox.patch.bak @@ -33,7 +33,7 @@ diff --git a/src/pthread/mod.rs b/src/pthread/mod.rs ptr, sync::atomic::{AtomicBool, AtomicUsize, Ordering}, }; -@@ -208,13 +209,41 @@ pub(crate) unsafe fn create( +@@ -208,11 +209,39 @@ pub(crate) unsafe fn create( } /// A shim to wrap thread entry points in logic to set up TLS, for example diff --git a/local/scripts/archive-sources.sh b/local/scripts/archive-sources.sh index 38b40b33..27630396 100755 --- a/local/scripts/archive-sources.sh +++ b/local/scripts/archive-sources.sh @@ -2,9 +2,10 @@ # archive-sources.sh — Export fully-patched source archives for Red Bear OS. # # Usage: -# ./local/scripts/archive-sources.sh [--all] [--recipe ] [--target ] +# ./local/scripts/archive-sources.sh [--release=] [--all] [--recipe ] [--target ] # -# Creates versioned, fully-patched source archives in sources//: +# Creates versioned, fully-patched source archives in sources// +# or sources/.staging/redbear-/tarballs/: # --v-patched.tar.gz # # Each archive contains: source/ (fully patched) + recipe.toml @@ -14,10 +15,9 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" TARGET="${TARGET:-x86_64-unknown-redox}" -SOURCES_DIR="${PROJECT_ROOT}/sources/${TARGET}" -MANIFEST="${SOURCES_DIR}/packages.txt" - -mkdir -p "${SOURCES_DIR}" +RELEASE="" +SOURCES_DIR="" +MANIFEST="" GREEN='\033[1;32m' RED='\033[1;31m' @@ -139,26 +139,68 @@ archive_all() { done < <(find "${PROJECT_ROOT}/recipes" "${PROJECT_ROOT}/local/recipes" -name "recipe.toml" -print0 2>/dev/null) echo "" - status "Archive complete: ${count} packages, ${failed} failures" + status "Archive complete: ${count} packages, ${failed} failures → ${SOURCES_DIR}" } # ── Main ──────────────────────────────────────────────────────────── -case "${1:-}" in - --recipe) - if [ -z "${2:-}" ]; then - err "--recipe requires a path" +MODE="" +RECIPE_PATH="" + +while [ $# -gt 0 ]; do + case "$1" in + --release=*) + RELEASE="${1#*=}" + ;; + --recipe) + if [ -z "${2:-}" ]; then + err "--recipe requires a path" + exit 1 + fi + MODE="recipe" + RECIPE_PATH="$2" + shift + ;; + --all) + MODE="all" + ;; + *) + echo "Usage: $0 [--release=] --all | --recipe " + echo "" + echo " --release= Optional release staging target (e.g. 0.2.0)" + echo " --all Archive all recipes with source directories" + echo " --recipe PATH Archive a specific recipe (e.g. 
recipes/core/base)" + echo "" + echo " Environment: TARGET=x86_64-unknown-redox (default)" exit 1 - fi + ;; + esac + shift +done + +if [ -n "$RELEASE" ]; then + SOURCES_DIR="${PROJECT_ROOT}/sources/.staging/redbear-${RELEASE}/tarballs" +else + SOURCES_DIR="${PROJECT_ROOT}/sources/${TARGET}" +fi +MANIFEST="${SOURCES_DIR}/packages.txt" + +mkdir -p "${SOURCES_DIR}" + +case "$MODE" in + recipe) + status "Writing archives to ${SOURCES_DIR}" > "$MANIFEST" - archive_recipe "${PROJECT_ROOT}/${2}" + archive_recipe "${PROJECT_ROOT}/${RECIPE_PATH}" ;; - --all) + all) + status "Writing archives to ${SOURCES_DIR}" archive_all ;; *) - echo "Usage: $0 --all | --recipe " + echo "Usage: $0 [--release=] --all | --recipe " echo "" + echo " --release= Optional release staging target (e.g. 0.2.0)" echo " --all Archive all recipes with source directories" echo " --recipe PATH Archive a specific recipe (e.g. recipes/core/base)" echo "" diff --git a/local/scripts/build-redbear.sh b/local/scripts/build-redbear.sh index 2e416425..04a956c4 100755 --- a/local/scripts/build-redbear.sh +++ b/local/scripts/build-redbear.sh @@ -4,6 +4,18 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +# Source .config for release mode settings (REDBEAR_RELEASE, etc.) 
+if [ -f "$PROJECT_ROOT/.config" ]; then + while IFS='?=' read -r key value; do + key=$(echo "$key" | xargs) + value=$(echo "$value" | xargs) + [ -z "$key" ] && continue + [[ "$key" =~ ^# ]] && continue + # Only set if not already set in environment + [ -n "${!key:-}" ] || export "$key=$value" + done < "$PROJECT_ROOT/.config" +fi + CONFIG="redbear-full" JOBS="${JOBS:-$(nproc)}" APPLY_PATCHES="${APPLY_PATCHES:-1}" @@ -79,7 +91,7 @@ echo "" cd "$PROJECT_ROOT" -if [ -x "$PROJECT_ROOT/local/scripts/verify-overlay-integrity.sh" ]; then +if [ -x "$PROJECT_ROOT/local/scripts/verify-overlay-integrity.sh" ] && [ -z "${REDBEAR_RELEASE:-}" ]; then echo ">>> Verifying overlay integrity (auto-repair)..." "$PROJECT_ROOT/local/scripts/verify-overlay-integrity.sh" --repair echo "" @@ -124,7 +136,7 @@ ensure_relibc_desktop_surface() { fi } -if [ "$APPLY_PATCHES" = "1" ]; then +if [ "$APPLY_PATCHES" = "1" ] && [ -z "${REDBEAR_RELEASE:-}" ]; then echo ">>> Applying local patches..." apply_patch_dir() { @@ -177,12 +189,8 @@ if [ "$APPLY_PATCHES" = "1" ]; then stash_nested_repo_if_dirty "$PROJECT_ROOT/recipes/core/relibc/source" "relibc" echo "" -fi - -if [ -x "$PROJECT_ROOT/local/scripts/verify-overlay-integrity.sh" ]; then - echo ">>> Verifying overlay integrity (strict)..." - "$PROJECT_ROOT/local/scripts/verify-overlay-integrity.sh" - echo "" +elif [ -n "${REDBEAR_RELEASE:-}" ]; then + echo ">>> Release mode: skipping patch application (patches pre-applied in archived sources)" fi if [ ! -f "target/release/repo" ]; then @@ -209,11 +217,43 @@ fi echo ">>> Building Red Bear OS with config: $CONFIG" echo ">>> This may take 30-60 minutes on first build..." 
-if [ "$ALLOW_UPSTREAM" -eq 1 ]; then + +# In release mode, verify archives exist before building +if [ -n "${REDBEAR_RELEASE:-}" ]; then + echo ">>> Release mode: $REDBEAR_RELEASE" + if [ -f "./local/scripts/verify-sources-archived.sh" ]; then + bash "./local/scripts/verify-sources-archived.sh" --release="$REDBEAR_RELEASE" || { + echo "ERROR: Release archive verification failed. Run: provision-release.sh" + exit 1 + } + fi +fi + +if [ "${REDBEAR_ALLOW_UPSTREAM:-0}" = "1" ]; then + echo ">>> WARNING: Upstream fetch ENABLED (REDBEAR_ALLOW_UPSTREAM=1)" + REPO_OFFLINE=0 COOKBOOK_OFFLINE=false CI=1 make all "CONFIG_NAME=$CONFIG" "JOBS=$JOBS" +elif [ -n "${REDBEAR_RELEASE:-}" ]; then + echo ">>> Release mode: building from immutable archives (offline)" + # Validate source trees before building + if [ -f "$PROJECT_ROOT/local/scripts/validate-source-trees.sh" ]; then + echo ">>> Validating source trees..." + bash "$PROJECT_ROOT/local/scripts/validate-source-trees.sh" "$CONFIG" || { + echo "WARNING: Some source trees are missing." + echo "Attempting build with REPO_BINARY=1 fallback for missing packages..." + REPO_OFFLINE=1 COOKBOOK_OFFLINE=true CI=1 REPO_BINARY=1 make all "CONFIG_NAME=$CONFIG" "JOBS=$JOBS" || { + echo "ERROR: Build failed even with binary fallback." 
+ echo "Run: ./local/scripts/restore-sources.sh --release=$REDBEAR_RELEASE" + exit 1 + } + exit 0 + } + fi + REPO_OFFLINE=1 COOKBOOK_OFFLINE=true CI=1 make all "CONFIG_NAME=$CONFIG" "JOBS=$JOBS" +elif [ "$ALLOW_UPSTREAM" -eq 1 ]; then echo ">>> Upstream recipe refresh enabled" REPO_OFFLINE=0 COOKBOOK_OFFLINE=false CI=1 make all "CONFIG_NAME=$CONFIG" "JOBS=$JOBS" else - echo ">>> Upstream recipe refresh disabled (pass --upstream to enable)" + echo ">>> Upstream recipe refresh disabled (default: offline)" REPO_OFFLINE=1 COOKBOOK_OFFLINE=true CI=1 make all "CONFIG_NAME=$CONFIG" "JOBS=$JOBS" fi diff --git a/local/scripts/check-upstream-releases.sh b/local/scripts/check-upstream-releases.sh new file mode 100755 index 00000000..be755e1b --- /dev/null +++ b/local/scripts/check-upstream-releases.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# check-upstream-releases.sh — Check for new Redox OS snapshots (read-only). +# +# Usage: +# ./local/scripts/check-upstream-releases.sh +# +# Queries Redox GitLab tags via git ls-remote. +# Prints snapshots newer than the current baseline. +# ZERO side effects — no clones, no disk writes, no state changes. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" + +REDOX_URL="${REDOX_URL:-https://gitlab.redox-os.org/redox-os/redox.git}" +MANIFEST="$PROJECT_ROOT/sources/redbear-0.1.0/manifest.txt" + +GREEN='\033[1;32m' +YELLOW='\033[1;33m' +BLUE='\033[1;34m' +NC='\033[0m' + +echo -e "${BLUE}Red Bear OS — Upstream Release Check${NC}" +echo "" + +# Get our baseline +if [ -f "$MANIFEST" ]; then + BASELINE=$(head -3 "$MANIFEST" | grep 'Build system' | awk '{print $NF}' 2>/dev/null || echo "unknown") + echo "Baseline: Red Bear OS 0.1.0 (build system: $BASELINE)" +else + echo "Baseline: unknown (manifest not found at $MANIFEST)" +fi + +# Get baseline date from manifest or git +if [ -f "$MANIFEST" ]; then + BASELINE_DATE=$(head -6 "$MANIFEST" | grep 'Generated' | sed 's/.*Generated: //' | head -1 2>/dev/null || echo "2026-05-01") +else + BASELINE_DATE="2026-05-01" +fi +echo "Baseline date: $BASELINE_DATE" +echo "" + +# Query Redox tags +echo "Checking: $REDOX_URL" +echo "" + +TAGS=$(git ls-remote --tags "$REDOX_URL" 2>/dev/null | grep -oP 'refs/tags/\K[0-9]+\.[0-9]+\.[0-9]+' | sort -V | tail -20 || echo "") + +if [ -z "$TAGS" ]; then + echo -e "${YELLOW}Could not query Redox tags. 
Is the network available?${NC}" + echo "URL: $REDOX_URL" + exit 0 +fi + +echo "Redox releases available:" +echo "$TAGS" | while read -r tag; do + marker="" + if [ "$tag" = "0.9.0" ]; then + marker=" (current upstream stable)" + fi + echo " $tag$marker" +done + +echo "" +echo "To evaluate a release:" +echo " ./local/scripts/provision-release.sh --ref= --release=0.2.0 --dry-run" +echo "" +echo "To rebase on a release:" +echo " ./local/scripts/provision-release.sh --ref= --release=0.2.0" diff --git a/local/scripts/generate-manifest.py b/local/scripts/generate-manifest.py new file mode 100755 index 00000000..a406100d --- /dev/null +++ b/local/scripts/generate-manifest.py @@ -0,0 +1,436 @@ +#!/usr/bin/env python3 +"""Generate an authoritative Red Bear OS release manifest as JSON.""" + +from __future__ import annotations + +import argparse +import json +import os +from pathlib import Path +import re +import shutil +import subprocess +import sys +import tarfile +import tomllib + + +PROJECT_ROOT = Path(__file__).resolve().parents[2] +RECIPES_DIR = PROJECT_ROOT / "recipes" +LOCAL_RECIPES_DIR = PROJECT_ROOT / "local" / "recipes" +ARCHIVES_DIR = PROJECT_ROOT / "sources" / "x86_64-unknown-redox" +HASH_TOOL = shutil.which("b3sum") + +TAR_VERSION_PATTERNS = ( + re.compile(r"/archive/v?(\d+\.\d+(?:\.\d+)?)/"), + re.compile(r"(?:^|[/-])v?(\d+\.\d+(?:\.\d+)?)(?=\.tar(?:\.[^./]+)+(?:/download)?$)"), +) +HEX_REV_RE = re.compile(r"[0-9a-fA-F]{7,}") +SAFE_VERSION_RE = re.compile(r"[^A-Za-z0-9._-]+") + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Generate authoritative manifest.json content for a Red Bear OS release." 
+    )
+    parser.add_argument("--release", required=True, help="Release version to record in the manifest")
+    parser.add_argument("--staging", action="store_true", help="Look for archives in staging directory")
+    return parser.parse_args()
+
+
+def main() -> int:
+    args = parse_args()
+    global ARCHIVES_DIR
+    if args.staging:
+        ARCHIVES_DIR = PROJECT_ROOT / "sources" / ".staging" / f"redbear-{args.release}" / "tarballs"
+    else:
+        ARCHIVES_DIR = PROJECT_ROOT / "sources" / f"redbear-{args.release}" / "tarballs"
+        # Fallback to shared pool if release dir has no tarballs yet
+        if not list(ARCHIVES_DIR.glob("*.tar.gz")):
+            ARCHIVES_DIR = PROJECT_ROOT / "sources" / "x86_64-unknown-redox"
+
+    recipe_files = collect_recipe_files()
+    entries = {}
+
+    for relative_recipe_path, recipe_file in recipe_files.items():
+        entries[relative_recipe_path] = build_entry(relative_recipe_path, recipe_file, recipe_files)
+
+    manifest = {
+        "release": args.release,
+        "build_system_rev": resolve_build_system_rev(),
+        "entries": {key: entries[key] for key in sorted(entries)},
+    }
+
+    json.dump(manifest, sys.stdout, indent=2)
+    sys.stdout.write("\n")
+    return 0
+
+
+def collect_recipe_files() -> dict[str, Path]:
+    recipe_files: dict[str, Path] = {}
+
+    for root in (LOCAL_RECIPES_DIR, RECIPES_DIR):
+        if not root.is_dir():
+            continue
+
+        for dirpath, dirnames, filenames in os.walk(root, followlinks=False):
+            dirnames[:] = sorted(
+                name for name in dirnames if name not in {"source", "target", ".git", "__pycache__"}
+            )
+
+            if "recipe.toml" not in filenames:
+                continue
+
+            recipe_file = Path(dirpath) / "recipe.toml"
+            if not recipe_file.is_file():
+                continue
+
+            relative_recipe_path = recipe_file.relative_to(root).parent.as_posix()
+            recipe_files.setdefault(relative_recipe_path, recipe_file)
+
+    return recipe_files
+
+
+def build_entry(
+    relative_recipe_path: str, recipe_file: Path, recipe_files: dict[str, Path]
+) -> dict[str, object]:
+    recipe_dir = recipe_file.parent
+
recipe_data = load_recipe_metadata(recipe_file) + source_data = recipe_data.get("source") if isinstance(recipe_data, dict) else None + source = source_data if isinstance(source_data, dict) else {} + recipe_type = classify_recipe(source) + + entry: dict[str, object] = { + "type": recipe_type, + "restore_to": f"recipes/{relative_recipe_path}/source", + } + + if recipe_type != "meta": + archive_name = expected_archive_name( + relative_recipe_path, + recipe_type, + source, + recipe_dir, + recipe_files, + ) + archive_name = resolve_archive_name(relative_recipe_path, archive_name) + archive_path = ARCHIVES_DIR / archive_name + + entry["archive"] = archive_name + entry["blake3"] = blake3_file(archive_path) if archive_path.is_file() else None + + if recipe_type == "git": + rev = get_git_rev(source, recipe_dir) + entry["git_url"] = source.get("git") + entry["rev"] = rev + elif recipe_type == "tar": + entry["tar_url"] = source.get("tar") + source_blake3 = source.get("blake3") or source.get("b3sum") + if source_blake3: + entry["source_blake3"] = source_blake3 + elif recipe_type == "path": + path_value = source.get("path") + entry["path"] = path_value + source_path = resolve_source_path(recipe_dir, path_value) + if source_path and source_path.exists(): + entry["tree_blake3"] = blake3_tree(source_path) + elif recipe_type == "same_as": + entry["target"] = normalize_recipe_reference(recipe_dir, str(source.get("same_as", ""))) + elif recipe_type == "meta": + entry["meta"] = "no_source" + + return entry + + +def load_recipe_metadata(path: Path) -> dict[str, object]: + text = path.read_text(encoding="utf-8") + + try: + data = tomllib.loads(text) + except tomllib.TOMLDecodeError: + return {"source": parse_source_block(text)} + + return data if isinstance(data, dict) else {} + + +def parse_source_block(text: str) -> dict[str, object]: + source: dict[str, object] = {} + in_source = False + + for raw_line in text.splitlines(): + stripped = raw_line.strip() + + if 
stripped.startswith("[") and stripped.endswith("]"): + if stripped == "[source]": + in_source = True + continue + + if in_source: + break + + continue + + if not in_source or not stripped or stripped.startswith("#") or "=" not in raw_line: + continue + + key, value = raw_line.split("=", 1) + key = key.strip() + value = value.split("#", 1)[0].strip() + if not key or not value: + continue + + try: + source[key] = tomllib.loads(f"value = {value}")["value"] + except tomllib.TOMLDecodeError: + continue + + return source + + +def classify_recipe(source: dict[str, object]) -> str: + if source.get("git"): + return "git" + if source.get("tar"): + return "tar" + if source.get("path"): + return "path" + if source.get("same_as"): + return "same_as" + return "meta" + + +def expected_archive_name( + relative_recipe_path: str, + recipe_type: str, + source: dict[str, object], + recipe_dir: Path, + recipe_files: dict[str, Path], +) -> str: + path = Path(relative_recipe_path) + pkg_name = path.name + category = path.parent.name if path.parent.as_posix() != "." 
else "root" + version = derive_archive_version( + relative_recipe_path, + recipe_type, + source, + recipe_dir, + recipe_files, + {relative_recipe_path}, + ) + return f"{category}-{pkg_name}-v{version}-patched.tar.gz" + + +def derive_archive_version( + relative_recipe_path: str, + recipe_type: str, + source: dict[str, object], + recipe_dir: Path, + recipe_files: dict[str, Path], + seen: set[str], +) -> str: + if recipe_type == "tar": + tar_url = str(source.get("tar", "")) + version = extract_tar_version(tar_url) + if version: + return version + + if recipe_type == "git": + rev = get_git_rev(source, recipe_dir) + if isinstance(rev, str) and rev: + if HEX_REV_RE.fullmatch(rev): + return rev[:7] + return sanitize_version(rev) + + if recipe_type == "same_as": + target = normalize_recipe_reference(recipe_dir, str(source.get("same_as", ""))) + if target and target not in seen: + target_file = recipe_files.get(target) + if target_file is not None: + target_data = load_recipe_metadata(target_file) + target_source_data = target_data.get("source") if isinstance(target_data, dict) else None + target_source = target_source_data if isinstance(target_source_data, dict) else {} + target_type = classify_recipe(target_source) + return derive_archive_version( + target, + target_type, + target_source, + target_file.parent, + recipe_files, + seen | {target}, + ) + + return "unknown" + + +def resolve_archive_name(relative_recipe_path: str, archive_name: str) -> str: + archive_path = ARCHIVES_DIR / archive_name + if archive_path.is_file(): + return archive_name + + recipe_path = Path(relative_recipe_path) + category = recipe_path.parent.name if recipe_path.parent.as_posix() != "." 
else "root" + pkg_name = recipe_path.name + matches = sorted(ARCHIVES_DIR.glob(f"{category}-{pkg_name}-v*-patched.tar.gz")) + if len(matches) == 1: + return matches[0].name + + return archive_name + + +def extract_tar_version(tar_url: str) -> str | None: + for pattern in TAR_VERSION_PATTERNS: + match = pattern.search(tar_url) + if match: + return match.group(1) + return None + + +def get_git_rev(source: dict[str, object], recipe_dir: Path) -> str | None: + rev = source.get("rev") + if isinstance(rev, str) and rev.strip(): + return rev.strip() + return resolve_git_head(recipe_dir / "source") + + +def resolve_git_head(repo_dir: Path) -> str | None: + git_dir = repo_dir / ".git" + if not git_dir.exists(): + return None + + result = subprocess.run( + ["git", "-C", str(repo_dir), "rev-parse", "--short", "HEAD"], + capture_output=True, + text=True, + check=False, + ) + if result.returncode != 0: + return None + + head = result.stdout.strip() + return head or None + + +def resolve_build_system_rev() -> str | None: + result = subprocess.run( + ["git", "-C", str(PROJECT_ROOT), "rev-parse", "--short=9", "HEAD"], + capture_output=True, + text=True, + check=False, + ) + if result.returncode != 0: + return None + value = result.stdout.strip() + return value or None + + +def resolve_source_path(recipe_dir: Path, raw_path: object) -> Path | None: + if not isinstance(raw_path, str) or not raw_path: + return None + + path = Path(raw_path) + candidate = path if path.is_absolute() else recipe_dir / path + + try: + resolved = candidate.resolve(strict=True) + except FileNotFoundError: + return None + + try: + resolved.relative_to(PROJECT_ROOT.resolve()) + except ValueError: + return None + + return resolved + + +def normalize_recipe_reference(recipe_dir: Path, raw_reference: str) -> str: + if not raw_reference: + return raw_reference + + candidate = (recipe_dir / raw_reference).resolve(strict=False) + for root in (RECIPES_DIR, LOCAL_RECIPES_DIR): + try: + return 
candidate.relative_to(root).as_posix() + except ValueError: + continue + + return raw_reference + + +def sanitize_version(value: str) -> str: + cleaned = SAFE_VERSION_RE.sub("-", value).strip("-.") + return cleaned or "unknown" + + +def require_hash_tool() -> str: + if HASH_TOOL: + return HASH_TOOL + raise RuntimeError("b3sum is required to compute BLAKE3 hashes") + + +def blake3_file(path: Path) -> str: + result = subprocess.run( + [require_hash_tool(), "--no-names", str(path)], + capture_output=True, + text=True, + check=False, + ) + if result.returncode != 0: + stderr = result.stderr.strip() or f"failed to hash {path}" + raise RuntimeError(stderr) + return result.stdout.strip().split()[0] + + +def blake3_tree(root: Path) -> str: + process = subprocess.Popen( + [require_hash_tool(), "--no-names"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + try: + assert process.stdin is not None + with tarfile.open(fileobj=process.stdin, mode="w|") as tar: + for entry in iter_tree_entries(root): + arcname = entry.relative_to(root).as_posix() + tar_info = tar.gettarinfo(str(entry), arcname=arcname) + tar_info.uid = 0 + tar_info.gid = 0 + tar_info.uname = "" + tar_info.gname = "" + tar_info.mtime = 0 + + if tar_info.isreg(): + with entry.open("rb") as handle: + tar.addfile(tar_info, handle) + else: + tar.addfile(tar_info) + finally: + if process.stdin and not process.stdin.closed: + process.stdin.close() + + stdout, stderr = process.communicate() + if process.returncode != 0: + message = stderr.decode().strip() or f"failed to hash tree {root}" + raise RuntimeError(message) + return stdout.decode().strip().split()[0] + + +def iter_tree_entries(root: Path) -> list[Path]: + entries: list[Path] = [] + + def walk(directory: Path) -> None: + children = sorted(directory.iterdir(), key=lambda path: path.name) + for child in children: + entries.append(child) + if child.is_dir() and not child.is_symlink(): + walk(child) + + if root.exists() and 
root.is_dir(): + walk(root) + return entries + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/local/scripts/provision-release.sh b/local/scripts/provision-release.sh new file mode 100755 index 00000000..e8f8a5b7 --- /dev/null +++ b/local/scripts/provision-release.sh @@ -0,0 +1,247 @@ +#!/usr/bin/env bash +# provision-release.sh — Seal current build tree as a new Red Bear OS release (atomic). +# +# Usage: +# ./local/scripts/provision-release.sh --release=0.2.0 [--ref=] [--dry-run] +# +# Provisions a self-contained, immutable release archive via staging + atomic mv. +# All 7 completeness gates must pass before .complete sentry is written. +# On failure, staging directory is cleaned up automatically. +# +# Requires explicit --release. Never runs automatically. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +REF="" +RELEASE="" +DRY_RUN=0 + +usage() { + cat < [--ref=] [--dry-run] + +Seal current source tree as a new Red Bear OS release (atomic provisioning). 
+ +Options: + --release= Red Bear OS release version (e.g., 0.2.0) — REQUIRED + --ref= Optional Redox OS ref for provenance tracking + --dry-run Preview only — no filesystem changes + -h, --help Show this help +EOF +} + +while [ $# -gt 0 ]; do + case "$1" in + --ref=*) REF="${1#*=}" ;; + --release=*) RELEASE="${1#*=}" ;; + --dry-run) DRY_RUN=1 ;; + -h|--help) usage; exit 0 ;; + *) echo "Unknown: $1"; usage >&2; exit 1 ;; + esac + shift +done + +if [ -z "$RELEASE" ]; then + echo "ERROR: --release is required" >&2 + usage >&2 + exit 1 +fi + +cd "$PROJECT_ROOT" + +RED='\033[1;31m' +GREEN='\033[1;32m' +YELLOW='\033[1;33m' +BLUE='\033[1;34m' +NC='\033[0m' + +status() { echo -e "${GREEN}==>${NC} $*"; } +warn() { echo -e "${YELLOW}WARN${NC}: $*"; } +err() { echo -e "${RED}ERROR${NC}: $*" >&2; } +info() { echo -e "${BLUE} ${NC} $*"; } + +STAGING="sources/.staging/redbear-${RELEASE}" +FINAL="sources/redbear-${RELEASE}" + +cleanup_staging() { + if [ -d "$STAGING" ]; then + warn "Cleaning up staging directory..." + rm -rf "$STAGING" + fi +} +trap cleanup_staging EXIT + +# ── Step 1: Verify current release is archived ────────────────────── +status "Step 1: Verifying current release..." +CURRENT_RELEASE="${REDBEAR_RELEASE:-0.1.0}" +CURRENT_ARCHIVE="sources/redbear-$CURRENT_RELEASE" + +if [ ! -f "$CURRENT_ARCHIVE/.complete" ] && [ ! -f "$CURRENT_ARCHIVE/manifest.txt" ]; then + warn "Current release $CURRENT_RELEASE has no .complete sentry or manifest" + warn "It may not be fully archived. Continue anyway? (y/N)" + if [ "$DRY_RUN" -eq 0 ]; then + read -r confirm + [ "$confirm" = "y" ] || [ "$confirm" = "Y" ] || exit 1 + fi +fi +info "Current release: $CURRENT_RELEASE" + +# ── Step 2: Ref validation (optional) ─────────────────────────────── +if [ -n "$REF" ]; then + status "Step 2: Validating ref=$REF..." 
+ if [ "$DRY_RUN" -eq 1 ]; then + info "[dry-run] Would validate ref $REF" + else + REDOX_URL="https://gitlab.redox-os.org/redox-os/redox.git" + if timeout 10 git ls-remote --tags "$REDOX_URL" "$REF" 2>/dev/null | grep -q "$REF"; then + info "Ref $REF exists in Redox repository" + elif timeout 10 git ls-remote --tags "$REDOX_URL" 2>/dev/null | grep -q .; then + err "Ref $REF not found" + exit 1 + else + warn "Cannot reach Redox repository — ref recorded as stated provenance" + fi + fi +fi + +# ── Step 3: Staging safety check ──────────────────────────────────── +status "Step 3: Checking staging..." +if [ -d "$STAGING" ]; then + err "Staging directory already exists: $STAGING" + err "This may be from a previous failed provisioning run." + err "Remove it first: rm -rf $STAGING" + [ "$DRY_RUN" -eq 1 ] || exit 1 +fi +if [ -d "$FINAL" ]; then + err "Release already exists: $FINAL" + err "Releases are immutable. Choose a different --release version." + [ "$DRY_RUN" -eq 1 ] || exit 1 +fi +info "Staging path is clear" + +# ── Step 4: Archive sources ───────────────────────────────────────── +status "Step 4: Archiving sources..." +if [ "$DRY_RUN" -eq 1 ]; then + info "[dry-run] Would run: archive-sources.sh --release=$RELEASE --all" +else + mkdir -p "$STAGING"/{tarballs,snapshots,configs} + if [ -f "$SCRIPT_DIR/archive-sources.sh" ]; then + bash "$SCRIPT_DIR/archive-sources.sh" --release="$RELEASE" --all + info "Sources archived" + else + err "archive-sources.sh not found" + exit 1 + fi +fi + +# ── Step 5: Archive configs ───────────────────────────────────────── +status "Step 5: Archiving configs..." 
+if [ "$DRY_RUN" -eq 1 ]; then + info "[dry-run] Would copy configs" +else + cp config/redbear-*.toml config/base.toml config/minimal.toml "$STAGING/configs/" 2>/dev/null || true + cp .config "$STAGING/configs/" 2>/dev/null || true + info "Configs: $(ls "$STAGING/configs"/*.toml 2>/dev/null | wc -l) files" +fi + +# ── Step 6: Archive patches ───────────────────────────────────────── +status "Step 6: Archiving patches..." +if [ "$DRY_RUN" -eq 1 ]; then + info "[dry-run] Would archive patches" +else + if [ -d "local/patches" ]; then + (cd local && tar czf "$PROJECT_ROOT/$STAGING/patches.tar.gz" patches/) + info "Patches archived: patches.tar.gz" + fi +fi + +# ── Step 7: Generate manifest ─────────────────────────────────────── +status "Step 7: Generating manifest..." +if [ "$DRY_RUN" -eq 1 ]; then + info "[dry-run] Would generate manifest.json" +else + if [ -f "$SCRIPT_DIR/generate-manifest.py" ]; then + python3 "$SCRIPT_DIR/generate-manifest.py" --release="$RELEASE" --staging > "$STAGING/manifest.json" || { + err "Manifest generation failed" + exit 1 + } + info "Manifest: $(python3 -c "import json; d=json.load(open('$STAGING/manifest.json')); print(len(d.get('entries',{})))" 2>/dev/null || echo "?") entries" + else + err "generate-manifest.py not found" + exit 1 + fi +fi + +# ── Step 8: Generate BLAKE3SUMS ───────────────────────────────────── +status "Step 8: Generating checksums..." +if [ "$DRY_RUN" -eq 1 ]; then + info "[dry-run] Would generate BLAKE3SUMS and PAYLOAD.blake3" +else + if [ -d "$STAGING/tarballs" ] && ls "$STAGING/tarballs"/*.tar.gz >/dev/null 2>&1; then + (cd "$STAGING/tarballs" && b3sum *.tar.gz) > "$STAGING/BLAKE3SUMS" + info "BLAKE3SUMS: $(wc -l < "$STAGING/BLAKE3SUMS") entries" + fi + if [ -d "$STAGING/snapshots" ] && ls "$STAGING/snapshots"/*.tar.gz >/dev/null 2>&1; then + (cd "$STAGING/snapshots" && b3sum *.tar.gz) >> "$STAGING/BLAKE3SUMS" + fi + # Generate whole-payload hash + (cd "$STAGING" && find . -type f ! -name PAYLOAD.blake3 ! 
-name .complete -print0 | sort -z | xargs -0 b3sum) > "$STAGING/PAYLOAD.blake3" 2>/dev/null || true +fi + +# ── Step 9: Completeness gates ────────────────────────────────────── +status "Step 9: Running completeness gates..." +if [ "$DRY_RUN" -eq 1 ]; then + info "[dry-run] Would run verify-release-completeness.sh" +else + if [ -f "$SCRIPT_DIR/verify-release-completeness.sh" ]; then + if bash "$SCRIPT_DIR/verify-release-completeness.sh" --release="$RELEASE" --staging; then + info "All completeness gates PASSED" + else + err "Completeness gates FAILED" + exit 1 + fi + else + warn "verify-release-completeness.sh not found — skipping gate checks" + fi +fi + +# ── Step 10: Seal and deploy ──────────────────────────────────────── +status "Step 10: Sealing release..." +if [ "$DRY_RUN" -eq 1 ]; then + info "[dry-run] Would write .complete sentry and move to $FINAL" +else + echo "$(date -u +%Y-%m-%dT%H:%M:%SZ) — Release $RELEASE" > "$STAGING/.complete" + if [ -d "$FINAL" ]; then + err "Release directory already exists: $FINAL" + err "Releases are immutable. Choose a different --release version." 
+ exit 1 + fi + mv "$STAGING" "$FINAL" +fi + +# ── Report ────────────────────────────────────────────────────────── +echo "" +echo -e "${GREEN}=========================================${NC}" +if [ "$DRY_RUN" -eq 0 ]; then + echo -e "${GREEN} Release $RELEASE provisioned${NC}" +else + echo -e "${GREEN} Dry-run complete — no changes made${NC}" +fi +echo -e "${GREEN}=========================================${NC}" +echo "" + +if [ "$DRY_RUN" -eq 0 ]; then + echo "Archive: $FINAL/" + echo " tarballs/: $(ls "$FINAL/tarballs" 2>/dev/null | wc -l) archives" + echo " configs/: $(ls "$FINAL/configs" 2>/dev/null | wc -l) files" + echo " .complete: $(cat "$FINAL/.complete")" + echo "" + echo "To verify: ./local/scripts/verify-sources-archived.sh --release=$RELEASE" + echo "" + echo "To switch: edit .config → REDBEAR_RELEASE?=$RELEASE" +fi + +# Prevent trap cleanup on success +trap - EXIT diff --git a/local/scripts/restore-sources.sh b/local/scripts/restore-sources.sh new file mode 100755 index 00000000..c85c33f8 --- /dev/null +++ b/local/scripts/restore-sources.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash +# restore-sources.sh — Extract patched source archives back to recipe directories. +# +# Usage: +# ./local/scripts/restore-sources.sh --release=0.1.0 [recipe ...] +# +# Reads sources/redbear-/manifest.txt to find archives. +# Extracts each archive to recipes///source/. +# Skips extraction if source/ already exists and has matching rev. + +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +RELEASE="" +RECIPES=() + +usage() { + cat < [recipe ...] + +Restore recipe sources from release archives. + +Options: + --release= Release version (e.g., 0.1.0) + --force Overwrite existing source directories + -h, --help Show this help + +If no recipes specified, restores ALL recipes in the manifest. 
+EOF +} + +FORCE=0 +while [ $# -gt 0 ]; do + case "$1" in + --release=*) RELEASE="${1#*=}" ;; + --force) FORCE=1 ;; + -h|--help) usage; exit 0 ;; + *) RECIPES+=("$1") ;; + esac + shift +done + +if [ -z "$RELEASE" ]; then + echo "ERROR: --release is required" >&2 + usage >&2 + exit 1 +fi + +ARCHIVE_DIR="$PROJECT_ROOT/sources/redbear-$RELEASE" +MANIFEST="$ARCHIVE_DIR/manifest.txt" + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: Release manifest not found: $MANIFEST" >&2 + echo "Run: ./local/scripts/provision-release.sh --release=$RELEASE" >&2 + exit 1 +fi + +cd "$PROJECT_ROOT" + +GREEN='\033[1;32m' +YELLOW='\033[1;33m' +RED='\033[1;31m' +NC='\033[0m' + +status() { echo -e "${GREEN}==>${NC} $*"; } +warn() { echo -e "${YELLOW}WARN${NC}: $*"; } +err() { echo -e "${RED}ERROR${NC}: $*" >&2; } + +restored=0 +skipped=0 +failed=0 + +# Read manifest and restore each recipe +while IFS= read -r line; do + [[ "$line" =~ ^# ]] && continue + [[ -z "$line" ]] && continue + + # Parse: category/name type=... key=value ... 
+ pkg_path=$(echo "$line" | awk '{print $1}') + pkg_type=$(echo "$line" | awk '{print $2}' | cut -d= -f1) + + # If specific recipes requested, filter + if [ ${#RECIPES[@]} -gt 0 ]; then + match=0 + for r in "${RECIPES[@]}"; do + [[ "$pkg_path" == "$r" ]] && match=1 + done + [ "$match" -eq 0 ] && continue + fi + + source_dir="$PROJECT_ROOT/recipes/$pkg_path/source" + + # Skip if source exists and not forced + if [ -d "$source_dir" ] && [ "$FORCE" -eq 0 ]; then + warn "source exists: recipes/$pkg_path/source/ (use --force to overwrite)" + skipped=$((skipped + 1)) + continue + fi + + # Exact archive lookup in release tarballs directory + archive_name="" + if [ -f "$ARCHIVE_DIR/manifest.json" ]; then + archive_name=$(python3 -c " +import json, sys +with open('$ARCHIVE_DIR/manifest.json') as f: + data = json.load(f) +entry = data.get('entries', {}).get('$pkg_path', {}) +if entry.get('type') == 'same_as': + target = entry.get('target', '') + target_entry = data.get('entries', {}).get(target, {}) + print(target_entry.get('archive', target_entry.get('snapshot', ''))) +elif entry.get('type') == 'path': + print('__LOCAL_PATH__') +else: + print(entry.get('archive', '')) +" 2>/dev/null) + fi + + if [ -n "$archive_name" ]; then + if [ "$archive_name" = "__LOCAL_PATH__" ]; then + warn "local path source (no archive): $pkg_path" + skipped=$((skipped + 1)) + continue + fi + archive="$ARCHIVE_DIR/tarballs/$archive_name" + if [ ! -f "$archive" ]; then + archive="$ARCHIVE_DIR/snapshots/$archive_name" + fi + fi + + # Fallback: try glob pattern in release tarballs dir + if [ -z "$archive" ] || [ ! 
-f "$archive" ]; then + cat_name=$(dirname "$pkg_path") + pkg_name=$(basename "$pkg_path") + shopt -s nullglob + for f in "$ARCHIVE_DIR/tarballs/${cat_name}-${pkg_name}-"*.tar.gz; do + [ -f "$f" ] || continue + archive="$f" + break + done + shopt -u nullglob + fi + + if [ -z "$archive" ]; then + err "no archive found for $pkg_path in $ARCHIVE_DIR/tarballs/" + failed=$((failed + 1)) + continue + fi + + # Extract with format auto-detection + mkdir -p "$(dirname "$source_dir")" + rm -rf "$source_dir" + status "restoring: $pkg_path" + first_entry=$(tar tf "$archive" 2>/dev/null | head -1) + case "$first_entry" in + source/*) + tar xzf "$archive" -C "$source_dir/.." 2>/dev/null ;; + */source/*) + tar xzf "$archive" -C "$(dirname "$(dirname "$source_dir")")" 2>/dev/null ;; + *) + tar xzf "$archive" -C "$(dirname "$source_dir")" 2>/dev/null ;; + esac + + # Verify extraction + if [ -d "$source_dir" ]; then + restored=$((restored + 1)) + else + err "extraction failed: $pkg_path (archive: $archive)" + failed=$((failed + 1)) + fi +done < "$MANIFEST" + +echo "" +echo "=========================================" +echo " Restore complete" +echo " Restored: $restored" +echo " Skipped: $skipped" +echo " Failed: $failed" +echo "=========================================" + +[ "$failed" -eq 0 ] || exit 1 diff --git a/local/scripts/sync-upstream.sh b/local/scripts/sync-upstream.sh index 09558080..9869f7ea 100755 --- a/local/scripts/sync-upstream.sh +++ b/local/scripts/sync-upstream.sh @@ -1,284 +1,38 @@ #!/usr/bin/env bash -# sync-upstream.sh — Update from upstream Redox and reapply Red Bear OS overlays. +# sync-upstream.sh — RETIRED. Red Bear OS is now a release-based fork. # -# Usage: -# ./local/scripts/sync-upstream.sh # Rebase onto upstream master -# ./local/scripts/sync-upstream.sh --dry-run # Preview what would change -# ./local/scripts/sync-upstream.sh --no-merge # Only fetch + check for conflicts +# This script no longer performs upstream synchronization. 
+# Red Bear OS sources are frozen at the current baseline (0.1.0). +# Sources are immutable — never auto-refreshed from upstream. # -# Strategy: git rebase (preserves Red Bear OS commits, replays on new upstream). -# Fallback: if rebase fails, patches in local/patches/build-system/ can be -# applied from scratch via: ./local/scripts/apply-patches.sh --force +# To check for newer Redox OS snapshots: +# ./local/scripts/check-upstream-releases.sh # -# IMPORTANT: upstream WIP recipes are not treated as durable shipping inputs by Red Bear. -# After upstream sync, Red Bear-owned WIP work still needs to come from local/recipes/ and -# local/patches/, not from trust in recipes/wip/ alone. +# To provision a new release from a Redox ref: +# ./local/scripts/provision-release.sh --ref=<ref> --release=0.2.0 +# +# To restore archived sources: +# ./local/scripts/restore-sources.sh --release=0.1.0 +# +# Documentation: +# local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md set -euo pipefail -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" -UPSTREAM_URL="${UPSTREAM_URL:-https://github.com/redox-os/redox.git}" -UPSTREAM_REMOTE="upstream-redox" -UPSTREAM_BRANCH="${UPSTREAM_BRANCH:-master}" -DRY_RUN=0 -NO_MERGE=0 -FORCE=0 - -usage() { - echo "Usage: $0 [--dry-run] [--no-merge] [--force]" - echo " --dry-run Show what would happen without making changes" - echo " --no-merge Only fetch and check patch conflicts" - echo " --force Skip safety checks (uncommitted local/ changes)" -} - -for arg in "$@"; do - case "$arg" in - --dry-run) DRY_RUN=1 ;; - --no-merge) NO_MERGE=1 ;; - --force) FORCE=1 ;; - --help|-h) - usage - exit 0 - ;; - *) - echo "Unknown argument: $arg" - usage >&2 - exit 1 - ;; - esac -done - -cd "$REPO_ROOT" - -# ── 1. Ensure upstream remote ─────────────────────────────────────── -if ! 
git remote get-url "$UPSTREAM_REMOTE" &>/dev/null; then - echo "==> Adding upstream remote: $UPSTREAM_URL" - [ "$DRY_RUN" = "0" ] && git remote add "$UPSTREAM_REMOTE" "$UPSTREAM_URL" -fi - -echo "==> Fetching $UPSTREAM_REMOTE/$UPSTREAM_BRANCH..." -[ "$DRY_RUN" = "0" ] && git fetch "$UPSTREAM_REMOTE" "$UPSTREAM_BRANCH" - -UPSTREAM_REF="${UPSTREAM_REMOTE}/${UPSTREAM_BRANCH}" - -# ── 2. Check patch conflicts with upstream changes ────────────────── -MERGE_BASE=$(git merge-base HEAD "$UPSTREAM_REF" 2>/dev/null || echo "") -if [ -n "$MERGE_BASE" ]; then - CHANGED_FILES=$(git diff --name-only "$MERGE_BASE" "$UPSTREAM_REF" 2>/dev/null || true) - CHANGE_COUNT=$(echo "$CHANGED_FILES" | grep -c . 2>/dev/null || echo "0") - echo " $CHANGE_COUNT files changed upstream since common ancestor" - - if [ -n "$CHANGED_FILES" ] && [ -d local/patches ]; then - echo "" - echo "==> Checking patch conflict risks..." - for patch_file in local/patches/build-system/[0-9]*.patch; do - [ -f "$patch_file" ] || continue - PATCH_NAME=$(basename "$patch_file") - PATCHED_FILES=$(grep '^--- a/' "$patch_file" 2>/dev/null | sed 's|^--- a/||' | sort -u || true) - for pf in $PATCHED_FILES; do - if echo "$CHANGED_FILES" | grep -q "$pf" 2>/dev/null; then - echo " ⚠ CONFLICT RISK: $PATCH_NAME modifies $pf (also changed upstream)" - fi - done - done - - for patch_dir in local/patches/kernel local/patches/base; do - [ -f "$patch_dir/redox.patch" ] || continue - echo " ℹ $patch_dir/redox.patch — check manually if kernel/base changed upstream" - done - fi -else - echo " WARNING: Could not find common ancestor with upstream" -fi - -# ── 3. 
Summary ───────────────────────────────────────────────────── -AHEAD=$(git rev-list --count "$UPSTREAM_REF..HEAD" 2>/dev/null || echo "?") -BEHIND=$(git rev-list --count "HEAD..$UPSTREAM_REF" 2>/dev/null || echo "?") -echo "" -echo "=== Sync Summary ===" -echo "Upstream: $UPSTREAM_REF" -echo "Local: HEAD ($(git rev-parse --short HEAD))" -echo "Ahead: $AHEAD Red Bear OS commits" -echo "Behind: $BEHIND upstream commits" - -if [ "$NO_MERGE" = 1 ]; then - echo "" - echo "To merge manually:" - echo " git rebase $UPSTREAM_REF" - exit 0 -fi - -if [ "$DRY_RUN" = "1" ]; then - echo "" - echo " [dry-run] Would rebase onto $UPSTREAM_REF" - exit 0 -fi - -# ── 3.5. Check for uncommitted local/ changes ────────────────────── -if [ "$NO_MERGE" = "0" ] && [ "$DRY_RUN" = "0" ]; then - LOCAL_CHANGES="" - LOCAL_UNTRACKED="" - if [ -d "local" ]; then - LOCAL_CHANGES=$(cd local && git diff --name-only HEAD 2>/dev/null || true) - LOCAL_UNTRACKED=$(cd local && git ls-files --others --exclude-standard 2>/dev/null || true) - fi - - # Also check for uncommitted changes to tracked local/ files from repo root - ROOT_LOCAL_CHANGES=$(git diff --name-only HEAD -- local/ 2>/dev/null || true) - - if [ -n "$LOCAL_CHANGES" ] || [ -n "$LOCAL_UNTRACKED" ] || [ -n "$ROOT_LOCAL_CHANGES" ]; then - echo "" - echo "!! WARNING: Uncommitted changes detected in local/" - if [ -n "$ROOT_LOCAL_CHANGES" ]; then - echo " Modified files:" - echo "$ROOT_LOCAL_CHANGES" | head -10 | while read -r f; do echo " $f"; done - TOTAL=$(echo "$ROOT_LOCAL_CHANGES" | grep -c .) - [ "$TOTAL" -gt 10 ] && echo " ... and $((TOTAL - 10)) more" - fi - if [ -n "$LOCAL_UNTRACKED" ]; then - echo " Untracked files (NOT protected by stash):" - echo "$LOCAL_UNTRACKED" | head -5 | while read -r f; do echo " $f"; done - TOTAL=$(echo "$LOCAL_UNTRACKED" | grep -c .) - [ "$TOTAL" -gt 5 ] && echo " ... and $((TOTAL - 5)) more" - fi - echo "" - echo " git stash does NOT protect untracked files." 
- echo " Commit your local/ changes before syncing, or use --force to proceed anyway." - - if [ "$FORCE" = "0" ]; then - echo "" - echo " ABORT: Uncommitted local/ changes detected." - echo " Commit your changes first: git add local/ && git commit -m 'WIP'" - echo " Or use --force if you understand the risks (untracked files will be LOST)." - exit 1 - else - # --force with untracked files requires explicit confirmation - if [ -n "$LOCAL_UNTRACKED" ]; then - echo "" - echo "!! DANGER: --force with untracked files will DELETE them permanently. !!" - echo " git stash does NOT protect untracked files." - echo " Untracked files found:" - echo "$LOCAL_UNTRACKED" | head -10 | while read -r f; do echo " $f"; done - TOTAL=$(echo "$LOCAL_UNTRACKED" | grep -c .) - [ "$TOTAL" -gt 10 ] && echo " ... and $((TOTAL - 10)) more" - echo "" - read -p " Type 'YES_DELETE' to confirm destruction of untracked local/ files: " CONFIRM - if [ "$CONFIRM" != "YES_DELETE" ]; then - echo " Aborted. Your untracked files are safe." - exit 1 - fi - echo " Proceeding with --force — untracked files WILL be deleted..." - else - echo " --force specified, proceeding (tracked changes will be stashed)..." - fi - fi - fi -fi - -# ── 4. Stash uncommitted changes ──────────────────────────────────── -STASHED=0 -if ! git diff --quiet 2>/dev/null || ! git diff --cached --quiet 2>/dev/null; then - echo "==> Stashing uncommitted changes..." - git stash push -u -m "redbear-sync-$(date +%Y%m%d-%H%M%S)" - STASHED=1 -fi - -PREV_HEAD=$(git rev-parse HEAD) - -# ── 4.5. Verify overlay integrity before rebase ──────────────────── -echo "==> Verifying Red Bear overlay integrity before rebase..." -BROKEN_SYMLINKS="" -while IFS= read -r link; do - if [ ! -e "$link" ]; then - BROKEN_SYMLINKS="$BROKEN_SYMLINKS - $link -> $(readlink "$link")" - fi -done < <(find recipes -maxdepth 3 -type l 2>/dev/null) - -if [ -n "$BROKEN_SYMLINKS" ]; then - echo "!! 
WARNING: Broken symlinks detected in recipes/:" - echo "$BROKEN_SYMLINKS" | head -20 - TOTAL=$(echo "$BROKEN_SYMLINKS" | grep -c .) - [ "$TOTAL" -gt 20 ] && echo " ... and $((TOTAL - 20)) more" - echo "" - echo " These symlinks may break further during rebase." - echo " Run ./local/scripts/apply-patches.sh after rebase to recreate them." -fi - -# Check that key local/patches exist -for patch_file in local/patches/kernel/redox.patch local/patches/base/redox.patch local/patches/relibc/redox.patch; do - if [ ! -f "$patch_file" ]; then - echo "!! CRITICAL: Missing patch file: $patch_file" - echo " Cannot recover from rebase failure without this patch." - if [ "$FORCE" = "0" ]; then - exit 1 - fi - fi -done - -# ── 5. Rebase ─────────────────────────────────────────────────────── -echo "" -echo "==> Rebasing Red Bear OS commits onto $UPSTREAM_REF..." -echo " (this replays our $AHEAD commits on top of updated upstream)" - -if git rebase "$UPSTREAM_REF"; then - echo "" - echo "==> Rebase successful." -else - echo "" - echo "!! Rebase conflict. Options:" - echo " 1. Resolve conflicts: edit files, git add, git rebase --continue" - echo " 2. Abort: git rebase --abort" - echo " 3. Nuclear option (DESTRUCTIVE — loses uncommitted work):" - echo " git rebase --abort" - echo " git reset --hard $UPSTREAM_REF" - echo " ./local/scripts/apply-patches.sh --force" - echo "" - echo " Patches for recovery: local/patches/build-system/" - echo " Previous HEAD: $PREV_HEAD" - echo "" - echo " IMPORTANT: Before using the nuclear option, ensure all local/ changes" - echo " are committed. The nuclear option does NOT preserve uncommitted work." - echo " To recover to previous state: git reset --hard $PREV_HEAD" - exit 1 -fi - -# ── 6. Restore stash ──────────────────────────────────────────────── -if [ "$STASHED" = 1 ]; then - echo "==> Restoring stashed changes..." - if git stash pop; then - echo " Stash restored successfully." - else - echo "!! Stash pop had conflicts." 
- echo " Your changes are preserved in the stash." - echo " Options:" - echo " 1. Resolve conflicts in the working tree" - echo " 2. git checkout --theirs . && git stash drop" - echo " 3. git reset --hard && git stash pop (try again on clean tree)" - echo " List stashes: git stash list" - fi -fi - -# ── 7. Verify symlinks ───────────────────────────────────────────── -echo "==> Verifying recipe patch symlinks..." -if [ -f local/scripts/apply-patches.sh ]; then - bash local/scripts/apply-patches.sh -else - echo " apply-patches.sh not found — verify symlinks manually" - ls -la recipes/core/kernel/redox.patch recipes/core/base/redox.patch -fi - -if [ -x local/scripts/verify-overlay-integrity.sh ]; then - echo "==> Verifying overlay integrity..." - local/scripts/verify-overlay-integrity.sh --repair -fi +GREEN='\033[1;32m' +BLUE='\033[1;34m' +NC='\033[0m' echo "" -echo "==> Sync complete." -echo "==> Guarding recipe durability..." -./local/scripts/guard-recipes.sh --restore 2>/dev/null || echo " (guard-recipes.sh not found — run manually)" -echo " Previous HEAD: $PREV_HEAD" -echo " New HEAD: $(git rev-parse HEAD)" +echo -e "${GREEN}sync-upstream.sh has been retired.${NC}" echo "" -echo "Next: make all CONFIG_NAME=redbear-full" +echo "Red Bear OS is now a release-based fork." +echo "Current baseline: 0.1.0 (f55acba68)" +echo "Sources are immutable — never auto-refreshed from upstream." 
+echo "" +echo -e "${BLUE}Available commands:${NC}" +echo " check-upstream-releases.sh See new Redox snapshots (read-only)" +echo " provision-release.sh Provision a new release" +echo " restore-sources.sh Restore sources from archives" +echo "" +exit 0 diff --git a/local/scripts/validate-source-trees.py b/local/scripts/validate-source-trees.py new file mode 100755 index 00000000..85f3e45f --- /dev/null +++ b/local/scripts/validate-source-trees.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +"""Validate that all source trees required by a build config exist.""" +import sys, tomllib +from pathlib import Path + +PROJECT_ROOT = Path(__file__).resolve().parents[2] +CONFIG = sys.argv[1] if len(sys.argv) > 1 else "redbear-full" + +def build_lookup(): + lookup = {} + for root in (Path("recipes"), Path("local/recipes")): + for rt in root.rglob("recipe.toml"): + parts = rt.parts + if "source" in parts or "target" in parts: + continue + pkg = rt.parent.name + if pkg not in lookup: + lookup[pkg] = rt.parent + return lookup + +def resolve_config(cp, visited=None): + if visited is None: visited = set() + cp = cp.resolve() + if cp in visited: return {} + visited.add(cp) + with open(cp, "rb") as f: c = tomllib.load(f) + pkgs = dict(c.get("packages", {})) + for inc in c.get("include", []): + ip = cp.parent / inc + if ip.exists(): + incd = resolve_config(ip, visited) + for k, v in pkgs.items(): incd[k] = v + pkgs = incd + return pkgs + +def main(): + config_path = Path("config") / f"{CONFIG}.toml" + if not config_path.exists(): + print(f"Config not found: {config_path}", file=sys.stderr) + return 1 + + lookup = build_lookup() + pkgs = resolve_config(config_path) + + print(f"=== Validating source trees for config: {CONFIG} ===") + missing = 0 + present = 0 + for pkg_name, pkg_conf in sorted(pkgs.items()): + if str(pkg_conf) == "ignore": continue + # Meta packages have no source requirement + if pkg_name in ("libgcc", "libstdcxx"): + continue + rd = lookup.get(pkg_name) + if not rd: + 
print(f" NOT FOUND: {pkg_name}") + missing += 1 + continue + src = rd / "source" + if src.is_dir() and any(src.iterdir()): + present += 1 + else: + print(f" MISSING: {str(rd)}") + missing += 1 + + print(f"\n Total (config): {present + missing}") + print(f" Present: {present}") + print(f" Missing: {missing}") + if missing: + print("\nTo restore: ./local/scripts/restore-sources.sh --release=0.1.0") + return 1 + print("All source trees present.") + return 0 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/local/scripts/validate-source-trees.sh b/local/scripts/validate-source-trees.sh new file mode 100755 index 00000000..7d7fac16 --- /dev/null +++ b/local/scripts/validate-source-trees.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# validate-source-trees.sh — Check all required source trees exist before building. +# Delegates to validate-source-trees.py for config parsing and validation. +set -eo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +CONFIG="${1:-redbear-full}" +cd "$PROJECT_ROOT" +exec python3 "$SCRIPT_DIR/validate-source-trees.py" "$CONFIG" diff --git a/local/scripts/verify-patches.sh b/local/scripts/verify-patches.sh new file mode 100755 index 00000000..734f2531 --- /dev/null +++ b/local/scripts/verify-patches.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +# verify-patches.sh — Check which Red Bear patches need rebasing against current source trees. +# +# Usage: +# ./local/scripts/verify-patches.sh [--component=base|kernel|relibc] [--all] +# +# Dry-runs all patches against their target source trees and reports: +# OK — patch applies cleanly +# REV — reversed/already applied (upstream absorbed) +# CONFLICT — genuine conflict, needs rebasing +# +# Exit code: number of CONFLICT patches + +set -eo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" +COMPONENT="${1:-all}" +MODE="${2:-}" + +cd "$PROJECT_ROOT" + +GREEN='\033[1;32m' +RED='\033[1;31m' +YELLOW='\033[1;33m' +NC='\033[0m' + +ok=0 +rev=0 +conflict=0 + +check_patches() { + local patch_dir="$1" + local target_dir="$2" + local label="$3" + + [ -d "$patch_dir" ] || return + [ -d "$target_dir" ] || { echo " ${RED}SKIP${NC} $label: target not found"; return; } + + echo "=== $label ===" + for patch in "$patch_dir"/*.patch; do + [ -f "$patch" ] || continue + local name=$(basename "$patch") + local result=$(patch -p1 --dry-run -d "$target_dir" < "$patch" 2>&1) || true + + if echo "$result" | grep -q 'Reversed\|previously applied'; then + echo " ${YELLOW}REV${NC} $name (upstream absorbed)" + rev=$((rev + 1)) + elif echo "$result" | grep -q 'FAILED\|hunks\? FAILED'; then + echo " ${RED}CONFLICT${NC} $name" + conflict=$((conflict + 1)) + else + echo " ${GREEN}OK${NC} $name" + ok=$((ok + 1)) + fi + done +} + +case "$COMPONENT" in + base|all) + check_patches "local/patches/base" "recipes/core/base/source" "base" + ;; +esac +case "$COMPONENT" in + kernel|all) + check_patches "local/patches/kernel" "recipes/core/kernel/source" "kernel" + # Fallback: kernel source may be nested from archive extraction + if [ ! 
 -d "recipes/core/kernel/source" ] && [ -d "recipes/core/kernel/kernel/source" ]; then + check_patches "local/patches/kernel" "recipes/core/kernel/kernel/source" "kernel" + fi + ;; +esac +case "$COMPONENT" in + relibc|all) + check_patches "local/patches/relibc" "recipes/core/relibc/source" "relibc" + ;; +esac + +echo "" +echo "=========================================" +echo " OK: $ok" +echo " Reversed: $rev (upstream absorbed)" +echo " Conflict: $conflict (needs rebase)" +echo "=========================================" + +exit $conflict diff --git a/local/scripts/verify-release-completeness.sh b/local/scripts/verify-release-completeness.sh new file mode 100755 index 00000000..1d4a8549 --- /dev/null +++ b/local/scripts/verify-release-completeness.sh @@ -0,0 +1,771 @@ +#!/usr/bin/env bash +# verify-release-completeness.sh — Run 7 mechanical completeness gates. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +RELEASE="" +USE_STAGING=0 +FAIL_COUNT=0 + +declare -A ENTRY_PRESENT=() +declare -A ENTRY_FIELDS=() +declare -A CONFIG_VISITED=() +declare -A CONFIG_PACKAGES=() +declare -A RECIPE_CACHE=() +declare -A CLOSURE_RECIPE_KEYS=() +declare -a CONFIG_ORDER=() + +usage() { + cat <<EOF +Usage: $0 --release=<version> [--staging] + +Run the 7 mechanical completeness gates for a Red Bear release directory. 
+ +Options: + --release= Release version (for example 0.2.0) + --staging Verify sources/.staging/redbear- + -h, --help Show this help +EOF +} + +pass_gate() { + printf 'PASS: %s — %s\n' "$1" "$2" +} + +fail_gate() { + FAIL_COUNT=$((FAIL_COUNT + 1)) + printf 'FAIL: %s — %s\n' "$1" "$2" >&2 +} + +trim() { + local value="$1" + value="${value#"${value%%[![:space:]]*}"}" + value="${value%"${value##*[![:space:]]}"}" + printf '%s' "$value" +} + +json_unquote() { + local value="$1" + if [[ "$value" == '"'*'"' ]]; then + value="${value:1:${#value}-2}" + fi + printf '%s' "$value" +} + +json_tokenize() { + local json_file="$1" + + awk ' + BEGIN { + in_string = 0 + escape = 0 + token = "" + } + { + line = $0 "\n" + for (i = 1; i <= length(line); i++) { + c = substr(line, i, 1) + if (in_string) { + token = token c + if (escape) { + escape = 0 + continue + } + if (c == "\\") { + escape = 1 + continue + } + if (c == "\"") { + print token + token = "" + in_string = 0 + } + continue + } + + if (c ~ /[[:space:]]/) { + continue + } + if (c == "\"") { + in_string = 1 + escape = 0 + token = "\"" + continue + } + if (c ~ /[\{\}\[\]:,]/) { + print c + continue + } + + token = c + while (i + 1 <= length(line)) { + next_c = substr(line, i + 1, 1) + if (next_c ~ /[[:space:]\{\}\[\]:,]/) { + break + } + i++ + token = token next_c + } + print token + token = "" + } + } + END { + if (in_string) { + exit 1 + } + } + ' "$json_file" +} + +declare -a TOKENS=() +TOKEN_INDEX=0 +CURRENT_TOKEN='' + +current_token() { + if [ "$TOKEN_INDEX" -ge "${#TOKENS[@]}" ]; then + CURRENT_TOKEN='' + return + fi + CURRENT_TOKEN="${TOKENS[$TOKEN_INDEX]}" +} + +consume_token() { + current_token + TOKEN_INDEX=$((TOKEN_INDEX + 1)) +} + +expect_token() { + local expected="$1" + consume_token + if [ "$CURRENT_TOKEN" != "$expected" ]; then + printf 'JSON parse error: expected %s but found %s\n' "$expected" "$CURRENT_TOKEN" >&2 + exit 1 + fi +} + +skip_json_value() { + current_token + + case "$CURRENT_TOKEN" in + '{') + 
consume_token >/dev/null + current_token + while [ "$CURRENT_TOKEN" != '}' ]; do + consume_token >/dev/null + expect_token ':' + skip_json_value + current_token + if [ "$CURRENT_TOKEN" = ',' ]; then + consume_token >/dev/null + fi + current_token + done + expect_token '}' + ;; + '[') + consume_token >/dev/null + current_token + while [ "$CURRENT_TOKEN" != ']' ]; do + skip_json_value + current_token + if [ "$CURRENT_TOKEN" = ',' ]; then + consume_token >/dev/null + fi + current_token + done + expect_token ']' + ;; + *) + consume_token >/dev/null + ;; + esac +} + +store_entry_scalar() { + local entry="$1" + local key="$2" + local raw="$3" + local value="$raw" + + if [[ "$raw" == '"'*'"' ]]; then + value="$(json_unquote "$raw")" + fi + + ENTRY_FIELDS["$entry:$key"]="$value" +} + +parse_entry_object() { + local entry="$1" + local prefix="$2" + local field raw + + expect_token '{' + current_token + while [ "$CURRENT_TOKEN" != '}' ]; do + consume_token + field="$(json_unquote "$CURRENT_TOKEN")" + expect_token ':' + + current_token + case "$CURRENT_TOKEN" in + '{') + parse_entry_object "$entry" "${prefix}${field}." + ;; + '[') + skip_json_value + ;; + *) + consume_token + raw="$CURRENT_TOKEN" + store_entry_scalar "$entry" "${prefix}${field}" "$raw" + ;; + esac + + current_token + if [ "$CURRENT_TOKEN" = ',' ]; then + consume_token >/dev/null + fi + current_token + done + expect_token '}' +} + +parse_entries_object() { + local entry_name + + expect_token '{' + current_token + while [ "$CURRENT_TOKEN" != '}' ]; do + consume_token + entry_name="$(json_unquote "$CURRENT_TOKEN")" + ENTRY_PRESENT["$entry_name"]=1 + expect_token ':' + parse_entry_object "$entry_name" "" + + current_token + if [ "$CURRENT_TOKEN" = ',' ]; then + consume_token >/dev/null + fi + current_token + done + expect_token '}' +} + +parse_manifest_json() { + local manifest_json="$1" + local key + + if ! 
mapfile -t TOKENS < <(json_tokenize "$manifest_json"); then + printf 'failed to tokenize manifest JSON: %s\n' "$manifest_json" >&2 + exit 1 + fi + + TOKEN_INDEX=0 + expect_token '{' + current_token + while [ "$CURRENT_TOKEN" != '}' ]; do + consume_token + key="$(json_unquote "$CURRENT_TOKEN")" + expect_token ':' + if [ "$key" = 'entries' ]; then + parse_entries_object + else + skip_json_value + fi + current_token + if [ "$CURRENT_TOKEN" = ',' ]; then + consume_token >/dev/null + fi + current_token + done + expect_token '}' +} + +entry_field() { + printf '%s' "${ENTRY_FIELDS["$1:$2"]-}" +} + +first_nonempty_field() { + local entry="$1" + shift + local field value + + for field in "$@"; do + value="$(entry_field "$entry" "$field")" + if [ -n "$value" ] && [ "$value" != 'null' ] && [ "$value" != 'false' ]; then + printf '%s' "$value" + return + fi + done + + printf '' +} + +resolve_config_path() { + local base_file="$1" + local include_rel="$2" + local base_dir resolved_dir + + base_dir="$(cd "$(dirname "$base_file")" && pwd)" + resolved_dir="$(cd "$base_dir/$(dirname "$include_rel")" 2>/dev/null && pwd)" || return 1 + printf '%s/%s' "$resolved_dir" "$(basename "$include_rel")" +} + +collect_config_closure() { + local config_file="$1" + local rel_path section line trimmed include_text include_rel matched_include package_name package_value resolved + + if [ ! 
-f "$config_file" ]; then + printf 'missing config file in repo: %s\n' "$config_file" >&2 + exit 1 + fi + + rel_path="${config_file#"$PROJECT_ROOT/config/"}" + if [ "${CONFIG_VISITED["$rel_path"]-}" = '1' ]; then + return + fi + + CONFIG_VISITED["$rel_path"]=1 + CONFIG_ORDER+=("$rel_path") + section='' + + while IFS= read -r line || [ -n "$line" ]; do + trimmed="$(trim "$line")" + + if [[ "$trimmed" =~ ^include[[:space:]]*=[[:space:]]*\[(.*)\][[:space:]]*$ ]]; then + include_text="${BASH_REMATCH[1]}" + while [[ "$include_text" =~ \"([^\"]+)\" ]]; do + matched_include="${BASH_REMATCH[0]}" + include_rel="${BASH_REMATCH[1]}" + resolved="$(resolve_config_path "$config_file" "$include_rel")" || { + printf 'cannot resolve include %s from %s\n' "$include_rel" "$config_file" >&2 + exit 1 + } + collect_config_closure "$resolved" + include_text=${include_text#*${matched_include}} + done + continue + fi + + trimmed="${trimmed%%#*}" + trimmed="$(trim "$trimmed")" + [ -z "$trimmed" ] && continue + + if [[ "$trimmed" =~ ^\[(.+)\]$ ]]; then + section="${BASH_REMATCH[1]}" + continue + fi + + if [ "$section" = 'packages' ] && [[ "$trimmed" =~ ^([A-Za-z0-9._+-]+)[[:space:]]*=[[:space:]]*(.+)$ ]]; then + package_name="${BASH_REMATCH[1]}" + package_value="$(trim "${BASH_REMATCH[2]}")" + if [[ "$package_value" =~ ^\"ignore\"$ ]]; then + CONFIG_PACKAGES["$package_name"]='ignore' + else + CONFIG_PACKAGES["$package_name"]='present' + fi + fi + done < "$config_file" +} + +resolve_recipe_key() { + local package_name="$1" + local recipe_file match rel_path recipe_key + local -a matches=() + + if [ -n "${RECIPE_CACHE["$package_name"]-}" ]; then + printf '%s' "${RECIPE_CACHE["$package_name"]}" + return + fi + + while IFS= read -r recipe_file; do + [ -n "$recipe_file" ] || continue + matches+=("$recipe_file") + done < <(find -L "$PROJECT_ROOT/recipes" -path "*/${package_name}/recipe.toml" -not -path '*/source/*' -print 2>/dev/null | sort) + + if [ "${#matches[@]}" -eq 1 ]; then + 
match="${matches[0]}" + rel_path="${match#"$PROJECT_ROOT/recipes/"}" + recipe_key="${rel_path%/recipe.toml}" + RECIPE_CACHE["$package_name"]="$recipe_key" + printf '%s' "$recipe_key" + return + fi + + if [ "${#matches[@]}" -eq 0 ]; then + RECIPE_CACHE["$package_name"]='' + printf '' + return + fi + + printf '__AMBIGUOUS__:' + printf '%s' "${matches[0]#"$PROJECT_ROOT/recipes/"}" + local index + for ((index = 1; index < ${#matches[@]}; index++)); do + printf ',%s' "${matches[$index]#"$PROJECT_ROOT/recipes/"}" + done +} + +verify_archive_file() { + local entry="$1" + local kind="$2" + local directory="$3" + local file_name hash_value archive_path computed_hash + + file_name="$(first_nonempty_field "$entry" "$kind" "$kind.path")" + [ -n "$file_name" ] || return 0 + + case "$kind" in + archive) hash_value="$(first_nonempty_field "$entry" 'archive.blake3' 'blake3')" ;; + snapshot) hash_value="$(first_nonempty_field "$entry" 'snapshot.blake3' 'blake3')" ;; + *) hash_value="$(first_nonempty_field "$entry" 'blake3')" ;; + esac + + archive_path="$directory/$file_name" + if [ ! 
-f "$archive_path" ]; then + printf ' - %s: missing %s file %s\n' "$entry" "$kind" "$archive_path" >&2 + return 1 + fi + if [ -z "$hash_value" ]; then + printf ' - %s: missing BLAKE3 for %s file %s\n' "$entry" "$kind" "$file_name" >&2 + return 1 + fi + + computed_hash="$(b3sum "$archive_path" | awk '{print $1}')" + if [ "$computed_hash" != "$hash_value" ]; then + printf ' - %s: checksum mismatch for %s (expected %s, got %s)\n' "$entry" "$file_name" "$hash_value" "$computed_hash" >&2 + return 1 + fi + + return 0 +} + +run_gate_closure_completeness() { + local package_name recipe_key + local closure_ok=0 + local -a closure_missing=() closure_ambiguous=() + + for package_name in "${CONFIG_PACKAGES_SORTED[@]}"; do + recipe_key="$(resolve_recipe_key "$package_name")" + if [ -z "$recipe_key" ]; then + closure_missing+=("$package_name (no recipe path under recipes/)") + continue + fi + if [[ "$recipe_key" == __AMBIGUOUS__:* ]]; then + closure_ambiguous+=("$package_name (${recipe_key#__AMBIGUOUS__:})") + continue + fi + + CLOSURE_RECIPE_KEYS["$package_name"]="$recipe_key" + if [ -n "${ENTRY_PRESENT["$recipe_key"]-}" ]; then + closure_ok=$((closure_ok + 1)) + else + closure_missing+=("$package_name ($recipe_key)") + fi + done + + if [ "${#closure_missing[@]}" -eq 0 ] && [ "${#closure_ambiguous[@]}" -eq 0 ]; then + pass_gate '1/7 closure completeness' "$closure_ok closure packages all have manifest entries" + return + fi + + if [ "${#closure_missing[@]}" -gt 0 ]; then + printf ' Missing closure entries:\n' >&2 + printf ' %s\n' "${closure_missing[@]}" >&2 + fi + if [ "${#closure_ambiguous[@]}" -gt 0 ]; then + printf ' Ambiguous recipe matches:\n' >&2 + printf ' %s\n' "${closure_ambiguous[@]}" >&2 + fi + fail_gate '1/7 closure completeness' 'one or more closure packages could not be matched to a manifest entry' +} + +run_gate_git_provenance() { + local package_name recipe_key entry_type entry_rev + local git_checked=0 + local -a blank_rev=() + + for package_name in 
"${CONFIG_PACKAGES_SORTED[@]}"; do + recipe_key="${CLOSURE_RECIPE_KEYS["$package_name"]-}" + [ -n "$recipe_key" ] || continue + [ -n "${ENTRY_PRESENT["$recipe_key"]-}" ] || continue + entry_type="$(first_nonempty_field "$recipe_key" 'type')" + if [ "$entry_type" = 'git' ]; then + git_checked=$((git_checked + 1)) + entry_rev="$(trim "$(first_nonempty_field "$recipe_key" 'rev')")" + if [ -z "$entry_rev" ]; then + blank_rev+=("$recipe_key") + fi + fi + done + + if [ "${#blank_rev[@]}" -eq 0 ]; then + pass_gate '2/7 git provenance' "$git_checked closure git entries have non-blank rev values" + return + fi + + printf ' Blank rev entries:\n' >&2 + printf ' %s\n' "${blank_rev[@]}" >&2 + fail_gate '2/7 git provenance' 'one or more closure git entries have a blank rev' +} + +run_gate_archive_coverage() { + local entry_name archive_name snapshot_name target_name meta_value + local total_entries=0 + local -a coverage_missing=() + + while IFS= read -r entry_name; do + [ -n "$entry_name" ] || continue + total_entries=$((total_entries + 1)) + archive_name="$(first_nonempty_field "$entry_name" 'archive' 'archive.path')" + snapshot_name="$(first_nonempty_field "$entry_name" 'snapshot' 'snapshot.path')" + target_name="$(first_nonempty_field "$entry_name" 'target' 'same_as.target')" + meta_value="$(first_nonempty_field "$entry_name" 'meta' 'meta.kind')" + if [ -z "$archive_name" ] && [ -z "$snapshot_name" ] && [ -z "$target_name" ] && [ -z "$meta_value" ]; then + coverage_missing+=("$entry_name") + fi + done < <(printf '%s\n' "${!ENTRY_PRESENT[@]}" | sort) + + if [ "${#coverage_missing[@]}" -eq 0 ]; then + pass_gate '3/7 archive coverage' "$total_entries manifest entries all have archive, snapshot, target, or meta resolution" + return + fi + + printf ' Entries without resolution path:\n' >&2 + printf ' %s\n' "${coverage_missing[@]}" >&2 + fail_gate '3/7 archive coverage' 'one or more manifest entries have no resolution path' +} + +run_gate_archive_integrity() { + local entry_name 
archive_name snapshot_name + local archive_checks=0 + local -a integrity_failures=() + + while IFS= read -r entry_name; do + [ -n "$entry_name" ] || continue + archive_name="$(first_nonempty_field "$entry_name" 'archive' 'archive.path')" + snapshot_name="$(first_nonempty_field "$entry_name" 'snapshot' 'snapshot.path')" + + if [ -n "$archive_name" ]; then + archive_checks=$((archive_checks + 1)) + if ! verify_archive_file "$entry_name" archive "$RELEASE_DIR/tarballs"; then + integrity_failures+=("$entry_name") + fi + fi + if [ -n "$snapshot_name" ]; then + archive_checks=$((archive_checks + 1)) + if ! verify_archive_file "$entry_name" snapshot "$RELEASE_DIR/snapshots"; then + integrity_failures+=("$entry_name") + fi + fi + done < <(printf '%s\n' "${!ENTRY_PRESENT[@]}" | sort) + + if [ "${#integrity_failures[@]}" -eq 0 ]; then + pass_gate '4/7 archive integrity' "$archive_checks archive or snapshot payloads exist and match their BLAKE3 hashes" + return + fi + + fail_gate '4/7 archive integrity' 'one or more archive or snapshot payloads are missing or have hash mismatches' +} + +run_gate_same_as_validation() { + local entry_name entry_type target_name next_target next_type seen cursor + local same_as_checked=0 + local -a same_as_missing=() same_as_cycles=() + + while IFS= read -r entry_name; do + [ -n "$entry_name" ] || continue + entry_type="$(first_nonempty_field "$entry_name" 'type')" + target_name="$(first_nonempty_field "$entry_name" 'target' 'same_as.target')" + + if [ "$entry_type" != 'same_as' ] && [ -z "$(entry_field "$entry_name" 'same_as.target')" ] && [ -z "$target_name" ]; then + continue + fi + + same_as_checked=$((same_as_checked + 1)) + if [ -z "$target_name" ]; then + same_as_missing+=("$entry_name (blank target)") + continue + fi + if [ -z "${ENTRY_PRESENT["$target_name"]-}" ]; then + same_as_missing+=("$entry_name -> $target_name") + continue + fi + + seen="|$entry_name|" + cursor="$target_name" + while :; do + next_target="$(first_nonempty_field 
"$cursor" 'target' 'same_as.target')" + next_type="$(first_nonempty_field "$cursor" 'type')" + if [ "$next_type" != 'same_as' ] && [ -z "$(entry_field "$cursor" 'same_as.target')" ]; then + break + fi + if [ -z "$next_target" ]; then + same_as_missing+=("$cursor (blank target)") + break + fi + if [[ "$seen" == *"|$next_target|"* ]]; then + same_as_cycles+=("$entry_name -> $next_target") + break + fi + if [ -z "${ENTRY_PRESENT["$next_target"]-}" ]; then + same_as_missing+=("$cursor -> $next_target") + break + fi + seen+="$cursor|" + cursor="$next_target" + done + done < <(printf '%s\n' "${!ENTRY_PRESENT[@]}" | sort) + + if [ "${#same_as_missing[@]}" -eq 0 ] && [ "${#same_as_cycles[@]}" -eq 0 ]; then + pass_gate '5/7 same_as validation' "$same_as_checked same_as links resolve cleanly without cycles" + return + fi + + if [ "${#same_as_missing[@]}" -gt 0 ]; then + printf ' Missing same_as targets:\n' >&2 + printf ' %s\n' "${same_as_missing[@]}" >&2 + fi + if [ "${#same_as_cycles[@]}" -gt 0 ]; then + printf ' same_as cycles:\n' >&2 + printf ' %s\n' "${same_as_cycles[@]}" >&2 + fi + fail_gate '5/7 same_as validation' 'same_as target resolution failed or contains a cycle' +} + +run_gate_config_closure() { + local config_rel + local -a missing_configs=() + + for config_rel in "${CONFIG_ORDER[@]}"; do + if [ -f "$RELEASE_CONFIG_DIR/$config_rel" ] || [ -f "$RELEASE_CONFIG_DIR/$(basename "$config_rel")" ]; then + continue + fi + missing_configs+=("$config_rel") + done + + if [ "${#missing_configs[@]}" -eq 0 ]; then + pass_gate '6/7 config closure' "${#CONFIG_ORDER[@]} reachable config files are present in configs/" + return + fi + + printf ' Missing archived configs:\n' >&2 + printf ' %s\n' "${missing_configs[@]}" >&2 + fail_gate '6/7 config closure' 'one or more reachable config files are missing from configs/' +} + +run_gate_dirty_tree() { + local package_name recipe_key entry_type source_dir + local git_dirty_checked=0 + local -a dirty_recipes=() + + for package_name in 
"${CONFIG_PACKAGES_SORTED[@]}"; do + recipe_key="${CLOSURE_RECIPE_KEYS["$package_name"]-}" + [ -n "$recipe_key" ] || continue + [ -n "${ENTRY_PRESENT["$recipe_key"]-}" ] || continue + entry_type="$(first_nonempty_field "$recipe_key" 'type')" + if [ "$entry_type" != 'git' ]; then + continue + fi + + git_dirty_checked=$((git_dirty_checked + 1)) + source_dir="$PROJECT_ROOT/recipes/$recipe_key/source" + if ! git -C "$source_dir" rev-parse --is-inside-work-tree >/dev/null 2>&1; then + dirty_recipes+=("$recipe_key (source is not a git worktree: $source_dir)") + continue + fi + if ! git -C "$source_dir" diff --quiet; then + dirty_recipes+=("$recipe_key") + fi + done + + if [ "${#dirty_recipes[@]}" -eq 0 ]; then + pass_gate '7/7 dirty-tree check' "$git_dirty_checked closure git source trees are clean" + return + fi + + printf ' Dirty git source trees:\n' >&2 + printf ' %s\n' "${dirty_recipes[@]}" >&2 + fail_gate '7/7 dirty-tree check' 'one or more closure git source trees have uncommitted changes' +} + +while [ $# -gt 0 ]; do + case "$1" in + --release=*) RELEASE="${1#*=}" ;; + --staging) USE_STAGING=1 ;; + -h|--help) usage; exit 0 ;; + *) printf 'Unknown argument: %s\n' "$1" >&2; usage >&2; exit 1 ;; + esac + shift +done + +if [ -z "$RELEASE" ]; then + printf 'ERROR: --release is required\n' >&2 + usage >&2 + exit 1 +fi + +if ! command -v b3sum >/dev/null 2>&1; then + printf 'ERROR: b3sum is required\n' >&2 + exit 1 +fi +if ! command -v git >/dev/null 2>&1; then + printf 'ERROR: git is required\n' >&2 + exit 1 +fi + +if [ "$USE_STAGING" -eq 1 ]; then + RELEASE_DIR="$PROJECT_ROOT/sources/.staging/redbear-$RELEASE" +else + RELEASE_DIR="$PROJECT_ROOT/sources/redbear-$RELEASE" +fi + +MANIFEST_JSON="$RELEASE_DIR/manifest.json" +RELEASE_CONFIG_DIR="$RELEASE_DIR/configs" +ROOT_CONFIG="$PROJECT_ROOT/config/redbear-full.toml" + +if [ ! 
-d "$RELEASE_DIR" ]; then + printf 'ERROR: release directory not found: %s\n' "$RELEASE_DIR" >&2 + exit 1 +fi + +collect_config_closure "$ROOT_CONFIG" + +CONFIG_PACKAGES_SORTED=() +while IFS= read -r package_name; do + [ -n "$package_name" ] || continue + if [ "${CONFIG_PACKAGES["$package_name"]}" = 'present' ]; then + CONFIG_PACKAGES_SORTED+=("$package_name") + fi +done < <(printf '%s\n' "${!CONFIG_PACKAGES[@]}" | sort) + +if [ ! -f "$MANIFEST_JSON" ]; then + fail_gate '1/7 closure completeness' 'manifest.json is missing, so manifest-backed checks cannot run' + fail_gate '2/7 git provenance' 'manifest.json is missing, so git provenance cannot be verified' + fail_gate '3/7 archive coverage' 'manifest.json is missing, so resolution paths cannot be verified' + fail_gate '4/7 archive integrity' 'manifest.json is missing, so archive hashes cannot be verified' + fail_gate '5/7 same_as validation' 'manifest.json is missing, so same_as targets cannot be verified' + run_gate_config_closure + fail_gate '7/7 dirty-tree check' 'manifest.json is missing, so closure git source trees cannot be verified' +else + parse_manifest_json "$MANIFEST_JSON" + run_gate_closure_completeness + run_gate_git_provenance + run_gate_archive_coverage + run_gate_archive_integrity + run_gate_same_as_validation + run_gate_config_closure + run_gate_dirty_tree +fi + +printf '\n' +if [ "$FAIL_COUNT" -eq 0 ]; then + printf 'Release completeness PASSED for %s\n' "$RELEASE_DIR" + exit 0 +fi + +printf 'Release completeness FAILED for %s (%d gate(s) failed)\n' "$RELEASE_DIR" "$FAIL_COUNT" >&2 +exit 1 diff --git a/local/scripts/verify-sources-archived.sh b/local/scripts/verify-sources-archived.sh new file mode 100755 index 00000000..7527ab49 --- /dev/null +++ b/local/scripts/verify-sources-archived.sh @@ -0,0 +1,140 @@ +#!/usr/bin/env bash +# verify-sources-archived.sh — Verify release archive integrity. 
+# +# Usage: +# ./local/scripts/verify-sources-archived.sh --release=0.1.0 +# +# Checks that BLAKE3SUMS file exists and all archives match. +# If archives are in sources/<release>/ format, verifies those too. +# Returns non-zero if any archive is missing or corrupted. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +RELEASE="" + +usage() { + cat <<EOF +Usage: $0 --release=<version> + +Verify release archive integrity. + +Options: + --release=<version> Release version (e.g., 0.1.0) + -h, --help Show this help +EOF +} + +while [ $# -gt 0 ]; do + case "$1" in + --release=*) RELEASE="${1#*=}" ;; + -h|--help) usage; exit 0 ;; + *) echo "Unknown: $1"; usage >&2; exit 1 ;; + esac + shift +done + +if [ -z "$RELEASE" ]; then + echo "ERROR: --release is required" >&2 + exit 1 +fi + +ARCHIVE_DIR="$PROJECT_ROOT/sources/redbear-$RELEASE" +MANIFEST="$ARCHIVE_DIR/manifest.txt" + +GREEN='\033[1;32m' +RED='\033[1;31m' +NC='\033[0m' + +pass() { echo -e "${GREEN}PASS${NC}: $*"; } +fail() { echo -e "${RED}FAIL${NC}: $*"; } + +errors=0 + +# 1. Verify .complete sentry exists (release is sealed) +if [ -f "$ARCHIVE_DIR/.complete" ]; then + pass ".complete sentry: $(cat "$ARCHIVE_DIR/.complete")" +else + fail ".complete sentry NOT FOUND — release may be incomplete or corrupted" + errors=$((errors + 1)) +fi + +# 2. Verify configs +if [ -d "$ARCHIVE_DIR/configs" ]; then + config_count=$(ls "$ARCHIVE_DIR/configs"/*.toml 2>/dev/null | wc -l) + pass "configs: $config_count files" +else + fail "configs directory not found" + errors=$((errors + 1)) +fi + +# 3. Verify patches +if [ -d "$ARCHIVE_DIR/patches" ]; then + patch_count=$(ls "$ARCHIVE_DIR/patches"/*.patch 2>/dev/null | wc -l) + pass "patches: $patch_count files" +fi + +SOURCES_TARGET="$PROJECT_ROOT/sources/x86_64-unknown-redox" + +# 4. 
Check for BLAKE3SUMS +if [ -f "$ARCHIVE_DIR/BLAKE3SUMS" ]; then + pass "BLAKE3SUMS present ($(wc -l < "$ARCHIVE_DIR/BLAKE3SUMS") entries)" + # Verify checksums against actual archive files + verified=0 + failed_checksums=0 + while read -r hash filename; do + [ -z "$hash" ] && continue + archive_path="$ARCHIVE_DIR/tarballs/$filename" + if [ ! -f "$archive_path" ]; then + archive_path="$ARCHIVE_DIR/snapshots/$filename" + fi + if [ ! -f "$archive_path" ]; then + fail "archive missing: $filename" + errors=$((errors + 1)) + continue + fi + if command -v b3sum >/dev/null 2>&1; then + computed=$(b3sum "$archive_path" | awk '{print $1}') + else + fail "b3sum not available — cannot verify BLAKE3SUMS" + errors=$((errors + 1)) + break + fi + if [ "$computed" != "$hash" ]; then + fail "checksum mismatch: $filename (expected $hash, got $computed)" + failed_checksums=$((failed_checksums + 1)) + errors=$((errors + 1)) + else + verified=$((verified + 1)) + fi + done < "$ARCHIVE_DIR/BLAKE3SUMS" + if [ "$verified" -gt 0 ]; then + pass "checksums verified: $verified archives" + fi + if [ "$failed_checksums" -gt 0 ]; then + fail "$failed_checksums checksum mismatches" + fi +else + fail "BLAKE3SUMS not found in $ARCHIVE_DIR" + errors=$((errors + 1)) +fi + +# 5. 
Count archives in sources/<release>/ +SOURCES_TARGET="$PROJECT_ROOT/sources/x86_64-unknown-redox" +if [ -d "$ARCHIVE_DIR/tarballs" ]; then + archive_count=$(ls "$ARCHIVE_DIR/tarballs"/*.tar.gz 2>/dev/null | wc -l) + pass "source archives: $archive_count files in $ARCHIVE_DIR/tarballs/" +fi + +echo "" +if [ "$errors" -eq 0 ]; then + echo -e "${GREEN}=========================================${NC}" + echo -e "${GREEN} Release $RELEASE: VERIFIED${NC}" + echo -e "${GREEN}=========================================${NC}" +else + echo -e "${RED}=========================================${NC}" + echo -e "${RED} Release $RELEASE: $errors error(s)${NC}" + echo -e "${RED}=========================================${NC}" + exit 1 +fi diff --git a/mk/config.mk b/mk/config.mk index a31e2890..f6657906 100644 --- a/mk/config.mk +++ b/mk/config.mk @@ -101,6 +101,11 @@ endif ifeq ($(REPO_OFFLINE),1) export COOKBOOK_OFFLINE=true endif +ifneq ($(REDBEAR_RELEASE),) + export REDBEAR_RELEASE + REPO_OFFLINE=1 + export COOKBOOK_OFFLINE=true +endif ifeq ($(REPO_DEBUG),1) export COOKBOOK_NOSTRIP=true export COOKBOOK_DEBUG=true diff --git a/mk/repo.mk b/mk/repo.mk index 3e0d810b..ea6d55a1 100644 --- a/mk/repo.mk +++ b/mk/repo.mk @@ -39,6 +39,9 @@ endif # Fetch all recipes source or binary from filesystem config fetch: $(FSTOOLS_TAG) FORCE +ifneq ($(REDBEAR_RELEASE),) + $(error fetch is disabled in release mode (REDBEAR_RELEASE=$(REDBEAR_RELEASE)). Sources are immutable.) 
+endif ifeq ($(PODMAN_BUILD),1) $(PODMAN_RUN) make $@ else diff --git a/sources/redbear-0.1.0/BLAKE3SUMS b/sources/redbear-0.1.0/BLAKE3SUMS new file mode 100644 index 00000000..9d658362 --- /dev/null +++ b/sources/redbear-0.1.0/BLAKE3SUMS @@ -0,0 +1,216 @@ +a31ebd5556523f41a65224633177608303909d2ee332a67d2dfa39868915606a archives-zstd.tar.gz +ea059a7222b0b79f1d4224fe844940e2bd5362247df94854f57bcf64b7fd3eb5 archives-zstd-v1.5.7-patched.tar.gz +3ee6d3d961b7671a3a7978222fd2d124934a2047797bbb3341438b2251d50ef0 branding-redbear-release-vunknown-patched.tar.gz +61983d26e05966da0a5e0abb0d68f719521aa416fd3aeadd8b93a8304fc84a60 core-base-initfs.tar.gz +3674a72d65977c03422127fee2b961ff1997f0854ad44f1651410840d37152ab core-base-initfs-v463f76b9-patched.tar.gz +61983d26e05966da0a5e0abb0d68f719521aa416fd3aeadd8b93a8304fc84a60 core-base.tar.gz +1415356f8557f1f3b5568eff10fcb99f87c754fcad254cde72caf003282270e7 core-base-v463f76b9-patched.tar.gz +aca55a89de555eb722bd002ab3f707e964001b9e2b62ca32168feda3ab94672f core-base-v463f76b-patched.tar.gz +76695e0ee48f379ff4301769e1587a5239c60b7796760c1d24b3925198a31dea core-bootloader.tar.gz +5af476ccd1bc93c89d0c650a57a17fd98b4bb2a4d2ad4af8b50b3f24523ed87b core-bootloader-vb22a35c-patched.tar.gz +e5652eaf14965efeaebd0aba5d6ddd205ba3308567693ddc93592ab9c32e4620 core-coreutils.tar.gz +ce33a62697205ee82ca1d7c4ce07fe5031307f8034d20851f4ee2ea49887e4ce core-coreutils-v5559e20-patched.tar.gz +19be72b52f4595797ecc72337dfad208a21e0765feff8768e653748de6033ae4 core-ext4d.tar.gz +f2906b6101db9842c6618978d97026ae6dbc9791c7410a98db84500787ded5b2 core-ext4d-vunknown-patched.tar.gz +9bc43eb5601d4f7d7a4bfed5b2743f2c48b9fb60ad2b1ebf41aec32ad7c0d8b1 core-extrautils.tar.gz +4346bdabe83b31fe4adf7cbddfa6674ec7b31f20bc25c7d8c6c5aebdd8e5b571 core-extrautils-vfb66941-patched.tar.gz +ec501aa5fd302e853befd2119a44f4040d3dda09c0e704949c39d507d52354e9 core-fatd.tar.gz +cf45041abc4d198467091cdc16d5d8375b2545521dc986130f01abd46f8e0c6d core-fatd-vunknown-patched.tar.gz 
+cc4e08580e1d2ea4f4d98fd17f17c03894984ca166b6cb6ab6c38946c2301a07 core-findutils.tar.gz +fedfb7851f7bfcbf75e4980b04ed59d4ac524f9d9a3b73786b6e5469c876962b core-findutils-v116c044-patched.tar.gz +c59f3bec37b179df5e895de1fa726a0b27c7979f6c9d023e1167db1443bad9b7 core-grub-v2.12-patched.tar.gz +844ddbc8c107f2d7626f14303092aa40468c439e23c1a2aeaca77e520cbd51cb core-installer.tar.gz +a791231835630e84da0f4fd45f4d39d7e1ae92a877baa3756ca4d4bddca98273 core-installer-v948bfdc-patched.tar.gz +54c14322d0aa2dba5077fcfbb35b83f3b818411e2b65f38857fc4c8cf040b8b3 core-ion.tar.gz +dfc55adc306c136ddc4d754256e2d2f1474c4c44e5e40da1ae03faa32f4684fb core-ion-v1440704f-patched.tar.gz +b65b124363fda03508fa2aebb0b0bc1351a3cea0af6cb53c123487c6e5688e36 core-kernel-v866dfad-patched.tar.gz +c5f66c8e0c47bde679b2d2e3f6d4f4d9b7072925dd3923664f9b057743bd54e4 core-netdb.tar.gz +cf4dc0636fe527815efe23a7c4004728d43d56ca07fab785170dca39b2ca700d core-netdb-v2c15606-patched.tar.gz +e036b781f0e5570d2c9ed99a0bf38468fd4322409bc6f2bbc8e1108debd915cc core-netutils.tar.gz +0724f5e41c8c5cdeaae8e1c23eef2832f05ae2b94e7ef96bfff8d2178bcc174c core-netutils-v40a573b-patched.tar.gz +5a7f70553dd588fa65332e12cc21ce59de3dc05dbac73064dad70d29e0e26511 core-pkgutils.tar.gz +787557d5430033d0eed827e029a23f56439c4c76bc12f639c32245fccaec3f9b core-pkgutils-v70c5067-patched.tar.gz +889dd144e5d6c8a90da00d83545686ae3ac0a1258f5e092bcd1c3e0cd26c4371 core-redoxfs.tar.gz +a9886db5a6b2ff5ce2fd1ad3167b1a6c1fdf509c819ccaf9d59e439370621ece core-redoxfs-vb596776-patched.tar.gz +67d6cfb8eb3f7164212858cd27f49e950e1b7fc00ac03bd7a6af738a735af0c2 core-relibc-v861bbb0-patched.tar.gz +7203243b8bf1eb0d2f24dce55e31e16229a817ef14c372a1b9b88653d3896d91 core-userutils.tar.gz +14e7288f3cc7ae01c60c2a3ceef634dcc1c7dda87f5d88fec1624bb122138cef core-userutils-v0c5274f-patched.tar.gz +87f09948dde5800459bd92b242391dcfc0684d91a6590318a06be354d8e7c3c2 core-uutils.tar.gz +501572a66e107bc4ae07d0336d09d59da475bcc36c688487fac2d06c384ffeb7 
core-uutils-v1f7c81f-patched.tar.gz +3bb4b54cc021f5b4633d8d36d16c808911d7de2590172b3768fe9d0c7eecbd39 dev-cmake-v4.0.3-patched.tar.gz +ee26032e11572ddfc35010b6bc2fc26a069ceda6ec01293be103fa7006d7c1e7 dev-gcc13-v8e365ce7e5-patched.tar.gz +59ed3971c39dee06327da06a92545e4f70eddbbdc4ebf73fc7e075e1ee046dd4 dev-git.tar.gz +230a1a25c1b3d3782ac61eedd900052b075b055a944ce4cdec356ffb779410d5 dev-git-v2.13.1-patched.tar.gz +0cb8eaec77c01c7668f8ac13656a252cdee92f97ff24f9cfd9cb39125e8e2b5a dev-llvm21.tar.gz +a508f9987095a8a45428763a6b7d74ad8852a89d46c4696fec7bb671ba02d846 dev-llvm21-v250d0b022e-patched.tar.gz +e046d500a60a3266dd04b63b92b85cbba3ddfc76c1d163e7f63e015e4162badc dev-pciids.tar.gz +7c65ff91a06b0c30759ca657f6652b4c4a0e6cc4da7ff939715359eb1af49549 dev-pciids-vfae84a8-patched.tar.gz +1af52b8b8bcd2f04626db16a491f5285ac8b9a2354c6e7c28ad57cf46fb82a26 download-curl-vf50c28394-patched.tar.gz +02b7f74d6149e3eea2e1e069f61595ae0943328cd5906f1dd84b407f26ab41d1 drivers-linux-kpi.tar.gz +c1dad118b2fa2d477b4a6540225ca4278a869667aeca8fb05cbf1887987709b0 drivers-linux-kpi-vunknown-patched.tar.gz +9a00ac5a5576b2699d5b0613010853614aafdc9fc2e6ce6e0ba72c4164624c7f drivers-redbear-btusb.tar.gz +ae5ea46a07572ca35f2df89650fd15f6637e7a5893ea20300ebae967b4b9b275 drivers-redbear-btusb-vunknown-patched.tar.gz +b1fc69a7fdf3b0556165207531429adc47eff7aac9cc80d2ec87777d1ab71104 drivers-redbear-iwlwifi.tar.gz +0bfbc2d093b829d1f2ba2e1eefa55a88198525d08daa91bcdf475c306bbbc6bd drivers-redbear-iwlwifi-vunknown-patched.tar.gz +95899472009b85ba2b29fdc77e7ebdad926f81c86d071ca1c4f56b3e4617501c drivers-redox-driver-sys.tar.gz +bddd6f7c4f48ecfcc0f91644da1a4f3e796225fcb2595b6fee44006c0ff4afa9 drivers-redox-driver-sys-vunknown-patched.tar.gz +7c51a7d0bbe6871c0199bde1527d455662b6c8430c061470a4962084b373ad26 files-mc-v4.8.33-patched.tar.gz +1372f921f5d095953d20e253662881cc2ba51ee8eda9c7ebcc24388ffc30c527 fonts-dejavu-v2.37-patched.tar.gz +031881eb827deb0c545bef5d8ebc7f1c4c2989dc0ab67972ac33d288821acdaa 
fonts-freefont-vunknown-patched.tar.gz +5d31b6dbcf47f2c8626c9d6efbbcfd3246a2589e36f92c54dbd62cf5c83b148d fuse-e2fsprogs-v1.47.1-patched.tar.gz +97313c39dac45eca01dd46c96813473e426a99fe91d5971ad56b9bab53106063 gpu-amdgpu-vunknown-patched.tar.gz +dd64e143ca81e3d8a8935f9a8421c66f2dfe4e774ef0d57a8799f670dc20cf55 gpu-redox-drm.tar.gz +aaec2ebff059ed9a23a11d9b333c130b5bbfaaeb68b14d270a6279120de86c2a gpu-redox-drm-vunknown-patched.tar.gz +d4f6e46eb431869b728a0d9ae8e9a9690bb635755a5b0ff6a7c34bdc08a54429 icons-hicolor-icon-theme.tar.gz +adcc4573c1ef28ec48b497d59b7d92865c7086a1269c6082038356e3fb8cf10a icons-hicolor-icon-theme-v8d22bbf-patched.tar.gz +6a7c47b0f20e3d391aead41bc5a097aaf4bb95c3e74e2f32be3f8439bc71a5a7 icons-pop-icon-theme.tar.gz +1d1b966a61a02458662d8ae7ad40fc662d80e3ac2c91f8e01d4f7b2d31e2022d icons-pop-icon-theme-v1a575a8-patched.tar.gz +894cccef17471956656757a8e6eec4b6b85d1b66568f08b61b95cbb3b375b92e kde-kdecoration-v6.3.4-patched.tar.gz +ee62bf2441e9763e4cde26c0dbf980425e595663fc412da56c7f338d89020cf1 kde-kf6-attica-v6.10.0-patched.tar.gz +1b4667ac737250698eb5406d0b06476b048821e0f9aced7fa2a7f6f3ab69fb6f kde-kf6-extra-cmake-modules-v6.10.0-patched.tar.gz +9e109258bec7005d63ef4f547957105e989848f7d66d559af24645460e096aff kde-kf6-karchive-v6.10.0-patched.tar.gz +00e72e346159e3ca9a236b6fa13b840f347e2997e11d83f762ca4c7fa789745c kde-kf6-kauth-v6.10.0-patched.tar.gz +37be8a15b00a623d546e14ae4f34d8c8a6a8f4941edc81ce7d04fa06b601b78d kde-kf6-kbookmarks-v6.10.0-patched.tar.gz +682291f3bdc84c9c78b8f20546d681773766381934f3db58e353bf7320801402 kde-kf6-kcmutils-v6.10.0-patched.tar.gz +a20c7cc62c5e80206625feecf7719a90884a5cb5a97b00e08751f5588362643b kde-kf6-kcodecs-v6.10.0-patched.tar.gz +ea9bf7bfcabf062b16be1b7d29571762591cb785bdc6b247e84497c9cc46bd13 kde-kf6-kcolorscheme-v6.10.0-patched.tar.gz +ae69e3d4f04760acc209deef12d27acb729fb11c1e3a070d3ddd036f073c8878 kde-kf6-kcompletion-v6.10.0-patched.tar.gz +8c2a7e62c5170ceca56fff639aeaa404aa73f4916c64cd8a54081fcc1318ce8b 
kde-kf6-kconfig-v6.10.0-patched.tar.gz +e726a54383744a6e90f4de558a8b7962c1f6d658a8848df18e49da0748342b81 kde-kf6-kconfigwidgets-v6.10.0-patched.tar.gz +f31f1a855b7c8952f3afc067f6fbc968e8d6ddb8bf8997830d72d937aa738603 kde-kf6-kcoreaddons-v6.10.0-patched.tar.gz +9fec6ce5988b055c530b1f5a2bcea096dcacd85c56781c51086a93213965a0e0 kde-kf6-kcrash-v6.10.0-patched.tar.gz +6ca5d940920a86c79ac96d09d42bf1245bd99301fb50deca65f6dd509f312f01 kde-kf6-kdbusaddons-v6.10.0-patched.tar.gz +217fa5a3088fe2974f0b69db48adde58fe85234a2275510ce09aea028dc59360 kde-kf6-kdeclarative-v6.10.0-patched.tar.gz +8aa9c0df4e7383af69da6a0adc670728365270074c9b838f46760a31f6ab1ac9 kde-kf6-kded6-v6.10.0-patched.tar.gz +21c2f0e2282e499e0915023b19c9d7be74bd80c2450de2157667c17832107200 kde-kf6-kglobalaccel-v6.10.0-patched.tar.gz +955c1a31361812c65cadcf903d631335413073ac1bc52317c996cf808e5b992f kde-kf6-kguiaddons-v6.10.0-patched.tar.gz +b123dcdd8aa0a4cf66aaef6dc12d3ac1f4e77d5139f1d07fe76b89ae0400eb3a kde-kf6-ki18n-v6.10.0-patched.tar.gz +d81ec6333c08ebd850843888942aaea884bbe6cff682b974004195781762db03 kde-kf6-kiconthemes-v6.10.0-patched.tar.gz +3aff2490c3d354f052ad1d536ac1e05d0c78a710946c652f568ea2287d91651c kde-kf6-kidletime-v6.10.0-patched.tar.gz +ee1281eaaafc1e3966387292a9cfb36930c5c610a98cee8dddec0a7a1674b218 kde-kf6-kio-v6.10.0-patched.tar.gz +05a9d9f1b3469cd369a5f5d9c336232338529c4161298cb64dbc5fab253ad4e5 kde-kf6-kitemmodels-v6.10.0-patched.tar.gz +ab0623ff7695e1f99367f592f59d81d856164d3f4727a0473ab761a36c4e2737 kde-kf6-kitemviews-v6.10.0-patched.tar.gz +4cf81e73c14f24ee4adc8c909d4475e9f43e7f1d7e6b0d91ab23ce2c55514248 kde-kf6-kjobwidgets-v6.10.0-patched.tar.gz +94d02004033a2f718abefb164d7126b48cd7795ff523318372568f7608d706fe kde-kf6-knewstuff-v6.10.0-patched.tar.gz +d4783658caa5141b25aced8b5211c343bffb50a9425d50c8abc5193365976892 kde-kf6-knotifications-v6.10.0-patched.tar.gz +3d84d8a9a3248ffc0d28d014912db2570ac582c0792a09585c1b1dc4319a1978 kde-kf6-kpackage-v6.10.0-patched.tar.gz 
+7284d9158a17718239687ec032b21bee57cbd0ac6d446b9e5d35181bf2f28e1a kde-kf6-kservice-v6.10.0-patched.tar.gz +06da94541e03d74c187788ff79e7669b4e2f743fb320b677891bef77c852f69a kde-kf6-ktextwidgets-v6.10.0-patched.tar.gz +d837dd42e5c873860b8e8d1c6ca8a5147bc86bf4fc420cd2ea60e8e2c43209f1 kde-kf6-kwallet-v6.10.0-patched.tar.gz +b2154c5c74f140816cbdafb19111b787ede155da3c78b6fbf52c7bc45cab1409 kde-kf6-kwayland-v6.3.4-patched.tar.gz +b36ec1f1cd4959b2d8e4543a595a6a9de526143b5147ded18c78e3300ec3bd8a kde-kf6-kwidgetsaddons-v6.10.0-patched.tar.gz +23e2bdab499dc5898df859dc0e3690b27f88b1e22db829798e8b6d60252267e2 kde-kf6-kwindowsystem-v6.10.0-patched.tar.gz +7d73a7dd6a72c7f03644a4afa5ee2c57a3f4d04aad1725977ebb16325f39c47c kde-kf6-kxmlgui-v6.10.0-patched.tar.gz +bd6ea6cc02c6e4020e80f307cd5d6adabd70b05d228bdb4f584a5ddc7284ffce kde-kf6-solid-v6.10.0-patched.tar.gz +a577a8e3b70252156a8c622c71417e726606de37ed8a9c043d2e2359fc3b60b5 kde-kf6-sonnet-v6.10.0-patched.tar.gz +95419c92002d893914fd31ffb48f71a0f34f07dc7bdce7427f0a0fd8493601ed kde-kglobalacceld-v6.0.0-patched.tar.gz +4443841da66842d557cbdbbf7971d56554548ae98a44f023507542176e683117 kde-kirigami-v6.10.0-patched.tar.gz +be98158da9f17efcc18c56f9cd132e68b300fcbb594ebe860a8b5357f18c8bd0 kde-kwin-v6.3.4-patched.tar.gz +c1010706dfb8e00d349b24a2b3a525aedcc1d2e41227ee1673dd071cef8359ef kde-plasma-wayland-protocols-v1.16.0-patched.tar.gz +1c8b0f5038240f01f1aac9f5cdf5f963ad355bca87118239aafa20b2a1b51fd2 libs-expat-v2.5.0-patched.tar.gz +4953f6e273501761d0e6c2460b09b50922ebd53c2ad499c6aabce71c21708af9 libs-glib-v2.87-patched.tar.gz +0e73529eac6877af802a680ea66291a7222327bdb313b7b44364cb2297d231c5 libs-lcms2-stub-vunknown-patched.tar.gz +9b2f35c1fafb93de041105dd421b687fe05e7e6d6a8d1a5a5727c4992d6d39b5 libs-libdisplay-info-stub-vunknown-patched.tar.gz +a0a37aaa219a1ab096ad294df04029a16b7bd2313b8002ebbdaf959fd6583225 libs-libepoxy-stub-vunknown-patched.tar.gz +31828be1d165a7902487333e997cdab5d1b9429a12b5171d447afb92d424cbf7 
libs-libevdev-vunknown-patched.tar.gz +288d95219a2c0e4bd56fcdaea926ce9a04582bb80272dda4199a1c907c0f649b libs-libffi-v3.4.5-patched.tar.gz +da3aca123e4668eabed1423698db6d4f39b27992743f72b89015ed962489b209 libs-libgcc-vunknown-patched.tar.gz +d142df1affbc425a9e7542b4f8c9248f1149811c3b9da740ff3c33b3b3759b50 libs-libgmp-v6.3.0-patched.tar.gz +bf9804f179eaf2548091931b91ae1fc5cd5a01b330dc85bc6bf097a9d2fbc0e9 libs-libiconv-v1.17-patched.tar.gz +d9a76013904ac6497d4bef0305ac17efa1bdd1a3adebc1bfa437b6e3948ed358 libs-libinput-vunknown-patched.tar.gz +09f63d7254e75360f0bae505336c750778313ed05ad69c50f6b6da208d197153 libs-liborbital.tar.gz +f6714b4200ca33c9660009ec6487fbadf7afc56aa42537641ad75323d5a55af4 libs-liborbital-v9958363-patched.tar.gz +cbfd9505dce77c42ce76d97d6d0a37abc50396ca95e8c214cbd26acbbc924c48 libs-libqrencode-vunknown-patched.tar.gz +fd80625619d81631862572e466430d143e057f880f0299f20851e4e39f53265b libs-libstdcxx-v3-v8e365ce7e5-patched.tar.gz +5d0312ddf758cc34ef7ee4428e1f7f6e3d9b7d8a64ec29cca888483a2daaa58b libs-libstdcxx-vunknown-patched.tar.gz +f500f0bc22c3e586035f0a8d197f5f0b60dfcd3284f10f878470491eb6d54963 libs-libudev-stub-vunknown-patched.tar.gz +f1e5de47ba6353b671a43938ccd430ce5f7d75063ee33e6eb2bf60bf8d302ff9 libs-libxcvt-stub-vunknown-patched.tar.gz +efab18ac232c641efd116915cce08afe279e5c768f9447fedb44af349fdc809b libs-libxml2-v2.11-patched.tar.gz +ad11eb07412afcd561fb8d1a5a33cc71cc98be4af498ebb3deedfca89ad99066 libs-linux-input-headers-v1.13.2-patched.tar.gz +10733f31293e2b4edf2a48296dde7ec174b8273d86fc80294d38523554fa7132 libs-linux-input-headers-vunknown-patched.tar.gz +6be00852e063056feb93adc2befba6738de148df910a953b5f0cd4ea1f2a4c1f libs-mesa.tar.gz +7a6e727621e38ab1a376458a4693a0dabd02240f8c09eff8e94d7fb097866d30 libs-mesa-v0ecd6b66c-patched.tar.gz +17b8f98b55298356745ba2533c374671ed5971786db64919641a1c2b9e698669 libs-mesa-va86b5f3ac-patched.tar.gz +646c4e0b4100edd2dbcb43a7bea6f71e83bd15798be4a35af15a53d209a2149e libs-ncurses-v6.6-patched.tar.gz 
+7b7d6bd13a85825c2b2154f2a1ce5833e97cbfdda75186e36574f89c3189039e libs-ncursesw-vunknown-patched.tar.gz +8dd4168855f5022f5982aa0d556cf659e4cf0e778441d0469674f1c856f9b6aa libs-nghttp2-v1.64.0-patched.tar.gz +27236afc509099c0ff9491a81d2afa982483ffd5a7f4b373a9c6c8719965a59c libs-pcre2-v10.45-patched.tar.gz +d8fba8c19b69d3b0cb07473f0071ff077e8a0d411638b9a89858fe16e8e27217 libs-readline-v8.3-patched.tar.gz +981bd0d9bfd57f38af1fde45b3cef7300ff67b133083092e7b3f7330c50e4e2c libs-termcap-v1.3.1-patched.tar.gz +33e9d23d697c53245d54152ec9dbbe6d21543034003104d21dd8eb180936cf43 libs-zbus.tar.gz +9130fffd118e2f0ad61cf0b98b26c1dcf0caee5aac456b04cd536c15e1abe3a1 libs-zbus-vunknown-patched.tar.gz +93a7d8d5f25ff9f9a143d649161ebb857054bf133ca5a2246e66455dcd4f4c6e libs-zlib-v1.3-patched.tar.gz +02b7f74d6149e3eea2e1e069f61595ae0943328cd5906f1dd84b407f26ab41d1 local-recipes-drivers-linux-kpi.tar.gz +95899472009b85ba2b29fdc77e7ebdad926f81c86d071ca1c4f56b3e4617501c local-recipes-drivers-redox-driver-sys.tar.gz +2c40bc71a01e4a35786644052999c2a1a5207d2100592cbd5e2002418409ec12 local-recipes-gpu-redox-drm.tar.gz +413a49795400c91e2bb29eb6a7269567b4cd261fbc46bbaf466f91023d31108a monitors-bottom-v7cf3105d-patched.tar.gz +e0a529cda4be63780a00f842194bf3e8293855c3533cc7802bee400712a8f1cd monitors-htop-vc9444cd3-patched.tar.gz +841a848f29847679e9f2f1f70cbe7f2cd6b2a1ee3023872272dc77f24f1a11b4 other-ca-certificates-v8139d99-patched.tar.gz +3ae5efcd34b7460b83173168b779d1a6c2a04554623d468fdf62cccd27b61d4c other-libevdev-v1.13.2-patched.tar.gz +2fe33d35f4599d0ddf6972afeec393c4d761d9886e61f4af14a06225363b56b1 other-libinput-v1.30.2-patched.tar.gz +26db541bf0c6f29b03e38f4c419f1e5aa933280e6ba666de63b64613bfcc316f other-libxkbcommon-v1.7.0-patched.tar.gz +4295d94e0c4cf13d2563246928e4bd27a38dabef39cbb62cd9652ae3aac4f72a other-shared-mime-info-v2.4-patched.tar.gz +9c93841855c95f98eb1be3972f0b63dbb62eef004577c19fd87e783f849075f9 other-terminfo-vdc5712b-patched.tar.gz 
+33a00587669db9e3170ebc56d30cf9d24caa19b2b1a0d445c62eef535df900a8 qt-qt6-sensors-v6.11.0-patched.tar.gz +6e569d30e590028b100bb6a8a19e9845a4f4d2054e83171b7ccd938c43700197 qt-qtbase-v6.11-patched.tar.gz +1537c65840f5f739be7e111946981ce65cd22c256cdd1fa4b4db8919f661ea67 qt-qtdeclarative-v6.11-patched.tar.gz +8c76cd8bd68377ba046e7c1ba9355ca9205706dadec963a90ba004cad729b453 qt-qtshadertools-v6.11-patched.tar.gz +d92e719a14e7ab3f18ed8a6878c6ac6a5dd16e9b36afc470b18b0970204c4f9a qt-qtsvg-v6.11-patched.tar.gz +5d59ddd50ebd5a6addcbad446405561db9583c129c1d2c7e203dbb8ffd5e47be qt-qtwayland-v6.11-patched.tar.gz +369ad527a41701a8e99b5fe515c77b525812f5c8d2bce11d9961840ea7277b8d recipes-core-base.tar.gz +1eec412c012d34a2fc24ba3e414e9e043ef0321c71138f7b2a0e7a136c834ef1 recipes-core-kernel.tar.gz +93e05db4d8876d004be593fde5ce8e32da68f32b548aaa30a38db28c8357d57c recipes-core-relibc.tar.gz +c2449ef6c4bb04b1fb11998b89bcbf53176637bac2d60c5c9d35e667eca47550 redbear-full-patches.tar.gz +0836215d5f5a04b6fc846f8f98b4dd5dfd98c1f4ca7fbeca47b5cd01b2bc5c56 redbear-full-recipes.tar.gz +6ee867a2defcd6a013ff2622913ba6de4425728ba2ba6b8544828d89563f4765 services-dbus-v1.16.2-patched.tar.gz +2ce71338a83c0b17c257b3057d2c9988ff8bf958043d2a4d76d2fe60e383a815 services-seatd-v3f1eb28-patched.tar.gz +8d576fd91b66cea8842f878a382c141261b390210e0e011390968b670359b272 shells-bash-v5.2.15-patched.tar.gz +38628def4aad9fa4aef950da9e2de420973f9adf5d7cc606c5fa19ac799e8801 shells-zsh-v5.9-patched.tar.gz +85a683e768c3d36d9234b2a7ad1a8c4e4c124c75921c1dafddd82d3c42351014 system-cub-vunknown-patched.tar.gz +4916b35d619780fbf8a574c277a97c05d283b838743aa5eda8503b79452f1b06 system-evdevd-vunknown-patched.tar.gz +9fc9633a945c4d365b0e1eb34f8d18a5c417ba11979ff79ba3687eb28b9fd5c1 system-firmware-loader-vunknown-patched.tar.gz +d4cb8ed32b54271ffaf55019e27b8ecc065f0a2d40c93e40b64cab8e0dfc459b system-iommu-vunknown-patched.tar.gz +694a9dbe3a8e78469078ca77868eb4e197041c19e70d3fec774ccd248b5f565a 
system-redbear-authd-vunknown-patched.tar.gz +acaffb939037d7f47e59d529989fa1d07a730f8b110d0543359c890f43473e77 system-redbear-btctl-vunknown-patched.tar.gz +933e4150fe644908ed14215ce65adfa7c4109045a439cf6d4b6915918886bb42 system-redbear-dbus-services-vunknown-patched.tar.gz +2093c20a3c7469d8ccb745ddf89166f20b18897ebad93a51deb52e5b1754a565 system-redbear-firmware-vunknown-patched.tar.gz +d800b22a152be6644b49b6e94a62aaa3d79fcae7b123f38b307e06a93abf63ff system-redbear-greeter-vunknown-patched.tar.gz +d16af63308960d92ab53cc36ec42d8881f498e6df76f622887eb347a533fc4d4 system-redbear-hwutils-vunknown-patched.tar.gz +5e9a0d4515ae3723ffa3d168587622b17aadc3dce3de6cb3517d6bb2ead8bc58 system-redbear-info-vunknown-patched.tar.gz +6b373aa9efa9cc443546424ead7e059292b9fc18a7d30e61ee78bf45fce4eda8 system-redbear-login-protocol-vunknown-patched.tar.gz +899d7f873581329c105a34d6ed250c7d480a846c7ec1d2517bf40f701650b6bf system-redbear-meta-vunknown-patched.tar.gz +60d83c89ff4616dfb897aa1407cb4d50df9eea90fef0d0af4b0052208b87f7a8 system-redbear-mtr-vunknown-patched.tar.gz +431d8f00a1e69abaab7efd569d901f143aa1ab025447ccc1ec4a54f9fb01eb52 system-redbear-netctl-console-vunknown-patched.tar.gz +03be200f30feed1df7ffd4c398927227d3bedb5c98914a8b8182885f70955171 system-redbear-netctl-vunknown-patched.tar.gz +02e497ed346e3be6ba2fb0696bbcfb3677405a7a7b3041686d74b9b3ea74e60e system-redbear-netstat-vunknown-patched.tar.gz +4614f0094a0e3a4fbd9020bbf97af30614ae774c5bd5b6b768b73befe0ae52f4 system-redbear-nmap-vunknown-patched.tar.gz +e28d97371b53a94e38d85d27dcacd0c20702df07a0f43a672dcea91026086197 system-redbear-notifications-vunknown-patched.tar.gz +5a00ee80093d75b49fb261ee975337159de5678e805ce4f45e0172bbcd5154b2 system-redbear-passwd-vunknown-patched.tar.gz +f278f8b45413a7dc0794e4e20d0e85c4f9ab10c053c87aa2ab64168b8ce9dccf system-redbear-polkit-vunknown-patched.tar.gz +b73f27fa09f69b35119714156c0c1d51ed606a426ab1c075961433a13712c0ed system-redbear-quirks-vunknown-patched.tar.gz 
+3212767ffb06b426fb6183e20efe240bc6625d6bc4570fdc52d4c145f8aefc75 system-redbear-sessiond-vunknown-patched.tar.gz +6a56c56497e460a16011265f59ff79b3ec09f49268865aef194dfdb86baa4381 system-redbear-session-launch-vunknown-patched.tar.gz +a945b57d5db1573bb372a9c990ee603688371e3dcfbcc6f8658e4899e63b31ab system-redbear-statusnotifierwatcher-vunknown-patched.tar.gz +39a66dc9db654771752019e7880a537b64a197d50da4b3de218bfe8b97cc7679 system-redbear-traceroute-vunknown-patched.tar.gz +d1554c1ac0a2bd1d8517d077748a8f6df202a3df19a5f641afe1048fb710e718 system-redbear-udisks-vunknown-patched.tar.gz +78f7cba0fd6615e636f3de67cd5071116f7207d4295ba6053a3b03d3cef7c63b system-redbear-upower-vunknown-patched.tar.gz +3a5df1ea45ae41391999c2368a5ddb68111bd6d507fd475077df26ab1162be21 system-redbear-wifictl-vunknown-patched.tar.gz +7ebecf99b79e3c778ab754afc8cc37b19e7bafc7cc253f77ac300bb8f3c38453 system-udev-shim-vunknown-patched.tar.gz +368eac6b32215479386cbc890fa3ed3664c8b2f93057d46c440533e9fa916860 tests-redox-drm-prime-test-vunknown-patched.tar.gz +a6c6bdea2db16ea80746b0694fd0139f91b45d8884d3902eda3d12225a806366 tests-relibc-phase1-tests-vunknown-patched.tar.gz +59d31683475d60881c265dcd90940117e8989af494d114865cbe2045f731e446 tests-relibc-tests-bins-v861bbb0-patched.tar.gz +cd5cbb14c0fd791324b9a9092204e6506df3ef329384d4b673ae773e15d7bb3b tests-relibc-tests-v861bbb0-patched.tar.gz +abacbb13c8cd322b6354195073955413aaf47f33b0068f574a7be856632b9d5f text-kibi-v274b371-patched.tar.gz +872bbd46650c1d95bec31ae41583c93c10c57354cea98237ff7ac4ddd8e6ff4d tls-openssl3-v3.5.3-patched.tar.gz +ed67149137d05652604a7b413b1e8e01d35f43220a28b79ca6e2b3ea2d51caa4 tools-bzip2-v1.0.8-patched.tar.gz +e641a5081e426ebb6e040faa341317bb86fd517102517912edd34fbe300a5357 tools-diffutils-v3.6-patched.tar.gz +a44c1070830b55046eadff1c00b2cda203aae2b5455def1c31b271437bfeab61 tools-gettext-v0.22.5-patched.tar.gz +9142b86c6540364d521f4d6377f37d6fa3ee91f9b83287de321d1e35aa26a485 tools-patchelf-v0.18.0-patched.tar.gz 
+baae7c04d9d3d19ec03eeb352f0958c4fd3f1194911803a88485d92b484b6a28 tools-xz-v5.2.13-patched.tar.gz +cd580d47ad6392da89101229af43a875ce0d84378abf5a666dd65c83bdbb3d5f tui-mc-v4.8.33-patched.tar.gz +9ca92b1564dcdbcaf7a9acb7489f4f8e8ab998d6f714139e9072d24cce4d354f wayland-libwayland-v1.24.0-patched.tar.gz +e08c34a729eb71f93e194b801dcd219c958e9a3a999e3c90b18e901e748530ed wayland-qt6-wayland-smoke-vunknown-patched.tar.gz +3fe4ebda86ca28acaede4f1379fba0eceb18a65e3cbc6cd6a372e741cc317149 wayland-redbear-compositor-vunknown-patched.tar.gz +a517ec643eb7b373b7bf9748e353c688a7ea0f7ef070f66e92b9d8f879fbfa25 wayland-wayland-protocols-v1.38-patched.tar.gz +09ed779f68238b71d1b68d9d8c6248e2e6598cb28b0c20f0348b723f3b6d5577 x11-libdrm-v2.4.125-patched.tar.gz +c455fe44cde03931c96f8c183776e83b34f4e08a0e64fdd87aa3f74b8c846729 x11-mesa-x11-va86b5f3ac-patched.tar.gz diff --git a/sources/redbear-0.1.0/configs/.config b/sources/redbear-0.1.0/configs/.config new file mode 100644 index 00000000..aa9b67cc --- /dev/null +++ b/sources/redbear-0.1.0/configs/.config @@ -0,0 +1 @@ +PODMAN_BUILD?=0 diff --git a/sources/redbear-0.1.0/configs/base.toml b/sources/redbear-0.1.0/configs/base.toml new file mode 100644 index 00000000..e7a04f3d --- /dev/null +++ b/sources/redbear-0.1.0/configs/base.toml @@ -0,0 +1,320 @@ +# Base configuration: This configuration is meant to be included by +# other configurations rather than use directly. It is the greatest +# common divisor of all other configurations and misses several +# parts necessary to create a bootable system. 
+ +# General settings +[general] +# Do not prompt if settings are not defined +prompt = false + +[packages] +base = {} +base-initfs = {} +bootloader = {} +kernel = {} +libgcc = {} +libstdcxx = {} +netdb = {} +netutils = {} +relibc = {} +userutils = {} +uutils = {} + +## Configuration files +[[files]] +path = "/usr/lib/init.d/00_base.service" +data = """ +[unit] +description = "Base environment setup (tmpdir)" + +[service] +cmd = "ion" +args = ["-c", "rm -rf /tmp; mkdir -m a=rwxt /tmp"] +type = "oneshot" +""" + +[[files]] +path = "/usr/lib/init.d/00_sudo.service" +data = """ +[unit] +description = "Sudo privilege daemon" + +[service] +cmd = "sudo" +args = ["--daemon"] +type = "oneshot_async" +""" + +[[files]] +path = "/etc/login_schemes.toml" +data = """ +[user_schemes.root] +schemes = ["*"] +[user_schemes.user] +schemes = [ + # Kernel schemes + "debug", + "event", + "memory", + "pipe", + "serio", + "irq", + "time", + "sys", + # Base schemes + "rand", + "null", + "zero", + "log", + # Network schemes + "ip", + "icmp", + "tcp", + "udp", + # IPC schemes + "shm", + "chan", + "uds_stream", + "uds_dgram", + # File schemes + "file", + # Display schemes + "display.vesa", + "display*", + # Other schemes + "pty", + "sudo", + "audio", +] +""" + +[[files]] +path = "/etc/hostname" +data = "redbear" + +## Default net configuration (optimized for QEMU) +[[files]] +path = "/etc/net/dns" +data = """ +9.9.9.9 +""" + +[[files]] +path = "/etc/net/ip" +data = """ +10.0.2.15 +""" + +[[files]] +path = "/etc/net/ip_router" +data = """ +10.0.2.2 +""" + +[[files]] +path = "/etc/net/ip_subnet" +data = """ +255.255.255.0 +""" + +# https://www.freedesktop.org/software/systemd/man/latest/os-release.html +[[files]] +path = "/usr/lib/os-release" +data = """ +PRETTY_NAME="Red Bear OS 0.1.0" +NAME="Red Bear OS" +VERSION_ID="0.1.0" +VERSION="0.1.0" +ID="redbear-os" +ID_LIKE="redox-os" + +HOME_URL="https://github.com/vasilito/Red-Bear-OS-3" +DOCUMENTATION_URL="https://doc.redox-os.org/" 
+SUPPORT_URL="https://github.com/vasilito/Red-Bear-OS-3/issues" +""" +# FIXME maybe add VARIANT= and VARIANT_ID= keys depending on the chosen configuration? + +[[files]] +path = "/etc/os-release" +data = "../usr/lib/os-release" +symlink = true + +[[files]] +path = "/etc/pkg.d/50_redox" +data = "https://static.redox-os.org/pkg" + +## /usr and symlinks for usrmerge +[[files]] +path = "/usr" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/usr/bin" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/bin" +data = "usr/bin" +symlink = true + +[[files]] +path = "/usr/include" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/include" +data = "usr/include" +symlink = true + +[[files]] +path = "/usr/lib" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/lib" +data = "usr/lib" +symlink = true + +[[files]] +path = "/usr/libexec" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/usr/share" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/share" +data = "usr/share" +symlink = true + +[[files]] +path = "/ui" +data = "usr/share/ui" +symlink = true + +## legacy display font directory +[[files]] +path = "/usr/share/ui/fonts" +data = "/usr/share/fonts" +symlink = true + +## legacy display icon directory +[[files]] +path = "/usr/share/ui/icons" +data = "/usr/share/icons" +symlink = true + +## /var +[[files]] +path = "/var" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/var/cache" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/var/lib" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/var/lock" +data = "" +directory = true +mode = 0o1777 + +[[files]] +path = "/var/log" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/var/run" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/var/tmp" +data = "" +directory = true +mode = 0o1777 + +## Device file symlinks +[[files]] +path = "/dev/null" +data = 
"/scheme/null" +symlink = true + +[[files]] +path = "/dev/random" +data = "/scheme/rand" +symlink = true + +[[files]] +path = "/dev/urandom" +data = "/scheme/rand" +symlink = true + +[[files]] +path = "/dev/zero" +data = "/scheme/zero" +symlink = true + +[[files]] +path = "/dev/tty" +data = "libc:tty" +symlink = true + +[[files]] +path = "/dev/stdin" +data = "libc:stdin" +symlink = true + +[[files]] +path = "/dev/stdout" +data = "libc:stdout" +symlink = true + +[[files]] +path = "/dev/stderr" +data = "libc:stderr" +symlink = true + +# User settings +[users.root] +password = "password" +uid = 0 +gid = 0 +shell = "/usr/bin/ion" + +[users.user] +# Password is unset +password = "" +shell = "/usr/bin/ion" + +# Group settings +[groups.sudo] +gid = 1 +members = ["user"] diff --git a/sources/redbear-0.1.0/configs/minimal.toml b/sources/redbear-0.1.0/configs/minimal.toml new file mode 100644 index 00000000..faa3cf47 --- /dev/null +++ b/sources/redbear-0.1.0/configs/minimal.toml @@ -0,0 +1,56 @@ +# Minimal configuration + +include = ["base.toml"] + +# General settings +[general] +# Filesystem size in MiB +filesystem_size = 196 + +# Package settings +[packages] +ca-certificates = {} +coreutils = {} +extrautils = {} +ion = {} +pkgutils = {} +kibi = {} + +[[files]] +path = "/usr/lib/init.d/29_activate_console.service" +data = """ +[unit] +description = "Activate console VT" +requires_weak = ["00_base.target"] + +[service] +cmd = "inputd" +args = ["-A", "2"] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/30_console.service" +data = """ +[unit] +description = "Console terminals" +requires_weak = ["29_activate_console.service"] + +[service] +cmd = "getty" +args = ["2"] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/31_debug_console.service" +data = """ +[unit] +description = "Debug console" +requires_weak = ["29_activate_console.service"] + +[service] +cmd = "getty" +args = ["/scheme/debug/no-preserve", "-J"] +type = "oneshot_async" +""" 
diff --git a/sources/redbear-0.1.0/configs/redbear-bluetooth-experimental.toml b/sources/redbear-0.1.0/configs/redbear-bluetooth-experimental.toml new file mode 100644 index 00000000..fd8cfd0d --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-bluetooth-experimental.toml @@ -0,0 +1,17 @@ +# Red Bear OS Bluetooth Experimental Profile +# +# Standalone build target for the first bounded Bluetooth slice. +# +# This profile extends the existing minimal Red Bear baseline but keeps Bluetooth wiring isolated to +# this profile instead of leaking it into the shared device-service fragments used by all images. +# The current slice is explicit-startup, USB-attached, BLE-first, and intentionally not wired to +# USB-class autospawn yet. + +include = ["redbear-minimal.toml", "redbear-bluetooth-services.toml"] + +[general] +filesystem_size = 2048 + +[packages] +redbear-btusb = {} +redbear-btctl = {} diff --git a/sources/redbear-0.1.0/configs/redbear-bluetooth-services.toml b/sources/redbear-0.1.0/configs/redbear-bluetooth-services.toml new file mode 100644 index 00000000..29408c4b --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-bluetooth-services.toml @@ -0,0 +1,17 @@ +# Red Bear OS Bluetooth experimental service wiring +# +# Kept in a dedicated included fragment so the Bluetooth profile can inject +# bounded runtime files and service units without relying on profile-local +# [[files]] behavior. 
+ +[[files]] +path = "/var/lib/bluetooth" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/var/run/redbear-btusb" +data = "" +directory = true +mode = 0o755 diff --git a/sources/redbear-0.1.0/configs/redbear-device-services.toml b/sources/redbear-0.1.0/configs/redbear-device-services.toml new file mode 100644 index 00000000..63d9271f --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-device-services.toml @@ -0,0 +1,829 @@ +# Red Bear OS shared device-service wiring +# +# Shared by profiles that ship the firmware/input/Wi-Fi control compatibility stack. + +[packages] +redbear-quirks = {} +pciids = {} +fatd = {} + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/usr/lib/init.d/12_boot-late.target" +data = """ +[unit] +description = "Late boot services target" +requires_weak = [ + "00_base.target", +] +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = 
["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/lib/drivers.d" +data = "" +directory = true +mode = 0o755 + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/lib/drivers.d/00-storage.toml" +data = """ +[[driver]] +name = "nvmed" +description = "NVMe storage driver" +priority = 100 +command = ["/usr/lib/drivers/nvmed"] + +[[driver.match]] +class = 1 +subclass = 8 + +[[driver]] +name = "ahcid" +description = "AHCI SATA driver" +priority = 100 +command = ["/usr/lib/drivers/ahcid"] + +[[driver.match]] +class = 1 +subclass = 6 + +[[driver]] +name = "ided" +description = "PATA IDE driver" +priority = 100 +command = ["/usr/lib/drivers/ided"] + +[[driver.match]] +class = 1 +subclass = 1 + +[[driver]] +name = "virtio-blkd" +description = "VirtIO block device driver" +priority = 100 +command = ["/usr/lib/drivers/virtio-blkd"] + +[[driver.match]] +vendor = 0x1AF4 +device = 0x1001 +class = 1 +subclass = 0 +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = 
"amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/lib/drivers.d/10-network.toml" +data = """ +[[driver]] +name = "e1000d" +description = "Intel Gigabit Ethernet" +priority = 50 +command = ["/usr/lib/drivers/e1000d"] + +[[driver.match]] +vendor = 0x8086 +class = 2 + +[[driver]] +name = "rtl8168d" +description = "Realtek 8168/8125 Ethernet" +priority = 50 +command = ["/usr/lib/drivers/rtl8168d"] + +[[driver.match]] +vendor = 0x10EC +class = 2 + +[[driver]] +name = "rtl8139d" +description = "Realtek 8139 Ethernet" +priority = 50 +command = ["/usr/lib/drivers/rtl8139d"] + +[[driver.match]] +vendor = 0x10EC +device = 0x8139 + +[[driver]] +name = "ixgbed" +description = "Intel 10 Gigabit Ethernet" +priority = 50 +command = ["/usr/lib/drivers/ixgbed"] + +[[driver.match]] +vendor = 0x8086 +class = 2 +subclass = 0 + +[[driver]] +name = "virtio-netd" +description = "VirtIO network driver" +priority = 50 +command = ["/usr/lib/drivers/virtio-netd"] + +[[driver.match]] +vendor = 0x1AF4 +class = 2 +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = 
"iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/lib/drivers.d/20-usb.toml" +data = """ +[[driver]] +name = "xhcid" +description = "xHCI USB host controller" +priority = 80 +command = ["/usr/lib/drivers/xhcid"] + +[[driver.match]] +class = 0x0C +subclass = 0x03 +prog_if = 0x30 + +[[driver]] +name = "ehcid" +description = "EHCI USB 2.0 host controller" +priority = 80 +command = ["/usr/lib/drivers/ehcid"] + +# EHCI now owns a simple /scheme/usb controller surface for per-port status and +# control-transfer pass-through while the wider USB stack continues converging. + +[[driver.match]] +class = 0x0C +subclass = 0x03 +prog_if = 0x20 + +[[driver]] +name = "ohcid" +description = "OHCI USB 1.1 host controller" +priority = 80 +command = ["/usr/lib/drivers/ohcid"] + +[[driver.match]] +class = 0x0C +subclass = 0x03 +prog_if = 0x10 + +[[driver]] +name = "uhcid" +description = "UHCI USB 1.1 host controller (Intel)" +priority = 80 +command = ["/usr/lib/drivers/uhcid"] + +[[driver.match]] +class = 0x0C +subclass = 0x03 +prog_if = 0x00 +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = 
["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/lib/drivers.d/30-graphics.toml" +data = """ +[[driver]] +name = "vesad" +description = "VESA BIOS display driver" +priority = 60 +command = ["/usr/lib/drivers/vesad"] + +[[driver.match]] +class = 0x03 + +[[driver]] +name = "redox-drm" +description = "DRM/KMS display driver (AMD + Intel)" +priority = 60 +command = ["/usr/bin/redox-drm"] + +[[driver.match]] +class = 0x03 +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/lib/drivers.d/40-input.toml" +data = """ +[[driver]] +name = "ps2d" +description = "PS/2 keyboard and mouse driver" +priority = 90 +command = ["/usr/lib/drivers/ps2d"] +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ 
+[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/lib/drivers.d/50-audio.toml" +data = """ +[[driver]] +name = "ihdad" +description = "Intel HD Audio driver" +priority = 40 +command = ["/usr/lib/drivers/ihdad"] + +[[driver.match]] +vendor = 0x8086 +class = 0x04 + +[[driver]] +name = "ac97d" +description = "AC'97 audio codec driver" +priority = 40 +command = ["/usr/lib/drivers/ac97d"] + +[[driver.match]] +class = 0x04 +subclass = 0x01 +""" + +[[files]] +path = "/lib/drivers.d/70-usb-class.toml" +data = """ +[[driver]] +name = "redbear-acmd" +description = "USB CDC ACM serial driver" +priority = 70 +command = ["/usr/bin/redbear-acmd"] + +[[driver]] +name = "redbear-ecmd" +description = "USB CDC ECM/NCM ethernet driver" +priority = 70 +command = ["/usr/bin/redbear-ecmd"] + +[[driver]] +name = "redbear-usbaudiod" +description = "USB Audio Class driver" +priority = 70 +command = ["/usr/bin/redbear-usbaudiod"] +""" + +# Profiles that include this fragment should start `driver-manager` instead of +# `pcid-spawner`; the manager performs the PCI bind/channel handoff itself. 
+# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/usr/lib/init.d/00_driver-manager.service" +data = """ +[unit] +description = "PCI driver spawner" +requires_weak = [ + "00_base.target", +] + +[service] +cmd = "pcid-spawner" +type = "oneshot" +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/lib/drivers.d" +data = "" +directory = true +mode = 0o755 + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + 
+[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/usr/lib/init.d/10_evdevd.service" +data = """ +[unit] +description = "Evdev input daemon" +requires_weak = [ + "12_boot-late.target", + "00_pcid-spawner.service", +] + +[service] +cmd = "evdevd" +type = "oneshot_async" +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/etc/firmware-fallbacks.d" +data = "" +directory = true +mode = 0o755 + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = 
"iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/usr/lib/init.d/15_cpufreqd.service" +data = """ +[unit] +description = "CPU frequency scaling daemon" +requires_weak = ["12_boot-late.target"] + +[service] +cmd = "/usr/bin/cpufreqd" +type = "oneshot_async" +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/usr/lib/init.d/15_thermald.service" +data = """ +[unit] +description = "Thermal management daemon" +requires_weak = ["12_boot-late.target"] + +[service] +cmd = "/usr/bin/thermald" +type = "oneshot_async" +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = 
["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/usr/lib/init.d/15_hwrngd.service" +data = """ +[unit] +description = "Hardware RNG entropy daemon" +requires_weak = ["00_base.target"] + +[service] +cmd = "/usr/bin/hwrngd" +type = "oneshot_async" +""" + +# Firmware fallback chain configs +[[files]] +path = "/etc/firmware-fallbacks.d/00-amdgpu.toml" +data = """ +[[fallback]] +pattern = "amdgpu/dmcub_dcn31.bin" +chain = ["amdgpu/dmcub_dcn30.bin", "amdgpu/dmcub_dcn20.bin"] + +[[fallback]] +pattern = "amdgpu/dmcub_dcn30.bin" +chain = ["amdgpu/dmcub_dcn20.bin"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/10-iwlwifi.toml" +data = """ +[[fallback]] +pattern = "iwlwifi-bz-b0-gf-a0-92.ucode" +chain = ["iwlwifi-bz-b0-gf-a0-83.ucode", "iwlwifi-bz-b0-gf-a0-77.ucode"] +""" + +[[files]] +path = "/etc/firmware-fallbacks.d/20-intel-dmc.toml" +data = """ +[[fallback]] +pattern = "i915/adlp_dmc_ver2_16.bin" +chain = ["i915/adlp_dmc_ver2_14.bin", "i915/adlp_dmc_ver2_12.bin"] +""" +[[files]] +path = "/usr/lib/init.d/13_driver-params.service" +data = """ +[unit] +description = "Driver parameter scheme" +requires_weak = ["00_driver-manager.service"] + +[service] +cmd = "/usr/bin/driver-params" +type = { scheme = "driver-params" } +""" + +[[files]] +path = "/usr/lib/init.d/16_redbear-acmd.service" +data = """ +[unit] +description = "USB CDC ACM serial daemon" +requires_weak = ["12_boot-late.target"] + +[service] +cmd = "/usr/bin/redbear-acmd" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/16_redbear-ecmd.service" +data = """ +[unit] +description = "USB CDC ECM/NCM ethernet daemon" +requires_weak = ["12_boot-late.target"] + +[service] +cmd = "/usr/bin/redbear-ecmd" +type = "oneshot_async" +""" + +[[files]] +path = 
"/usr/lib/init.d/16_redbear-usbaudiod.service" +data = """ +[unit] +description = "USB Audio Class daemon" +requires_weak = ["12_boot-late.target"] + +[service] +cmd = "/usr/bin/redbear-usbaudiod" +type = "oneshot_async" +""" diff --git a/sources/redbear-0.1.0/configs/redbear-full.toml b/sources/redbear-0.1.0/configs/redbear-full.toml new file mode 100644 index 00000000..e09118a3 --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-full.toml @@ -0,0 +1,408 @@ +# Red Bear OS Full Configuration +# Desktop/graphics ISO for bare metal and QEMU. +# +# Build: make live CONFIG_NAME=redbear-full +# QEMU: make all CONFIG_NAME=redbear-full && make qemu +# +# Extends redbear-mini with the full desktop/graphics stack: +# Wayland, Qt6, KF6, KWin, Mesa, DRM drivers, firmware, greeter. + +include = ["redbear-mini.toml"] + +[general] +filesystem_size = 4096 +efi_partition_size = 16 + +[users.messagebus] +uid = 100 +gid = 100 +name = "messagebus" +home = "/nonexistent" +shell = "/usr/bin/ion" + +[users.root] +shell = "/usr/bin/zsh" + +[packages] +# Runtime driver parameter control surface. 
+driver-params = {} + +# Firmware loading +redbear-firmware = {} +firmware-loader = {} + +# NUMA topology discovery (userspace daemon) +numad = {} + +# GPU/graphics stack +redox-drm = {} +mesa = {} +libdrm = {} + +libwayland = {} +wayland-protocols = {} +# redbear-compositor = {} + +# Keyboard/input +# libxkbcommon = {} # build needed +# xkeyboard-config = {} # build needed +libevdev = {} +# libinput = {} # WIP: missing libepoll-shim recipe dependency + +# Qt6 stack +qtbase = {} +qtdeclarative = {} +qtsvg = {} +qtwayland = {} +qt6-wayland-smoke = {} +qt6-sensors = {} + +# KF6 Frameworks — explicit real-build surface in alphabetical order +# kirigami: blocked (QML gate — QQuickWindow/QQmlEngine headers don't exist on Redox) +kf6-kio = {} +# kde-cli-tools = {} # blocked: direct repo cook fails + +kdecoration = {} +kf6-attica = {} +kf6-karchive = {} +kf6-kauth = {} +kf6-kbookmarks = {} +kf6-kcmutils = {} +kf6-kcodecs = {} +kf6-kcolorscheme = {} +kf6-kcompletion = {} +kf6-kconfig = {} +kf6-kconfigwidgets = {} +kf6-kcoreaddons = {} +kf6-kcrash = {} +kf6-kdbusaddons = {} +kf6-kdeclarative = {} +kf6-kded6 = {} +kf6-kguiaddons = {} +kf6-ki18n = {} +kf6-kiconthemes = {} +kf6-kidletime = {} +kf6-kitemmodels = {} +kf6-kitemviews = {} +kf6-kjobwidgets = {} +kf6-knotifications = {} +kf6-kpackage = {} +kf6-kservice = {} +kf6-ktextwidgets = {} +kf6-kwayland = {} +kf6-kwidgetsaddons = {} +kf6-kxmlgui = {} +kf6-prison = {} +kf6-solid = {} +kf6-sonnet = {} +kf6-knewstuff = {} +kf6-kwallet = {} +kglobalacceld = {} + +# kwin = {} # Blocked: Qt6 Wayland plugin import error (QML gate) + +# Plasma + app packages — blocked on kirigami (QML gate) +# plasma-framework = {} +# plasma-workspace = {} +# plasma-desktop = {} + +redbear-authd = {} +redbear-session-launch = {} +seatd = {} +redbear-greeter = {} +amdgpu = {} + +# Core Red Bear umbrella package +redbear-meta = {} + +# Phase 1 runtime validation tests (POSIX: signalfd, timerfd, eventfd, shm_open, sem_open, waitid) +relibc-phase1-tests 
= {} + +# Desktop fonts and icons +dejavu = {} +freefont = {} +hicolor-icon-theme = {} +pop-icon-theme = {} + +# Suppress legacy desktop packages +orbdata = "ignore" +orbital = "ignore" +orbterm = "ignore" +orbutils = "ignore" +cosmic-edit = "ignore" +cosmic-files = "ignore" +cosmic-icons = "ignore" +cosmic-term = "ignore" +curl = "ignore" +git = "ignore" +mc = "ignore" +#curl = "ignore" # suppressed: cascade rebuild +#git = "ignore" # suppressed: cascade rebuild +#konsole = {} # WIP: recipe exists, not yet built — blocked by libiconv fetch +#kf6-pty = {} # WIP: recipe exists, not yet built + +[[files]] +path = "/lib/firmware/amdgpu" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/usr/lib/fonts" +data = "/usr/share/fonts" +symlink = true + +[[files]] +path = "/usr/lib/init.d/05_boot-essential.target" +data = """ +[unit] +description = "Boot essential services target" +requires_weak = [ + "00_base.target", +] +""" + +[[files]] +path = "/usr/lib/init.d/13_iommu.service" +data = """ +[unit] +description = "IOMMU DMA remapping daemon" +requires_weak = [ + "12_boot-late.target", + "00_pcid-spawner.service", +] + +[service] +cmd = "/usr/bin/iommu" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/12_dbus.service" +data = """ +[unit] +description = "D-Bus system bus" +requires_weak = [ + "12_boot-late.target", +] + +[service] +cmd = "dbus-daemon" +args = ["--system", "--nopidfile"] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/13_redbear-sessiond.service" +data = """ +[unit] +description = "Red Bear session broker (org.freedesktop.login1)" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-sessiond" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/13_seatd.service" +data = """ +[unit] +description = "seatd seat management daemon" +requires_weak = [ + "12_dbus.service", + "13_redbear-sessiond.service", +] + +[service] +cmd = "/usr/bin/seatd" +args = ["-l", "info"] +type = 
"oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/14_redbear-upower.service" +data = """ +[unit] +description = "UPower D-Bus service (org.freedesktop.UPower)" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-upower" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/14_redbear-udisks.service" +data = """ +[unit] +description = "UDisks2 D-Bus service (org.freedesktop.UDisks2)" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-udisks" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/14_redbear-polkit.service" +data = """ +[unit] +description = "PolicyKit1 D-Bus service (org.freedesktop.PolicyKit1)" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-polkit" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/19_redbear-authd.service" +data = """ +[unit] +description = "Red Bear authentication daemon" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "/usr/bin/redbear-authd" +envs = { QT_PLUGIN_PATH = "/usr/plugins", QT_QPA_PLATFORM_PLUGIN_PATH = "/usr/plugins/platforms", QML2_IMPORT_PATH = "/usr/qml", XCURSOR_THEME = "Pop", XKB_CONFIG_ROOT = "/usr/share/X11/xkb", KWIN_DRM_DEVICES = "/scheme/drm/card0" } +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/20_display.service" +data = """ +[unit] +description = "KDE session assembly helper" +requires_weak = [ + "12_dbus.service", + "13_redbear-sessiond.service", + "13_seatd.service", + "19_redbear-authd.service", +] + +[service] +cmd = "/usr/bin/redbear-session-launch" +args = ["--username", "root", "--mode", "session", "--session", "kde-wayland", "--vt", "4", "--runtime-dir", "/tmp/run/redbear-display-session", "--wayland-display", "wayland-display"] +envs = { QT_PLUGIN_PATH = "/usr/plugins", QT_QPA_PLATFORM_PLUGIN_PATH = "/usr/plugins/platforms", QML2_IMPORT_PATH = "/usr/qml", XCURSOR_THEME = "Pop", XKB_CONFIG_ROOT = "/usr/share/X11/xkb", REDBEAR_KDE_SESSION_BACKEND = 
"virtual", REDBEAR_KDE_SESSION_STATE_DIR = "/run/redbear-display-session" } +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/20_greeter.service" +data = """ +[unit] +description = "Red Bear greeter service" +requires_weak = [ + "00_pcid-spawner.service", + "12_dbus.service", + "13_redbear-sessiond.service", + "13_seatd.service", + "19_redbear-authd.service", +] + +[service] +cmd = "/usr/bin/redbear-greeterd" +envs = { VT = "3", REDBEAR_GREETER_USER = "greeter", KWIN_DRM_DEVICES = "/scheme/drm/card0", REDBEAR_DRM_WAIT_SECONDS = "10" } +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/29_activate_console.service" +data = """ +[unit] +description = "Activate fallback console VT" +requires_weak = [ + "05_boot-essential.target", +] + +[service] +cmd = "inputd" +args = ["-A", "2"] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/30_console.service" +data = """ +[unit] +description = "Console terminals" +requires_weak = [ + "29_activate_console.service", +] + +[service] +cmd = "getty" +args = ["2"] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/31_debug_console.service" +data = """ +[unit] +description = "Debug console" +requires_weak = [ + "29_activate_console.service", +] + +[service] +cmd = "getty" +args = ["/scheme/debug/no-preserve", "-J"] +type = "oneshot_async" +""" + +[users.greeter] +password = "" +uid = 101 +gid = 101 +name = "greeter" +home = "/nonexistent" +shell = "/usr/bin/ion" + +[groups.greeter] +gid = 101 +members = ["greeter"] + +[groups.messagebus] +gid = 100 +members = ["messagebus"] + +[[files]] +path = "/etc/pcid.d/ihdgd.toml" +data = """ +[[drivers]] +name = "Intel GPU (VGA compatible)" +class = 0x03 +vendor = 0x8086 +subclass = 0x00 +command = ["redox-drm"] + +[[drivers]] +name = "Intel GPU (3D controller)" +class = 0x03 +vendor = 0x8086 +subclass = 0x02 +command = ["redox-drm"] +""" + +[[files]] +path = "/etc/pcid.d/virtio-gpud.toml" +data = """ +[[drivers]] +name = "VirtIO 
GPU" +class = 0x03 +vendor = 0x1af4 +subclass = 0x00 +command = ["redox-drm"] +""" diff --git a/sources/redbear-0.1.0/configs/redbear-greeter-services.toml b/sources/redbear-0.1.0/configs/redbear-greeter-services.toml new file mode 100644 index 00000000..5ee08677 --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-greeter-services.toml @@ -0,0 +1,127 @@ +# Red Bear greeter/login service wiring +# +# This fragment is intended to be included by the active desktop/graphics target. + +[[files]] +path = "/usr/lib/init.d/05_boot-essential.target" +data = """ +[unit] +description = "Boot essential services target" +requires_weak = [ + "00_base.target", +] +""" + +[users.greeter] +password = "" +uid = 101 +gid = 101 +name = "greeter" +home = "/nonexistent" +shell = "/usr/bin/ion" + +[groups.greeter] +gid = 101 +members = ["greeter"] + +[packages] +redbear-authd = {} +redbear-session-launch = {} +redbear-greeter = {} + +[[files]] +path = "/usr/lib/init.d/19_redbear-authd.service" +data = """ +[unit] +description = "Red Bear authentication daemon" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-authd" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/20_display.service" +data = """ +[unit] +description = "Compositor proof (Phase 2: KWin virtual + Qt6 smoke + 60s survival)" +requires_weak = [ + "12_dbus.service", + "13_redbear-sessiond.service", + "13_seatd.service", +] + +[service] +cmd = "redbear-validation-session" +envs = { VT = "3" } +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/20_greeter.service" +data = """ +[unit] +description = "Red Bear greeter service (experimental — Phase 3 user session bring-up)" +requires_weak = [ + "00_pcid-spawner.service", + "12_dbus.service", + "13_redbear-sessiond.service", + "13_seatd.service", + "19_redbear-authd.service", +] + +[service] +cmd = "/usr/bin/redbear-greeterd" +envs = { VT = "3", REDBEAR_GREETER_USER = "greeter", KWIN_DRM_DEVICES = "/scheme/drm/card0", 
REDBEAR_DRM_WAIT_SECONDS = "10" } +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/30_console.service" +data = """ +[unit] +description = "Console terminals" +requires_weak = [ + "29_activate_console.service", +] + +[service] +cmd = "getty" +args = ["2"] +type = "oneshot_async" +respawn = true +""" + +[[files]] +path = "/usr/lib/init.d/29_activate_console.service" +data = """ +[unit] +description = "Activate fallback console VT" +requires_weak = [ + "05_boot-essential.target", +] + +[service] +cmd = "inputd" +args = ["-A", "2"] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/31_debug_console.service" +data = """ +[unit] +description = "Debug console" +requires_weak = [ + "29_activate_console.service", +] + +[service] +cmd = "getty" +args = ["/scheme/debug/no-preserve", "-J"] +type = "oneshot_async" +respawn = true +""" diff --git a/sources/redbear-0.1.0/configs/redbear-grub-policy.toml b/sources/redbear-0.1.0/configs/redbear-grub-policy.toml new file mode 100644 index 00000000..33e8098d --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-grub-policy.toml @@ -0,0 +1,9 @@ +# Red Bear OS shared GRUB policy fragment +# Use with any redbear-* profile to make GRUB first-class in installer flows. + +[general] +bootloader = "grub" +efi_partition_size = 16 + +[packages] +grub = {} diff --git a/sources/redbear-0.1.0/configs/redbear-grub.toml b/sources/redbear-0.1.0/configs/redbear-grub.toml new file mode 100644 index 00000000..27e3e2f1 --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-grub.toml @@ -0,0 +1,16 @@ +# Red Bear OS GRUB Configuration +# Text-only ISO with GRUB boot manager for bare metal. +# +# Build: make live CONFIG_NAME=redbear-grub +# +# Identical to redbear-mini but uses GNU GRUB as the boot manager +# instead of the Redox EFI bootloader. 
+ +include = ["redbear-mini.toml", "redbear-grub-policy.toml"] + +[general] +bootloader = "grub" +efi_partition_size = 16 + +[packages] +grub = {} diff --git a/sources/redbear-0.1.0/configs/redbear-legacy-base.toml b/sources/redbear-0.1.0/configs/redbear-legacy-base.toml new file mode 100644 index 00000000..225ad88c --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-legacy-base.toml @@ -0,0 +1,51 @@ +# Red Bear OS overrides for base init services. +# +# 00_base.service: stripped base setup (tmpdir only, no sudo — sudo runs from +# base.toml's 00_sudo.service). ipcd and ptyd are started by +# 00_ipcd.service and 00_ptyd.service from the base recipe. +# 00_drivers / 10_net: no longer overridden — the legacy scripts were removed +# from base.toml. The retained 00_pcid-spawner.service unit name now +# launches driver-manager so existing init ordering remains stable. +# 00_pcid-spawner.service: compatibility wrapper for driver-manager. The base +# recipe uses type="oneshot" which blocks init until pcid-spawner exits. +# Running driver-manager here with oneshot_async keeps the historic unit +# name for downstream `requires_weak` consumers while moving PCI driver +# spawning to the manager that performs bind/channel handoff. 
+ +[packages] +zsh = {} + +[[files]] +path = "/usr/lib/init.d/00_base.service" +data = """ +[unit] +description = "Base environment setup (tmpdir)" + +[service] +cmd = "ion" +args = ["-c", "rm -rf /tmp; mkdir -m a=rwxt /tmp"] +type = "oneshot" +""" + +[[files]] +path = "/etc/init.d/20_audiod.service" +data = """ +[unit] +description = "Audio multiplexer" +default_dependencies = false + +[service] +cmd = "audiod" +type = "oneshot_async" +""" + +[[files]] +path = "/etc/init.d/00_pcid-spawner.service" +data = """ +[unit] +description = "PCI driver spawner" + +[service] +cmd = "pcid-spawner" +type = "oneshot" +""" diff --git a/sources/redbear-0.1.0/configs/redbear-legacy-desktop.toml b/sources/redbear-0.1.0/configs/redbear-legacy-desktop.toml new file mode 100644 index 00000000..59d962ae --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-legacy-desktop.toml @@ -0,0 +1,20 @@ +# Red Bear OS overrides for legacy desktop init services. +# Blank the display and console services inherited from desktop-minimal.toml. +# These intentional empty overrides prevent the inherited services from launching; +# the active redbear-full config provides its own display/console/greeter services. + +[[files]] +path = "/usr/lib/init.d/20_display.service" +data = "" + +[[files]] +path = "/usr/lib/init.d/29_activate_console.service" +data = "" + +[[files]] +path = "/usr/lib/init.d/30_console.service" +data = "" + +[[files]] +path = "/usr/lib/init.d/31_debug_console.service" +data = "" diff --git a/sources/redbear-0.1.0/configs/redbear-mini.toml b/sources/redbear-0.1.0/configs/redbear-mini.toml new file mode 100644 index 00000000..baa760c8 --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-mini.toml @@ -0,0 +1,467 @@ +# Red Bear OS Mini Configuration +# Text-only ISO for console/recovery/install on bare metal. 
+# +# Build: make live CONFIG_NAME=redbear-mini +# +# Target contract: +# - text-login live/recovery/install surface +# - boot framebuffer for VT text consoles via vesad + fbcond +# - all non-graphics, non-firmware packages from the full profile +# - no linux-firmware payload, no firmware-loader, no GPU/display drivers + +include = ["minimal.toml", "redbear-legacy-base.toml", "redbear-netctl.toml", "redbear-device-services.toml"] + +[general] +filesystem_size = 1536 + +[users.messagebus] +uid = 100 +gid = 100 +name = "messagebus" +home = "/nonexistent" +shell = "/usr/bin/ion" + +[packages] +# Red Bear OS branding and host utilities. +redbear-release = {} +redbear-hwutils = {} +redbear-quirks = {} + +# Device driver infrastructure (pcid-spawner is built by the base recipe; +# driver-manager requires driver config migration and is not yet ready) +ehcid = {} +ohcid = {} +uhcid = {} + +# Redox-native netctl tooling. +redbear-netctl = {} +redbear-netctl-console = {} +redbear-netstat = {} +redbear-traceroute = {} +redbear-mtr = {} +redbear-nmap = {} + +# Wi-Fi control daemon (firmware-loader excluded — no firmware blobs in mini). +redbear-wifictl = {} + +# Diagnostics and shell-side utilities. +mc = "ignore" +redbear-info = {} + +# Keep package builder utility in live environment. 
+cub = {} +cpufreqd = {} +thermald = {} +hwrngd = {} +redbear-acmd = {} +redbear-ecmd = {} +redbear-usbaudiod = {} +driver-params = {} + +# ── PCI device database (critical for PCI driver matching) ── +pciids = {} + +# ── Filesystem support ── +ext4d = {} +fatd = {} +redoxfs = {} + +# ── System installer ── +installer = {} + +# ── Input / device management ── +evdevd = {} +udev-shim = {} + +# ── D-Bus IPC and session services ── +dbus = {} +redbear-sessiond = {} +redbear-dbus-services = {} +redbear-notifications = {} +redbear-upower = {} +redbear-udisks = {} +redbear-polkit = {} + +# ── IOMMU DMA remapping ── +iommu = {} + +# ── Standard CLI tools (from server profile) ── +bash = {} +bottom = {} +#curl = {} # suppressed: nghttp2 dependency chain fails; curl not needed for boot/recovery +diffutils = {} +findutils = {} +#git = {} # suppressed: cascading rebuild; git not needed for boot/recovery +htop = {} +#mc = {} # suppressed: C99 format warning errors in compilation + +# ── Build / packaging utilities ── +# patchelf = {} # requires strtold which is missing in relibc +shared-mime-info = {} + +# VT/getty/login chain: initfs starts inputd + vesad + fbcond in phase 1, +# then minimal.toml legacy 30_console runs inputd -A 2 + getty 2 + getty debug. 
+ +[[files]] +path = "/etc/netctl/active" +data = "wired-dhcp\n" + +[[files]] +path = "/etc/init.d/10_smolnetd.service" +data = """ +[unit] +description = "Network stack (non-blocking on live-mini)" +requires_weak = [ + "00_pcid-spawner.service", +] + +[service] +cmd = "netstack" +type = "oneshot_async" +""" + +[[files]] +path = "/etc/init.d/10_dhcpd.service" +data = """ +[unit] +description = "DHCP client daemon (non-blocking on live-mini)" +requires_weak = [ + "10_smolnetd.service", +] + +[service] +cmd = "dhcpd" +args = ["-f"] +type = "oneshot_async" +""" + +[[files]] +path = "/etc/issue" +data = """ +########## Red Bear OS ######### +# Login with the following: # +# `user` # +# `root`:`password` # +################################ +""" + +[[files]] +path = "/etc/motd" +data = """ + + _ _ + | | (_) + | | ___ _ ___ _ __ _ _ ___ + | |/ / || |/ _ \\ | '_ \\| | | / __| + | < | || | (_) || |_) | |_| \\__ \\ + |_|\\_\\|_|/ |\\___/ | .__/ \\__,_|___/ + |__/ | | + |_| + + Red Bear OS v0.2.0 "Liliya" — Built on Redox OS + Type 'help' for available commands. 
+""" + +[[files]] +path = "/etc/init.d/20_audiod.service" +data = """ +[unit] +description = "Audio multiplexer (non-blocking on live-mini)" +requires_weak = [ + "00_base.target", +] + +[service] +cmd = "audiod" +type = "oneshot_async" +""" + +[[files]] +path = "/etc/init.d/02_serial_probe.service" +data = """ +[unit] +description = "Serial boot probe marker" +requires_weak = [ + "00_base.target", +] + +[service] +cmd = "echo" +args = ["RB_SERIAL_PROBE_OK"] +type = "oneshot" +""" + +[[files]] +path = "/etc/init.d/00_gpiod.service" +data = """ +[unit] +description = "GPIO controller registry (non-blocking on live-mini)" +requires_weak = [ + "00_base.target", +] + +[service] +cmd = "gpiod" +type = { scheme = "gpio" } +""" + +[[files]] +path = "/etc/init.d/00_i2cd.service" +data = """ +[unit] +description = "I2C adapter registry (non-blocking on live-mini)" +requires_weak = [ + "00_base.target", +] + +[service] +cmd = "i2cd" +type = { scheme = "i2c" } +""" + +[[files]] +path = "/etc/init.d/00_i2c-dw-acpi.service" +data = """ +[unit] +description = "DesignWare ACPI I2C controller (non-blocking)" +requires_weak = [ + "00_i2cd.service", +] + +[service] +cmd = "dw-acpi-i2cd" +type = "oneshot_async" +""" + +[[files]] +path = "/etc/init.d/00_intel-gpiod.service" +data = """ +[unit] +description = "Intel ACPI GPIO registrar (non-blocking)" +requires_weak = [ + "00_gpiod.service", + "00_i2cd.service", +] + +[service] +cmd = "intel-gpiod" +type = "oneshot_async" +""" + +[[files]] +path = "/etc/init.d/00_i2c-gpio-expanderd.service" +data = """ +[unit] +description = "I2C GPIO expander companion bridge (non-blocking on live-mini)" +requires_weak = [ + "00_i2cd.service", + "00_gpiod.service", +] + +[service] +cmd = "i2c-gpio-expanderd" +type = "oneshot_async" +""" + +[[files]] +path = "/etc/init.d/00_i2c-hidd.service" +data = """ +[unit] +description = "ACPI I2C HID bring-up daemon (non-blocking)" +requires_weak = [ + "00_i2cd.service", + "00_i2c-dw-acpi.service", + 
"00_intel-gpiod.service", + "00_i2c-gpio-expanderd.service", +] + +[service] +cmd = "i2c-hidd" +type = "oneshot_async" +""" + +[[files]] +path = "/etc/init.d/00_ucsid.service" +data = """ +[unit] +description = "USB-C UCSI topology detector (non-blocking on live-mini)" +requires_weak = [ + "00_base.target", + "00_i2cd.service", +] + +[service] +cmd = "ucsid" +type = { scheme = "ucsi" } +""" + +[[files]] +path = "/usr/lib/init.d/12_boot-late.target" +data = """ +[unit] +description = "Late boot services target" +requires_weak = [ + "00_base.target", +] +""" + +[[files]] +path = "/usr/lib/init.d/11_udev.service" +data = """ +[unit] +description = "udev compatibility shim" +requires_weak = [ + "12_boot-late.target", + "00_pcid-spawner.service", +] + +[service] +cmd = "udev-shim" +type = { scheme = "udev" } +""" + +[[files]] +path = "/usr/lib/init.d/10_evdevd.service" +data = """ +[unit] +description = "Evdev input daemon" +requires_weak = [ + "12_boot-late.target", + "00_pcid-spawner.service", +] + +[service] +cmd = "evdevd" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/11_wifictl.service" +data = """ +[unit] +description = "Wi-Fi control daemon" +requires_weak = [ + "12_boot-late.target", + "00_pcid-spawner.service", +] + +[service] +cmd = "redbear-wifictl" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/12_dbus.service" +data = """ +[unit] +description = "D-Bus system bus" +requires_weak = [ + "12_boot-late.target", +] + +[service] +cmd = "dbus-daemon" +args = ["--system"] +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/13_redbear-sessiond.service" +data = """ +[unit] +description = "Red Bear session broker (org.freedesktop.login1)" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-sessiond" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/13_iommu.service" +data = """ +[unit] +description = "IOMMU DMA remapping daemon" +requires_weak = [ + "12_boot-late.target", + 
"00_pcid-spawner.service", +] + +[service] +cmd = "/usr/bin/iommu" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/14_redbear-upower.service" +data = """ +[unit] +description = "UPower D-Bus service (org.freedesktop.UPower)" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-upower" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/14_redbear-udisks.service" +data = """ +[unit] +description = "UDisks2 D-Bus service (org.freedesktop.UDisks2)" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-udisks" +type = "oneshot_async" +""" + +[[files]] +path = "/usr/lib/init.d/14_redbear-polkit.service" +data = """ +[unit] +description = "PolicyKit1 D-Bus service (org.freedesktop.PolicyKit1)" +requires_weak = [ + "12_dbus.service", +] + +[service] +cmd = "redbear-polkit" +type = "oneshot_async" +""" + +[[files]] +path = "/var/lib/dbus" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/run/dbus" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/etc/pcid.d/ihdgd.toml" +data = """ +# redbear-live-mini: text-only image; override upstream ihdgd config with empty file +""" + +[[files]] +path = "/etc/pcid.d/virtio-gpud.toml" +data = """ +# redbear-live-mini: text-only image; override upstream virtio-gpud config with empty file +""" + +[[files]] +path = "/etc/pcid.d/00_text_mode_gpu_mask.toml" +data = """ +# redbear-live-mini: no display driver matched; class 0x03 devices are skipped +""" diff --git a/sources/redbear-0.1.0/configs/redbear-netctl.toml b/sources/redbear-0.1.0/configs/redbear-netctl.toml new file mode 100644 index 00000000..b50a0613 --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-netctl.toml @@ -0,0 +1,106 @@ +# Red Bear OS shared network profile wiring +# +# Shared by redbear-minimal, redbear-desktop, redbear-full, and redbear-kde. 
+ +[[files]] +path = "/etc/netctl" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/etc/netctl/examples" +data = "" +directory = true +mode = 0o755 + +[[files]] +path = "/etc/netctl/examples/wired-dhcp" +data = """ +Description='Red Bear wired DHCP profile' +Interface=eth0 +Connection=ethernet +IP=dhcp +""" + +[[files]] +path = "/etc/netctl/wired-dhcp" +data = """ +Description='Red Bear wired DHCP profile' +Interface=eth0 +Connection=ethernet +IP=dhcp +""" + +[[files]] +path = "/etc/netctl/examples/wired-static" +data = """ +Description='Red Bear wired static profile' +Interface=eth0 +Connection=ethernet +IP=static +Address=('192.168.1.10/24') +Gateway='192.168.1.1' +DNS=('1.1.1.1') +""" + +[[files]] +path = "/etc/netctl/examples/wifi-dhcp" +data = """ +Description='Red Bear Wi-Fi DHCP profile' +Interface=wlan0 +Connection=wifi +SSID='example-ssid' +Security=wpa2-psk +Key='example-passphrase' +IP=dhcp +""" + +[[files]] +path = "/etc/netctl/examples/wifi-open" +data = """ +Description='Red Bear Wi-Fi open-network profile' +Interface=wlan0 +Connection=wifi +SSID='example-open-ssid' +Security=open +IP=dhcp +""" + +[[files]] +path = "/etc/netctl/examples/wifi-open-bounded" +data = """ +Description='Red Bear Wi-Fi bounded lifecycle profile' +Interface=wlan0 +Connection=wifi +SSID='example-open-ssid' +Security=open +IP=bounded +""" + +[[files]] +path = "/etc/netctl/wifi-open-bounded" +data = """ +Description='Red Bear Wi-Fi bounded lifecycle profile' +Interface=wlan0 +Connection=wifi +SSID='example-open-ssid' +Security=open +IP=bounded +""" + +[[files]] +path = "/usr/lib/init.d/12_netctl.service" +data = """ +[unit] +description = "Network profile application" +requires_weak = [ + "10_smolnetd.service", + "10_dhcpd.service", +] + +[service] +cmd = "redbear-netctl" +args = ["--boot"] +type = "oneshot_async" +""" diff --git a/sources/redbear-0.1.0/configs/redbear-wifi-experimental.toml b/sources/redbear-0.1.0/configs/redbear-wifi-experimental.toml new file 
mode 100644 index 00000000..350eb5a0 --- /dev/null +++ b/sources/redbear-0.1.0/configs/redbear-wifi-experimental.toml @@ -0,0 +1,20 @@ +# Red Bear OS Wi-Fi Experimental Profile +# +# Standalone tracked build target for the current bounded Intel Wi-Fi slice. +# +# This profile extends the existing minimal Red Bear baseline but switches the default active profile +# to the bounded Wi-Fi path and adds the first Intel driver-side package on top of the shared +# firmware/control/profile tooling. + +include = ["redbear-minimal.toml"] + +[general] +filesystem_size = 2048 + +[packages] +# First bounded Intel driver-side package +redbear-iwlwifi = {} + +[[files]] +path = "/etc/netctl/active" +data = "wifi-open-bounded\n" diff --git a/sources/redbear-0.1.0/manifest.txt b/sources/redbear-0.1.0/manifest.txt new file mode 100644 index 00000000..84960b73 --- /dev/null +++ b/sources/redbear-0.1.0/manifest.txt @@ -0,0 +1,385 @@ +# Red Bear OS 0.1.0 Release Manifest +# Generated: vie 01 may 2026 10:31:16 WEST +# Build system: f55acba68 + +# Total: 404 recipes (139 git, 165 tar) + +archives/lz4 tar=https://github.com/lz4/lz4/releases/download/v1.10.0/lz4-1.10.0.tar.gz blake3=3e69fd475e7852e17594985528b5232afeba7d3d56cfebe2e89071768b2ab36a +archives/zstd tar=https://github.com/facebook/zstd/releases/download/v1.5.7/zstd-1.5.7.tar.gz blake3=730dca31244abd219e995f03a55d95b2cfb4b3e16cda055a79fa6f30a4f0e1db +artwork/pop-wallpapers git=https://github.com/pop-os/wallpapers rev= +artwork/ubuntu-wallpapers tar=https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/ubuntu-wallpapers/23.10.4/ubuntu-wallpapers_23.10.4.orig.tar.gz blake3=1e479d0aa48fe3f2961a2dac28c3ed397a29616cf6e7d73f5ceb6fabfd6449e1 +branding/redbear-release path=source +core/base git=https://gitlab.redox-os.org/redox-os/base.git rev=463f76b9608a896e6f6c9f63457f57f6409873c7 +core/base-initfs same_as=../base +core/binutils git=https://gitlab.redox-os.org/redox-os/binutils.git rev= +core/bootloader 
git=https://gitlab.redox-os.org/redox-os/bootloader.git rev=b22a35c +core/contain git=https://gitlab.redox-os.org/redox-os/contain.git rev= +core/coreutils git=https://gitlab.redox-os.org/redox-os/coreutils.git rev=5559e20 +core/dash git=https://gitlab.redox-os.org/redox-os/dash.git rev= +core/ext4d path=source +core/extrautils git=https://gitlab.redox-os.org/redox-os/extrautils.git rev=fb66941 +core/fatd path=source +core/findutils git=https://gitlab.redox-os.org/redox-os/findutils.git rev=116c044 +core/grub tar=https://ftp.gnu.org/gnu/grub/grub-2.12.tar.xz blake3=13c48453f9becf4a6e49618749dc7cb83a2c4a0d7600eeeadc6c7c2772c0b877 +core/installer git=https://gitlab.redox-os.org/redox-os/installer.git rev=948bfdc +core/ion git=https://gitlab.redox-os.org/redox-os/ion.git rev=1440704f +core/kernel git=https://gitlab.redox-os.org/redox-os/kernel.git rev=866dfad +core/netdb git=https://gitlab.redox-os.org/redox-os/netdb.git rev=2c15606 +core/netutils git=https://gitlab.redox-os.org/redox-os/netutils.git rev=40a573b +core/pkgar git=https://gitlab.redox-os.org/redox-os/pkgar.git rev= +core/pkgutils git=https://gitlab.redox-os.org/redox-os/pkgutils.git rev=70c5067 +core/profiled git=https://gitlab.redox-os.org/redox-os/profiled.git rev= +core/redoxfs git=https://gitlab.redox-os.org/redox-os/redoxfs.git rev=b596776 +core/relibc git=https://gitlab.redox-os.org/redox-os/relibc.git rev=861bbb0 +core/strace git=https://gitlab.redox-os.org/redox-os/strace-redox.git rev= +core/userutils git=https://gitlab.redox-os.org/redox-os/userutils.git rev=0c5274f +core/uutils git=https://github.com/uutils/coreutils rev=1f7c81f5d2d3e56c518349c0392158871a1ea9ec +demos/cmatrix git=https://github.com/abishekvashok/cmatrix rev= +demos/cpal git=https://gitlab.redox-os.org/redox-os/cpal.git rev= +demos/dynamic-example git=https://gitlab.redox-os.org/redox-os/dynamic-example.git rev= +demos/exampled git=https://gitlab.redox-os.org/redox-os/exampled.git rev= +demos/glutin 
git=https://gitlab.redox-os.org/redox-os/glutin.git rev= +demos/iced git=https://gitlab.redox-os.org/redox-os/iced.git rev= +demos/orbclient git=https://gitlab.redox-os.org/redox-os/orbclient.git rev= +demos/pixelcannon git=https://github.com/jackpot51/pixelcannon.git rev= +demos/winit git=https://github.com/pop-os/winit.git rev= +dev/autoconf tar=https://ftp.gnu.org/gnu/autoconf/autoconf-2.71.tar.xz blake3=da1cc8af8551c343de9f42af0ae53fd7dff3623487157623892b6cd7e3bb5692 +dev/automake tar=https://ftp.gnu.org/gnu/automake/automake-1.16.5.tar.xz blake3=f42cfc333aaaa11f2bcb05b5b0273b8706c820c22f9ba4367f7eb920551695cd +dev/binutils-gdb git=https://gitlab.redox-os.org/redox-os/binutils-gdb rev= +dev/clang21 same_as=../llvm21 +dev/cmake tar=https://github.com/Kitware/CMake/releases/download/v4.0.3/cmake-4.0.3.tar.gz blake3= +dev/crates-io-index git=https://github.com/rust-lang/crates.io-index.git rev= +dev/fontconfig tar=https://www.freedesktop.org/software/fontconfig/release/fontconfig-2.16.0.tar.xz blake3=5c95d48f5b9150f4a06d8acac12c25edaac956007df95a3bf527df02a5908f0e +dev/gcc13 git=https://gitlab.redox-os.org/redox-os/gcc rev=8e365ce7e5 +dev/gdbserver git=https://gitlab.redox-os.org/redox-os/gdbserver.git rev= +dev/gdk-pixbuf tar=https://ftp.gnome.org/pub/gnome/sources/gdk-pixbuf/2.44/gdk-pixbuf-2.44.4.tar.xz blake3=94db7bebffbd6be84a1b58a05771e411e9f7c16b06d73fcedaf0e6c0e552be9c +dev/git tar=https://www.kernel.org/pub/software/scm/git/git-2.13.1.tar.xz blake3=bc78271bffd60c5b8b938d8c08fd74dc2de8d21fbaf8f8e0e3155436d9263f17 +dev/gitoxide git=https://github.com/Byron/gitoxide.git rev= +dev/gnu-make tar=http://ftp.gnu.org/gnu/make/make-4.4.tar.gz blake3=1a0e5353205e106bd9b3c0f4a5f37ee1156a1e1c8feb771d1b4842c216612cba +dev/hello-world-examples git=https://github.com/leachim6/hello-world rev= +dev/jq tar=https://github.com/jqlang/jq/releases/download/jq-1.8.1/jq-1.8.1.tar.gz blake3= +dev/lci git=https://github.com/jD91mZM2/rust-lci rev= +dev/libtool 
git=https://gitlab.redox-os.org/redox-os/libtool rev= +dev/lld21 same_as=../llvm21 +dev/llvm18 git=https://gitlab.redox-os.org/redox-os/llvm-project.git rev= +dev/llvm21 git=https://gitlab.redox-os.org/redox-os/llvm-project.git rev=250d0b022e +dev/lua54 tar=https://lua.org/ftp/lua-5.4.7.tar.gz blake3=e51c2f347e3185479d5ff95cae8ac77511db486853269443c56bedaa0a6ae629 +dev/luajit git=https://luajit.org/git/luajit.git rev=a4f56a459a588ae768801074b46ba0adcfb49eb1 +dev/luarocks git=https://github.com/luarocks/luarocks.git rev= +dev/nasm tar=https://gstreamer.freedesktop.org/src/mirror/nasm-2.14.02.tar.xz blake3=f66c0cc852c3b9e3321f57c33ef336e17a128bd3d854ee095aae7e6f64629f20 +dev/patch tar=https://ftp.gnu.org/gnu/patch/patch-2.7.6.tar.xz blake3=d46d14c12aa4ea51e356bf92091c368fd871e1d770b94bc29027886737aecd5f +dev/pciids git=https://github.com/pciutils/pciids.git rev=1f4ff7c +dev/php84 tar=https://www.php.net/distributions/php-8.4.17.tar.xz blake3=a8478dddd948d4b26e51c5727ac0895440da76e8ad9be947098a4284ca0b7f2a +dev/pkg-config tar=https://pkg-config.freedesktop.org/releases/pkg-config-0.29.2.tar.gz blake3=713372b09a1fafeec130dc9bf812a3880f2a90496af5d2194e508d91ccf667d0 +dev/python312 tar=https://www.python.org/ftp/python/3.12.12/Python-3.12.12.tar.xz blake3=29636fdae3e0ee8d0fe585e528c9376fe43876f5f3f0f7892140567946fd907b +dev/redoxer git=https://gitlab.redox-os.org/redox-os/redoxer rev= +dev/rust git=https://gitlab.redox-os.org/redox-os/rust.git rev= +dev/rustpython git=https://github.com/RustPython/RustPython rev=2025-10-13-main-51 +doc/book git=https://gitlab.redox-os.org/redox-os/book.git rev= +drivers/ehcid path=source +drivers/linux-kpi path=source +drivers/ohcid path=source +drivers/redbear-btusb path=source +drivers/redbear-iwlwifi path=source +drivers/redox-driver-core path=source +drivers/redox-driver-pci path=source +drivers/redox-driver-sys path=source +drivers/uhcid path=source +drivers/usb-core path=source +emulators/dosbox 
tar=https://sourceforge.net/projects/dosbox/files/dosbox/0.74-3/dosbox-0.74-3.tar.gz/download blake3=8bc50ffdba20579fb3080a0dca32cb939c8a3c19259aed026482c6ac069b0007 +emulators/flycast git=https://github.com/jackpot51/flycast.git rev= +emulators/libretro-super git=https://github.com/jackpot51/libretro-super.git rev= +emulators/mednafen tar=https://mednafen.github.io/releases/files/mednafen-1.29.0.tar.xz blake3=c75c1044cdc9328b2349915a67972d6135c77eb53eb0d995788f22b7daacf79b +emulators/mgba tar=https://github.com/mgba-emu/mgba/archive/0.10.5.tar.gz blake3=a1b9e797a5058f5264d276805aef5643b7ea460916e491a0098ba32d87f1519e +emulators/retroarch git=https://github.com/jackpot51/retroarch.git rev= +emulators/rs-nes git=https://gitlab.redox-os.org/redox-os/rs-nes.git rev= +emulators/rust64 git=https://gitlab.redox-os.org/redox-os/rust64.git rev= +emulators/rustual-boy git=https://gitlab.redox-os.org/redox-os/rustual-boy.git rev= +emulators/rvvm git=https://github.com/LekKit/RVVM.git rev= +emulators/scummvm tar=https://downloads.scummvm.org/frs/scummvm/2.0.0/scummvm-2.0.0.tar.xz blake3=02e6791fd43ad3cb4238c07d23350ca1459a0f692689e585dba1d46648f64327 +files/hf git=https://github.com/sorairolake/hf rev= +fonts/dejavu tar=http://sourceforge.net/projects/dejavu/files/dejavu/2.37/dejavu-fonts-ttf-2.37.tar.bz2 blake3=b702bac8a0f8e0802758549da3b4d8041c3c83c3894e1e8a960eab53af18cce8 +fonts/freefont tar=https://ftp.gnu.org/gnu/freefont/freefont-otf-20120503.tar.gz blake3=e950397741d84981106cf648fbc143c7827b61d637c86c916232d47aabdfe253 +fonts/ibm-plex tar=https://github.com/IBM/plex/archive/refs/tags/v6.3.0.tar.gz blake3=6c67f5bf8069762eea1e31f5cca5b4e6f57ea1151b34b338046c7976072ccdef +fonts/intel-one-mono tar=https://github.com/intel/intel-one-mono/archive/refs/tags/V1.3.0.tar.gz blake3=9caff71b0a9fe8627253c55889964612ea4ae144584a283cd2fe88b7a14a4140 +fonts/noto-color-emoji git=https://github.com/googlefonts/noto-emoji rev=e8073ab740292f8d5f19b5de144087ac58044d06 +fonts/ttf-hack 
tar=https://github.com/source-foundry/Hack/releases/download/v3.003/Hack-v3.003-ttf.tar.xz blake3=acd40f61f6f512b0808d4bf530ab4aeb5a8ec3aa1f65bf5a1d08964d1bc3d044 +games/classicube git=https://github.com/jackpot51/ClassiCube.git rev= +games/devilutionx tar=https://github.com/diasurgical/devilutionX/archive/refs/tags/1.5.4.tar.gz blake3=d4a61ff3a7c69d86a29158918aad48ab9c4866c6a22a3e8da5feadbb7d23b3ca +games/eduke32 tar=https://dukeworld.com/eduke32/synthesis/20181010-7067/eduke32_src_20181010-7067.tar.xz blake3=b0b759fe9ca51849f42669e4832ae1ae1f9ad7938529769108f7cf6a6a176558 +games/freeciv tar=https://files.freeciv.org/stable/freeciv-3.1.4.tar.xz blake3=212630af5e50fb72662ca62a71cdd57318d0cf309b53e46377dd24c8199923a4 +games/freedoom git=https://gitlab.redox-os.org/redox-os/freedoom.git rev= +games/game-2048 git=https://gitlab.redox-os.org/redox-os/2048-rs.git rev= +games/gigalomania git=https://gitlab.redox-os.org/redox-os/gigalomania.git rev= +games/hematite git=https://gitlab.redox-os.org/redox-os/hematite.git rev= +games/neverball tar=https://neverball.org/neverball-1.6.0.tar.gz blake3=74f3b68595f475e89fd2ca8b5fc349837ff36fbbe141f321dfc232dbf8fccf51 +games/neverball-sols same_as=../neverball +games/openjazz tar=https://github.com/AlisterT/openjazz/archive/refs/tags/20240919.tar.gz blake3=c419066dd7bf50510c5ef0746fc47450ab8f5a17a0010a1bc0ad67d0e63538da +games/openjk git=https://github.com/jackpot51/OpenJK rev= +games/openttd git=https://github.com/OpenTTD/OpenTTD.git rev=231402fb4bea0a0d6a16cef90764d9e7aa699c53 +games/openttd-opengfx git=https://gitlab.redox-os.org/redox-os/openttd-opengfx.git rev= +games/openttd-openmsx git=https://gitlab.redox-os.org/redox-os/openttd-openmsx.git rev= +games/openttd-opensfx git=https://gitlab.redox-os.org/redox-os/openttd-opensfx.git rev= +games/opentyrian git=https://github.com/opentyrian/opentyrian rev= +games/prboom tar=https://downloads.sourceforge.net/project/prboom/prboom%20stable/2.5.0/prboom-2.5.0.tar.gz 
blake3=24c1b9b5aa15fd73e59162055f2c6d8faa82759b76ddfca9828cd2a5c8dc6b2a +games/quakespasm git=https://github.com/sezero/quakespasm rev=cc32abe09ed417ce3be10af300d2dc2f686349ba +games/redox-games git=https://gitlab.redox-os.org/redox-os/games.git rev= +games/sm64ex git=https://github.com/jackpot51/sm64ex.git rev= +games/sopwith tar=https://github.com/fragglet/sdl-sopwith/releases/download/sdl-sopwith-1.8.4/sopwith-1.8.4.tar.gz blake3=44e1404a9c4bea257d7778d2a4b1512231603a74b0a7b18eac5d18f36730ed3e +games/spacecadetpinball git=https://gitlab.redox-os.org/xTibor/SpaceCadetPinball.git rev= +gpu/amdgpu path=source +gpu/redox-drm path=source +graphics/procedural-wallpapers-rs git=https://github.com/lukas-kirschner/procedural-wallpapers-rs.git rev= +gui/installer-gui git=https://gitlab.redox-os.org/redox-os/installer-gui.git rev= +gui/orbdata git=https://gitlab.redox-os.org/redox-os/orbdata.git rev= +gui/orbital git=https://gitlab.redox-os.org/redox-os/orbital.git rev= +gui/orbterm git=https://gitlab.redox-os.org/redox-os/orbterm.git rev= +gui/orbutils git=https://gitlab.redox-os.org/redox-os/orbutils.git rev= +gui/orbutils-background same_as=../orbutils +icons/cosmic-icons git=https://github.com/pop-os/cosmic-icons.git rev=f93dcdfa1060c2cf3f8cf0b56b0338292edcafa5 +icons/hicolor-icon-theme git=https://gitlab.freedesktop.org/xdg/default-icon-theme.git rev=8d22bbf +icons/pop-icon-theme git=https://github.com/pop-os/icon-theme.git rev=1a575a8 +kde/breeze tar=https://invent.kde.org/plasma/breeze/-/archive/v6.3.4/breeze-v6.3.4.tar.gz blake3= +kde/kde-cli-tools tar=https://invent.kde.org/plasma/kde-cli-tools/-/archive/v6.3.4/kde-cli-tools-v6.3.4.tar.gz blake3= +kde/kdecoration tar=https://invent.kde.org/plasma/kdecoration/-/archive/v6.3.4/kdecoration-v6.3.4.tar.gz blake3= +kde/kf6-attica tar=https://invent.kde.org/frameworks/attica/-/archive/v6.10.0/attica-v6.10.0.tar.gz blake3= +kde/kf6-extra-cmake-modules 
tar=https://invent.kde.org/frameworks/extra-cmake-modules/-/archive/v6.10.0/extra-cmake-modules-v6.10.0.tar.gz blake3= +kde/kf6-karchive tar=https://invent.kde.org/frameworks/karchive/-/archive/v6.10.0/karchive-v6.10.0.tar.gz blake3= +kde/kf6-kauth tar=https://invent.kde.org/frameworks/kauth/-/archive/v6.10.0/kauth-v6.10.0.tar.gz blake3= +kde/kf6-kbookmarks tar=https://invent.kde.org/frameworks/kbookmarks/-/archive/v6.10.0/kbookmarks-v6.10.0.tar.gz blake3= +kde/kf6-kcmutils tar=https://invent.kde.org/frameworks/kcmutils/-/archive/v6.10.0/kcmutils-v6.10.0.tar.gz blake3= +kde/kf6-kcodecs tar=https://invent.kde.org/frameworks/kcodecs/-/archive/v6.10.0/kcodecs-v6.10.0.tar.gz blake3= +kde/kf6-kcolorscheme tar=https://invent.kde.org/frameworks/kcolorscheme/-/archive/v6.10.0/kcolorscheme-v6.10.0.tar.gz blake3= +kde/kf6-kcompletion tar=https://invent.kde.org/frameworks/kcompletion/-/archive/v6.10.0/kcompletion-v6.10.0.tar.gz blake3= +kde/kf6-kconfig tar=https://invent.kde.org/frameworks/kconfig/-/archive/v6.10.0/kconfig-v6.10.0.tar.gz blake3= +kde/kf6-kconfigwidgets tar=https://invent.kde.org/frameworks/kconfigwidgets/-/archive/v6.10.0/kconfigwidgets-v6.10.0.tar.gz blake3= +kde/kf6-kcoreaddons tar=https://invent.kde.org/frameworks/kcoreaddons/-/archive/v6.10.0/kcoreaddons-v6.10.0.tar.gz blake3= +kde/kf6-kcrash tar=https://invent.kde.org/frameworks/kcrash/-/archive/v6.10.0/kcrash-v6.10.0.tar.gz blake3= +kde/kf6-kdbusaddons tar=https://invent.kde.org/frameworks/kdbusaddons/-/archive/v6.10.0/kdbusaddons-v6.10.0.tar.gz blake3= +kde/kf6-kdeclarative tar=https://invent.kde.org/frameworks/kdeclarative/-/archive/v6.10.0/kdeclarative-v6.10.0.tar.gz blake3= +kde/kf6-kded6 tar=https://invent.kde.org/frameworks/kded/-/archive/v6.10.0/kded-v6.10.0.tar.gz blake3= +kde/kf6-kglobalaccel tar=https://invent.kde.org/frameworks/kglobalaccel/-/archive/v6.10.0/kglobalaccel-v6.10.0.tar.gz blake3= +kde/kf6-kguiaddons 
tar=https://invent.kde.org/frameworks/kguiaddons/-/archive/v6.10.0/kguiaddons-v6.10.0.tar.gz blake3= +kde/kf6-ki18n tar=https://invent.kde.org/frameworks/ki18n/-/archive/v6.10.0/ki18n-v6.10.0.tar.gz blake3= +kde/kf6-kiconthemes tar=https://invent.kde.org/frameworks/kiconthemes/-/archive/v6.10.0/kiconthemes-v6.10.0.tar.gz blake3= +kde/kf6-kidletime tar=https://invent.kde.org/frameworks/kidletime/-/archive/v6.10.0/kidletime-v6.10.0.tar.gz blake3= +kde/kf6-kio tar=https://invent.kde.org/frameworks/kio/-/archive/v6.10.0/kio-v6.10.0.tar.gz blake3= +kde/kf6-kirigami tar=https://invent.kde.org/frameworks/kirigami/-/archive/v6.10.0/kirigami-v6.10.0.tar.gz blake3=d0964890aa6523f7067510bb7e6c784ba77611952d952bfdd6422a58a23664f6 +kde/kf6-kitemmodels tar=https://invent.kde.org/frameworks/kitemmodels/-/archive/v6.10.0/kitemmodels-v6.10.0.tar.gz blake3= +kde/kf6-kitemviews tar=https://invent.kde.org/frameworks/kitemviews/-/archive/v6.10.0/kitemviews-v6.10.0.tar.gz blake3= +kde/kf6-kjobwidgets tar=https://invent.kde.org/frameworks/kjobwidgets/-/archive/v6.10.0/kjobwidgets-v6.10.0.tar.gz blake3= +kde/kf6-knotifications tar=https://invent.kde.org/frameworks/knotifications/-/archive/v6.10.0/knotifications-v6.10.0.tar.gz blake3= +kde/kf6-kpackage tar=https://invent.kde.org/frameworks/kpackage/-/archive/v6.10.0/kpackage-v6.10.0.tar.gz blake3= +kde/kf6-kservice tar=https://invent.kde.org/frameworks/kservice/-/archive/v6.10.0/kservice-v6.10.0.tar.gz blake3= +kde/kf6-ktextwidgets tar=https://invent.kde.org/frameworks/ktextwidgets/-/archive/v6.10.0/ktextwidgets-v6.10.0.tar.gz blake3= +kde/kf6-kwallet tar=https://invent.kde.org/frameworks/kwallet/-/archive/v6.10.0/kwallet-v6.10.0.tar.gz blake3= +kde/kf6-kwayland tar=https://download.kde.org/stable/plasma/6.3.4/kwayland-6.3.4.tar.xz blake3= +kde/kf6-kwidgetsaddons tar=https://invent.kde.org/frameworks/kwidgetsaddons/-/archive/v6.10.0/kwidgetsaddons-v6.10.0.tar.gz blake3= +kde/kf6-kwindowsystem 
tar=https://invent.kde.org/frameworks/kwindowsystem/-/archive/v6.10.0/kwindowsystem-v6.10.0.tar.gz blake3= +kde/kf6-kxmlgui tar=https://invent.kde.org/frameworks/kxmlgui/-/archive/v6.10.0/kxmlgui-v6.10.0.tar.gz blake3= +kde/kf6-prison tar=https://invent.kde.org/frameworks/prison/-/archive/v6.10.0/prison-v6.10.0.tar.gz blake3= +kde/kf6-pty tar=https://invent.kde.org/frameworks/kpty/-/archive/v6.10.0/kpty-v6.10.0.tar.gz blake3= +kde/kf6-solid tar=https://invent.kde.org/frameworks/solid/-/archive/v6.10.0/solid-v6.10.0.tar.gz blake3= +kde/kf6-sonnet tar=https://invent.kde.org/frameworks/sonnet/-/archive/v6.10.0/sonnet-v6.10.0.tar.gz blake3= +kde/kglobalacceld tar=https://invent.kde.org/plasma/kglobalacceld/-/archive/v6.0.0/kglobalacceld-v6.0.0.tar.gz blake3= +kde/kirigami tar=https://invent.kde.org/frameworks/kirigami/-/archive/v6.10.0/kirigami-v6.10.0.tar.gz blake3=d0964890aa6523f7067510bb7e6c784ba77611952d952bfdd6422a58a23664f6 +kde/konsole tar=https://invent.kde.org/utilities/konsole/-/archive/v24.08.3/konsole-v24.08.3.tar.gz blake3= +kde/kwin tar=https://invent.kde.org/plasma/kwin/-/archive/v6.3.4/kwin-v6.3.4.tar.gz blake3=2aa1e234a75b0aa94f0da3a74d93e2a8e49b30a3afb12dc24b2ecd3abaa94e7f +kde/plasma-desktop tar=https://invent.kde.org/plasma/plasma-desktop/-/archive/v6.3.4/plasma-desktop-v6.3.4.tar.gz blake3= +kde/plasma-framework tar=https://invent.kde.org/frameworks/plasma-framework/-/archive/v6.10.0/plasma-framework-v6.10.0.tar.gz blake3= +kde/plasma-wayland-protocols tar=https://invent.kde.org/libraries/plasma-wayland-protocols/-/archive/v1.16.0/plasma-wayland-protocols-v1.16.0.tar.gz blake3= +kde/plasma-workspace tar=https://invent.kde.org/plasma/plasma-workspace/-/archive/v6.3.4/plasma-workspace-v6.3.4.tar.gz blake3= +libs/atk tar=https://download.gnome.org/sources/atk/2.38/atk-2.38.0.tar.xz blake3=cbc1b7ba03009ee5cc0e646d8a86117e0d65bf8d105f2e8714fbde0299a8012e +libs/cairo tar=https://www.cairographics.org/releases/cairo-1.18.4.tar.xz 
blake3=b9fa14e02f85ec4e72396c62236c98502d04dbbdf8daf01ab9557a1c7aa7106e +libs/duktape tar=https://duktape.org/duktape-2.7.0.tar.xz blake3=b0a17da888847bc9c73624ae3ba7f858ec327a9bbce9d287aee6a2489e518448 +libs/expat tar=https://github.com/libexpat/libexpat/releases/download/R_2_5_0/expat-2.5.0.tar.xz blake3=ea89dd9a5a2e48d5e44fed38554b36a8f2e365a5091a99d08e30bfb1c15dda5e +libs/ffmpeg6 tar=https://ffmpeg.org/releases/ffmpeg-6.0.tar.xz blake3=4879074c357102f85932673044c57c144b0c188ae58edec2a115965536ee340f +libs/freetype2 tar=https://sourceforge.net/projects/freetype/files/freetype2/2.13.3/freetype-2.13.3.tar.xz/download blake3=07a01894ccdb584943ce817b57341a8595ce9a92bfaa77c602ec4757dfabd5e2 +libs/fribidi tar=https://github.com/fribidi/fribidi/releases/download/v1.0.16/fribidi-1.0.16.tar.xz blake3=c16ee250f73f149d7d52dc7d285eb73ac755bad7907d237391e23f429b2b71d5 +libs/glib tar=https://download.gnome.org/sources/glib/2.87/glib-2.87.0.tar.xz blake3=26b77ae24bc02f85d1c6742fe601167b056085f117cda70da7b805cefa6195e9 +libs/gstreamer tar=https://gitlab.freedesktop.org/gstreamer/gstreamer/-/archive/1.24.12/gstreamer-1.24.12.tar.gz blake3=181daf73050f7472ec656e7461b7f67028d6002c1133870576033a32e43a364f +libs/harfbuzz tar=https://github.com/harfbuzz/harfbuzz/releases/download/11.0.1/harfbuzz-11.0.1.tar.xz blake3=51f0edaaf2e9b7a7176d3252f15d03d409ef7ad35f77b050c407de89f85b77c5 +libs/jansson tar=https://github.com/akheron/jansson/releases/download/v2.10/jansson-2.10.tar.gz blake3=3c74f374a6c7ac5e323f72d87e49e5309ca922ca26cfe4992873b31f28776624 +libs/lcms2-stub path=source +libs/libarchive tar=https://libarchive.org/downloads/libarchive-3.6.2.tar.xz blake3=f98695fe81235a74fa3fc2c3ba0f0d4f13ea15f9be3850b83e304cf5d78be710 +libs/libatomic same_as=../../dev/gcc13 +libs/libcosmic git=https://gitlab.redox-os.org/redox-os/libcosmic.git rev= +libs/libdisplay-info-stub path=source +libs/libepoxy-stub path=source +libs/libffi 
tar=https://github.com/libffi/libffi/releases/download/v3.4.5/libffi-3.4.5.tar.gz blake3=f9a2cfe1d2ac8d211c18c99f9cfafe5537925101bfb92c2d44d844680dd82264 +libs/libflac tar=https://github.com/xiph/flac/releases/download/1.5.0/flac-1.5.0.tar.xz blake3=2adca3cd8da4b577ebb9c12e73c91cf6f6a7feb7485b3f003853b82710bada84 +libs/libgmp tar=https://ftp.gnu.org/gnu/gmp/gmp-6.3.0.tar.xz blake3=fffe4996713928ae19331c8ef39129e46d3bf5b7182820656fd4639435cd83a4 +libs/libiconv tar=https://ftp.gnu.org/gnu/libiconv/libiconv-1.17.tar.gz blake3=820b3b9fd3e2181bfb95475f01e9a3451e6d751e4f8c98ebcdcca1d8aa720f7f +libs/libjpeg tar=https://github.com/libjpeg-turbo/libjpeg-turbo/releases/download/3.1.0/libjpeg-turbo-3.1.0.tar.gz blake3=3efc14da55c56fc0a6a50f109d9e1ee8a91f5ae7dd17a21d3aebe04a65f3ee96 +libs/libmodplug1 tar=https://pilotfiber.dl.sourceforge.net/project/modplug-xmms/libmodplug/0.8.9.0/libmodplug-0.8.9.0.tar.gz blake3=01d71f7fe4e1abeb848db02b74c70ab2fd51e824f5ea7e9e18631571a76c3592 +libs/libmpfr tar=https://www.mpfr.org/mpfr-current/mpfr-4.2.2.tar.xz blake3=11d59d061ef8db588650bc7dc5172594a6e5aad013994801c6f63011a62b191d +libs/libnettle tar=https://ftp.gnu.org/gnu/nettle/nettle-3.9.1.tar.gz blake3=e4bfbda32f4fdf5ed96c152efe3a3867193b690faa5378d02a2a6fd052ee3393 +libs/libogg tar=https://github.com/xiph/ogg/releases/download/v1.3.4/libogg-1.3.4.tar.xz blake3=1cffbe7c498555ddfdb1390d7a38179c4bead6129ea37b1b1d54f3a76b816304 +libs/libopus tar=https://downloads.xiph.org/releases/opus/opus-1.6.1.tar.gz blake3=874bd7d28e24f10d88105c7d846a2e5bf085284af91a0ee36b05674a8f78e759 +libs/liborbital git=https://gitlab.redox-os.org/redox-os/liborbital.git rev=cdb08e8 +libs/libpng tar=https://github.com/pnggroup/libpng/archive/refs/tags/v1.6.46.tar.gz blake3=36f4bbb48c70975116b00ab0cff577931b96f703b2774ac3b33131d001419435 +libs/libpsl tar=https://github.com/rockdaboot/libpsl/releases/download/0.21.5/libpsl-0.21.5.tar.lz blake3=91318b7b876b12ff4649b7a0d6f6ed4ab1ab44f48a49508c8978ab7b4ccf3298 
+libs/libqrencode tar=https://github.com/fukuchi/libqrencode/archive/refs/tags/v4.1.1.tar.gz blake3=78bf8bbcfb037140d0e98dc355e77416c9c9b0fd3ac12fd6e767b07d68f60f8c +libs/libsodium tar=https://github.com/jedisct1/libsodium/archive/1.0.16.tar.gz blake3=2482633f872c173f9a42e6badb44c3efb042e783e664fdf8b1046babfa2405e7 +libs/libssh2 tar=https://www.libssh2.org/download/libssh2-1.10.0.tar.gz blake3=2447216ce82c1d22301456bb02f60dfb6688f1461417b90f900c099a87f1292f +libs/libstdcxx-v3 same_as=../../dev/gcc13 +libs/libudev-stub path=source +libs/liburcu tar=https://lttng.org/files/urcu/userspace-rcu-0.14.0.tar.bz2 blake3= +libs/libuv tar=https://dist.libuv.org/dist/v1.51.0/libuv-v1.51.0.tar.gz blake3=e8b5e68bc2d0776ac4ea67df59d694fca58d5cc570c103443a2284e723d01fc2 +libs/libvorbis tar=https://github.com/xiph/vorbis/releases/download/v1.3.7/libvorbis-1.3.7.tar.xz blake3=c67f3f74ec26d93a5571c4404a64eb6e6587d7d77b46b552f7b410f5bc5b1f03 +libs/libxcvt-stub path=source +libs/libxml2 tar=https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.3.tar.xz blake3=0653d3750576299c4cb88740942165671b576ff93019f3d669b3f37136225ab7 +libs/lua-compat-53 git=https://github.com/lunarmodules/lua-compat-5.3.git rev= +libs/luv git=https://github.com/luvit/luv.git rev= +libs/mesa git=https://gitlab.redox-os.org/redox-os/mesa.git rev=0ecd6b66c +libs/mesa-glu tar=https://archive.mesa3d.org/glu/glu-9.0.3.tar.xz blake3=beed1665ed983540e7502289ec50c7e66d840820af3e9ef21c9c4a7e9686ab9f +libs/mpc tar=https://ftp.gnu.org/gnu/mpc/mpc-1.3.1.tar.gz blake3= +libs/ncurses tar=https://ftp.gnu.org/gnu/ncurses/ncurses-6.6.tar.gz blake3=fbec55697a01f99b9cc3f25be55e73ae7091f4c53e5d81a1ea15734c4e5b7238 +libs/ncursesw same_as=../ncurses +libs/nghttp2 tar=https://github.com/nghttp2/nghttp2/releases/download/v1.64.0/nghttp2-1.64.0.tar.xz blake3=1bbc08de4816769d800c42f501a00c1ba3f5efa1b76e1f65d2e5bdf3aa30354d +libs/openssl1 git=https://gitlab.redox-os.org/redox-os/openssl.git rev= +libs/opusfile 
tar=https://downloads.xiph.org/releases/opus/opusfile-0.12.tar.gz blake3=1b6a5c371a0ea2ae8e37ab2e921388dfef9252dbf7f60045192dabbdd898f2bf +libs/pango tar=https://download.gnome.org/sources/pango/1.56/pango-1.56.3.tar.xz blake3=78542feaaf007c1d648b94c4e9b6655ed7515d27ce434766aea99bef886c21ac +libs/pcre tar=https://mirrors.gigenet.com/OSDN//sfnet/p/pc/pcre/pcre/8.42/pcre-8.42.tar.gz blake3=12d515ba12a816994def6b1e7196b5783fd2cfe495733a9167fa4d71dbe10248 +libs/pcre2 tar=https://github.com/PCRE2Project/pcre2/releases/download/pcre2-10.45/pcre2-10.45.tar.bz2 blake3=aea544846f9a03c1ec62c9f8d1c9a4187cc3cce557e53e6876eb6a58c7cdd9fe +libs/pixman tar=https://www.cairographics.org/releases/pixman-0.46.0.tar.xz blake3=379369245a0bbd13784bf550c87622964a6aba87edf598ffa137dc10201746e0 +libs/readline tar=https://ftp.gnu.org/gnu/readline/readline-8.3.tar.gz blake3=7109f094062bda387a0c16b4875375b96e36437bebbbd8d8f91bb27ba01d687f +libs/redox-fatfs git=https://gitlab.redox-os.org/redox-os/redox-fatfs.git rev= +libs/sdl-gfx tar=https://sourceforge.net/projects/sdlgfx/files/SDL_gfx-2.0.25.tar.gz blake3=e6f571a38e51d369b010f4b10eb35b95e3d2edae2edd796241c47ea8376581e6 +libs/sdl1 git=https://gitlab.redox-os.org/redox-os/sdl1.2.git rev= +libs/sdl1-image tar=https://www.libsdl.org/projects/SDL_image/release/SDL_image-1.2.12.tar.gz blake3=731a6f8cad9fff22c82394bd1c0c34ce4aa60fa8923f3755a3e3239f1e269389 +libs/sdl1-mixer tar=https://www.libsdl.org/projects/SDL_mixer/release/SDL_mixer-1.2.12.tar.gz blake3=ef23bab2d42250dfdc51ce6939ee7b393973ff11a0dd3481f32180b489d2661c +libs/sdl1-ttf tar=https://www.libsdl.org/projects/SDL_ttf/release/SDL_ttf-2.0.11.tar.gz blake3=a684e57553e43b55ab28b064d1d5d44b8749299f259da31a62d671fc1d5505ee +libs/sdl2 git=https://gitlab.redox-os.org/redox-os/sdl2.git rev= +libs/sdl2-gfx tar=http://www.ferzkopp.net/Software/SDL2_gfx/SDL2_gfx-1.0.4.tar.gz blake3=2e9bd2dc0f004349b51418f33219ebf5cd69f25ed0ba660373652a662cbb857c +libs/sdl2-image 
tar=https://www.libsdl.org/projects/SDL_image/release/SDL2_image-2.0.4.tar.gz blake3= +libs/sdl2-mixer tar=https://www.libsdl.org/projects/SDL_mixer/release/SDL2_mixer-2.8.1.tar.gz blake3=fa0798ce7ffdb5f89545311292374e5b7af479df8bc99a4aacfb40d2ab2f8384 +libs/sdl2-ttf tar=https://www.libsdl.org/projects/SDL_ttf/release/SDL2_ttf-2.0.15.tar.gz blake3=9814a07f33a3501b414f0fc7fa962e7d7ffc56748406f3798b7698b8d7e7fe12 +libs/termcap tar=https://ftp.gnu.org/gnu/termcap/termcap-1.3.1.tar.gz blake3=57c095e0bb6e60e7b4a0597f51f7ac15b501ca0f95d37424d8d13978d28b8da3 +libs/unibilium tar=https://github.com/neovim/unibilium/archive/refs/tags/v2.1.2.tar.gz blake3=856a7593a412942f4716bb55bfdd225f3ce92cb013b9d4a44693255f0570b1c7 +libs/utf8proc tar=https://github.com/JuliaStrings/utf8proc/archive/refs/tags/v2.10.0.tar.gz blake3=6f675db5d1ae55ad0825351ba9c58a5b5c24c862f559cc7bfed1cb63c1185594 +libs/zbus path=source +libs/zlib tar=https://www.zlib.net/fossils/zlib-1.3.tar.gz blake3=ec1abc6f672a7a6ee6f49ba544cc9529f73121b478310473be44fee22a140ebf +math/orbcalculator git=https://gitlab.redox-os.org/redox-os/orbcalculator.git rev= +net/nginx tar=https://nginx.org/download/nginx-1.28.0.tar.gz blake3= +net/openssh tar=https://cdn.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-9.8p1.tar.gz blake3= +net/redox-ssh git=https://gitlab.redox-os.org/redox-os/redox-ssh.git rev= +net/rsync tar=https://download.samba.org/pub/rsync/src/rsync-3.4.1.tar.gz blake3= +other/ca-certificates git=https://gitlab.redox-os.org/redox-os/ca-certificates.git rev=8139d99 +other/cookbook git=https://gitlab.redox-os.org/redox-os/redox.git rev= +other/generaluser-gs git=https://gitlab.redox-os.org/redox-os/generaluser-gs.git rev= +other/jeremy git=https://gitlab.redox-os.org/jackpot51/jeremy.git rev= +other/rustconf2025 git=https://github.com/jackpot51/rustconf2025.git rev= +other/shared-mime-info tar=https://gitlab.freedesktop.org/xdg/shared-mime-info/-/archive/2.4/shared-mime-info-2.4.tar.gz 
blake3=ad130f2f923ab3d5455c643e6257abf3598339fdd134ad0fac4e5dbbbf070eb9 +other/terminfo git=https://github.com/sajattack/terminfo rev=dc5712b +qt/qt6-sensors tar=https://download.qt.io/official_releases/qt/6.11/6.11.0/submodules/qtsensors-everywhere-src-6.11.0.tar.xz blake3= +shells/bash tar=https://ftp.gnu.org/gnu/bash/bash-5.2.15.tar.gz blake3=c1548e3f2a9b6de5296e18c28b3d2007985e647273e03f039efd3e489edaa41f +shells/nushell git=https://github.com/nushell/nushell rev=172a070a4bbeff15a289813bc73d4628a3032210 +shells/zsh tar=https://github.com/zsh-users/zsh/archive/refs/tags/zsh-5.9.tar.gz blake3=a15b94fae03e87aba6fc6a27df3c98e610b85b0c7c0fc90248f07fdcb8816860 +sound/freepats git=https://gitlab.redox-os.org/redox-os/freepats.git rev= +sound/rodioplay git=https://gitlab.redox-os.org/redox-os/rodioplay.git rev= +sound/timidity git=https://gitlab.redox-os.org/redox-os/timidity.git rev= +system/cpufreqd path=source +system/cub path=source +system/driver-manager path=source +system/driver-params path=source +system/evdevd path=source +system/firmware-loader path=source +system/hwrngd path=source +system/iommu path=source +system/numad path=source +system/redbear-acmd path=source +system/redbear-authd path=source +system/redbear-btctl path=source +system/redbear-dbus-services path=files +system/redbear-ecmd path=source +system/redbear-firmware path=source +system/redbear-greeter path=source +system/redbear-hwutils path=source +system/redbear-info path=source +system/redbear-login-protocol path=source +system/redbear-meta path=source +system/redbear-mtr path=source +system/redbear-netctl path=source +system/redbear-netctl-console path=source +system/redbear-netstat path=source +system/redbear-nmap path=source +system/redbear-notifications path=source +system/redbear-polkit path=source +system/redbear-quirks path=source +system/redbear-session-launch path=source +system/redbear-sessiond path=source +system/redbear-traceroute path=source +system/redbear-udisks path=source 
+system/redbear-upower path=source +system/redbear-usbaudiod path=source +system/redbear-wifictl path=source +system/thermald path=source +system/udev-shim path=source +terminal/bash-completion tar=https://github.com/scop/bash-completion/releases/download/2.17.0/bash-completion-2.17.0.tar.xz blake3= +terminal/pls git=https://github.com/pls-rs/pls rev= +terminal/zoxide git=https://github.com/ajeetdsouza/zoxide rev= +tests/acid git=https://gitlab.redox-os.org/redox-os/acid.git rev= +tests/acid-bins same_as=../acid +tests/benchmarks git=https://gitlab.redox-os.org/redox-os/benchmarks rev= +tests/iperf3 tar=https://downloads.es.net/pub/iperf/iperf-3.20.tar.gz blake3= +tests/openposixtestsuite git=https://gitlab.redox-os.org/redox-os/openposixtestsuite.git rev= +tests/os-test git=https://gitlab.com/sortix/os-test rev= +tests/os-test-bins same_as=../os-test +tests/os-test-result same_as=../os-test +tests/redox-drm-prime-test path=source +tests/redox-posix-tests git=https://gitlab.redox-os.org/redox-os/redox-posix-tests.git rev= +tests/relibc-phase1-tests path=source +tests/relibc-tests same_as=../../core/relibc +tests/relibc-tests-bins same_as=../../core/relibc +tests/schedrs git=https://gitlab.redox-os.org/akshitgaur2005/schedrs.git rev= +tests/sysbench git=https://github.com/akopytov/sysbench.git rev= +tests/vttest tar=https://invisible-island.net/archives/vttest/vttest-20140305.tgz blake3=b515b9a5e1f1498ed99e1a1c172fbcfdf2b7a214e185bd2005cc994407ded89e +tools/bzip2 tar=https://sourceware.org/pub/bzip2/bzip2-1.0.8.tar.gz blake3=97af3f520629c65fe41292f77e6ca798fe594d7987bfb2aebe7c6fcdc7ab5ed2 +tools/cleye git=https://gitlab.redox-os.org/redox-os/cleye.git rev= +tools/cosmic-edit git=https://github.com/pop-os/cosmic-edit.git rev=epoch-1.0.8 +tools/cosmic-files git=https://github.com/pop-os/cosmic-files.git rev=epoch-1.0.8 +tools/cosmic-reader git=https://github.com/pop-os/cosmic-reader.git rev=epoch-1.0.8 +tools/cosmic-settings 
git=https://github.com/pop-os/cosmic-settings.git rev= +tools/cosmic-store git=https://github.com/pop-os/cosmic-store.git rev=epoch-1.0.8 +tools/cosmic-term git=https://github.com/pop-os/cosmic-term.git rev=epoch-1.0.8 +tools/cosmic-text git=https://github.com/pop-os/cosmic-text.git rev= +tools/diffutils tar=https://ftp.gnu.org/gnu/diffutils/diffutils-3.6.tar.xz blake3=086a95093c15edcdb826e75ff4de6c2213de6fbd2eb13538d07bdc3286dfb4a4 +tools/fd git=https://github.com/sharkdp/fd.git rev=840a565d3aadbeb303b10a01c0aa3561924dfc46 +tools/file tar=https://astron.com/pub/file/file-5.46.tar.gz blake3= +tools/friar git=https://github.com/jackpot51/friar.git rev= +tools/gettext tar=https://ftp.gnu.org/gnu/gettext/gettext-0.22.5.tar.gz blake3=cb3f3a34da7ce1a92746df81f5b78c5d53841973a24eb80ab76537263d380ec0 +tools/gnu-binutils tar=https://ftp.gnu.org/gnu/binutils/binutils-2.43.1.tar.xz blake3=f074c81313b70eabc58ce9a9411cd771c5fa2433792d0ad8abcc45f603f58ed6 +tools/gnu-grep tar=https://ftp.gnu.org/gnu/grep/grep-3.1.tar.xz blake3=46b6e24dfa1b0f309f4eae3c450d612396c8faa6510b53a55f629e4f4c70b4a3 +tools/helix git=https://github.com/greyshaman/helix.git rev= +tools/libc-bench tar=https://www.etalabs.net/releases/libc-bench-20110206.tar.gz blake3=64093102f29faa76da455f55a7b4be25b6d74d5c3d6fe88dbbc38aaae185182f +tools/lsd git=https://github.com/lsd-rs/lsd rev= +tools/nano tar=https://www.nano-editor.org/dist/v7/nano-7.2.tar.xz blake3= +tools/onefetch git=https://github.com/o2sh/onefetch rev= +tools/patchelf tar=https://github.com/NixOS/patchelf/releases/download/0.18.0/patchelf-0.18.0.tar.bz2 blake3=f843b32bdf3ee8a1f465e92d3fef34f30c48ccef9c112fdb793e2e7f2ae7283a +tools/pathfinder git=https://gitlab.redox-os.org/redox-os/pathfinder.git rev= +tools/perg git=https://github.com/guerinoni/perg.git rev=e206fab6bbd9c363c686fa7503d318304e48ddbe +tools/periodictable git=https://gitlab.redox-os.org/redox-os/periodictable.git rev= +tools/powerline git=https://github.com/jD91mZM2/powerline-rs rev= 
+tools/ripgrep git=https://github.com/jackpot51/ripgrep.git rev= +tools/schismtracker tar=https://github.com/schismtracker/schismtracker/archive/20181223.tar.gz blake3=057e973f4f84cf898e2240d67c0e92f25086d8b9ffdc7e0c7ef81dd8dc81bc70 +tools/sed tar=https://ftp.gnu.org/gnu/sed/sed-4.4.tar.xz blake3=a88c12b2b4304e53e3c7ae2eb0499d02e28873c1b9e1a6871e5347c6886a1ecd +tools/shellharden git=https://github.com/anordal/shellharden.git rev=bd24c99d5d1e76452b6d0749404837c1c95d923c +tools/shellstorm git=https://gitlab.redox-os.org/redox-os/shellstorm.git rev= +tools/smith git=https://gitlab.redox-os.org/redox-os/Smith.git rev= +tools/sodium git=https://gitlab.redox-os.org/redox-os/sodium.git rev= +tools/tokei git=https://github.com/XAMPPRocky/tokei.git rev= +tools/twin-commander git=https://github.com/kivimango/twin-commander.git rev= +tools/vim tar=https://github.com/vim/vim/archive/refs/tags/v9.1.0821.tar.gz blake3=d1f5802ceb047b09143f1764bf4016f084cf7e6c026c7047919264c9f262a5dd +tools/xz tar=https://github.com/tukaani-project/xz/releases/download/v5.2.13/xz-5.2.13.tar.gz blake3=edc6350542e8cb7188a878135e5b9bd592d687e5b47451ca1c89d51cc4bc6b53 +tui/goaccess tar=https://tar.goaccess.io/goaccess-1.9.4.tar.gz blake3=a7a7641c98956e8941191956129141e071321851d004269c7d21bce536d9224a +tui/mc tar=https://ftp.osuosl.org/pub/midnightcommander/mc-4.8.33.tar.xz blake3=cad9c1587f2976b9e42016191a72c4f23a07222c96ec7a9454a1a66ce639ac63 +tui/mdp git=https://github.com/visit1985/mdp.git rev= +tui/ncdu tar=https://dev.yorhel.nl/download/ncdu-1.22.tar.gz blake3=b7838c03ded7207a328a26c840ec3d62d3be6bbf7269a70ea3430c6cbf065960 +video/sdl-player git=https://gitlab.redox-os.org/redox-os/sdl-player.git rev= +wayland/qt6-wayland-smoke path=source +wayland/redbear-compositor path=source +wayland/seatd-redox tar=https://git.sr.ht/~kennylevinsen/seatd/archive/0.9.1.tar.gz blake3= +wayland/smallvil git=https://github.com/jackpot51/smithay rev= +web/netsurf 
tar=https://download.netsurf-browser.org/netsurf/releases/source-full/netsurf-all-3.11.tar.gz blake3=cd406668a9ed5712efac1a8685125b83626690b73bbc6cb5de82ef00e3f65087 +web/website git=https://gitlab.redox-os.org/redox-os/website rev= \ No newline at end of file diff --git a/sources/redbear-0.1.0/patches/P4-s3-suspend-resume.patch b/sources/redbear-0.1.0/patches/P4-s3-suspend-resume.patch new file mode 100644 index 00000000..9b223b31 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P4-s3-suspend-resume.patch @@ -0,0 +1,1084 @@ +diff --git a/Cargo.toml b/Cargo.toml +index 6d4f059..e05f723 100644 +--- a/Cargo.toml ++++ b/Cargo.toml +@@ -12,6 +12,7 @@ cc = "1.0" + toml = "0.8" + + [dependencies] ++acpi_ext = { package = "acpi", git = "https://gitlab.redox-os.org/redox-os/acpi.git", branch = "redox-6.x" } + arrayvec = { version = "0.7.4", default-features = false } + bitfield = "0.13.2" + bitflags = "2" +diff --git a/build.rs b/build.rs +index 96c3ea5..751746c 100644 +--- a/build.rs ++++ b/build.rs +@@ -77,6 +77,7 @@ fn main() { + } + "x86_64" => { + println!("cargo::rerun-if-changed=src/asm/x86_64/trampoline.asm"); ++ println!("cargo::rerun-if-changed=src/asm/x86_64/s3_wakeup.asm"); + + let status = Command::new("nasm") + .arg("-f") +@@ -89,6 +90,18 @@ fn main() { + if !status.success() { + panic!("nasm failed with exit status {}", status); + } ++ ++ let status = Command::new("nasm") ++ .arg("-f") ++ .arg("bin") ++ .arg("-o") ++ .arg(format!("{}/s3_wakeup", out_dir)) ++ .arg("src/asm/x86_64/s3_wakeup.asm") ++ .status() ++ .expect("failed to run nasm"); ++ if !status.success() { ++ panic!("nasm failed with exit status {}", status); ++ } + } + "riscv64" => { + println!("cargo::rustc-cfg=dtb"); +diff --git a/src/acpi/mod.rs b/src/acpi/mod.rs +index 59e3526..b3b80f0 100644 +--- a/src/acpi/mod.rs ++++ b/src/acpi/mod.rs +@@ -82,6 +82,14 @@ impl Rxsdt for RxsdtEnum { + + pub static RXSDT_ENUM: Once = Once::new(); + ++#[derive(Clone, Copy, Debug)] ++pub struct AcpiRootInfo { 
++ pub revision: u8, ++ pub root_sdt_address: PhysicalAddress, ++} ++ ++pub static ACPI_ROOT_INFO: Once = Once::new(); ++ + /// Parse the ACPI tables to gather CPU, interrupt, and timer information + pub unsafe fn init(already_supplied_rsdp: Option<*const u8>) { + unsafe { +@@ -94,6 +102,15 @@ pub unsafe fn init(already_supplied_rsdp: Option<*const u8>) { + let rsdp_opt = Rsdp::get_rsdp(already_supplied_rsdp); + + if let Some(rsdp) = rsdp_opt { ++ let root_info = ACPI_ROOT_INFO.call_once(|| AcpiRootInfo { ++ revision: rsdp.revision(), ++ root_sdt_address: rsdp.sdt_address(), ++ }); ++ ++ if root_info.root_sdt_address != rsdp.sdt_address() || root_info.revision != rsdp.revision() { ++ error!("ACPI_ROOT_INFO already initialized with a different RSDP root"); ++ } ++ + debug!("SDT address: {:#x}", rsdp.sdt_address().data()); + let rxsdt = get_sdt(rsdp.sdt_address(), &mut KernelMapper::lock_rw()); + +diff --git a/src/acpi/rsdp.rs b/src/acpi/rsdp.rs +index f10c5ac..5e93a9f 100644 +--- a/src/acpi/rsdp.rs ++++ b/src/acpi/rsdp.rs +@@ -31,4 +31,8 @@ impl Rsdp { + self.rsdt_address as usize + }) + } ++ ++ pub fn revision(&self) -> u8 { ++ self.revision ++ } + } +diff --git a/src/arch/x86_shared/mod.rs b/src/arch/x86_shared/mod.rs +index e3c3050..11c33e9 100644 +--- a/src/arch/x86_shared/mod.rs ++++ b/src/arch/x86_shared/mod.rs +@@ -28,6 +28,8 @@ pub mod pti; + /// Initialization and start function + pub mod start; + ++pub mod sleep; ++ + /// Stop function + pub mod stop; + +diff --git a/src/scheme/acpi.rs b/src/scheme/acpi.rs +index 87570a1..5d73469 100644 +--- a/src/scheme/acpi.rs ++++ b/src/scheme/acpi.rs +@@ -10,6 +10,7 @@ use syscall::{ + + use crate::{ + acpi::{RxsdtEnum, RXSDT_ENUM}, ++ arch::sleep, + context::file::InternalFlags, + event, + sync::{CleanLockToken, RwLock, WaitCondition, L1}, +@@ -40,6 +41,7 @@ enum HandleKind { + TopLevel, + Rxsdt, + ShutdownPipe, ++ SleepControl, + SchemeRoot, + } + +@@ -146,11 +148,11 @@ impl KernelScheme for AcpiScheme { + if flags & 
O_EXCL == O_EXCL || flags & O_SYMLINK == O_SYMLINK { + return Err(Error::new(EINVAL)); + } +- if flags & O_ACCMODE != O_RDONLY && flags & O_STAT != O_STAT { +- return Err(Error::new(EROFS)); +- } + let (handle_kind, int_flags) = match path { + "" => { ++ if flags & O_ACCMODE != O_RDONLY && flags & O_STAT != O_STAT { ++ return Err(Error::new(EROFS)); ++ } + if flags & O_DIRECTORY != O_DIRECTORY && flags & O_STAT != O_STAT { + return Err(Error::new(EISDIR)); + } +@@ -158,17 +160,36 @@ impl KernelScheme for AcpiScheme { + (HandleKind::TopLevel, InternalFlags::POSITIONED) + } + "rxsdt" => { ++ if flags & O_ACCMODE != O_RDONLY && flags & O_STAT != O_STAT { ++ return Err(Error::new(EROFS)); ++ } + if flags & O_DIRECTORY == O_DIRECTORY && flags & O_STAT != O_STAT { + return Err(Error::new(ENOTDIR)); + } + (HandleKind::Rxsdt, InternalFlags::POSITIONED) + } + "kstop" => { ++ if flags & O_ACCMODE != O_RDONLY && flags & O_STAT != O_STAT { ++ return Err(Error::new(EROFS)); ++ } + if flags & O_DIRECTORY == O_DIRECTORY && flags & O_STAT != O_STAT { + return Err(Error::new(ENOTDIR)); + } + (HandleKind::ShutdownPipe, InternalFlags::empty()) + } ++ "sleep" => { ++ if flags & O_ACCMODE == O_RDONLY || flags & O_STAT == O_STAT { ++ // allowed ++ } else if flags & O_ACCMODE != syscall::flag::O_WRONLY ++ && flags & O_ACCMODE != syscall::flag::O_RDWR ++ { ++ return Err(Error::new(EINVAL)); ++ } ++ if flags & O_DIRECTORY == O_DIRECTORY && flags & O_STAT != O_STAT { ++ return Err(Error::new(ENOTDIR)); ++ } ++ (HandleKind::SleepControl, InternalFlags::POSITIONED) ++ } + _ => return Err(Error::new(ENOENT)), + }; + +@@ -191,6 +212,7 @@ impl KernelScheme for AcpiScheme { + Ok(match handle.kind { + HandleKind::Rxsdt => DATA.get().ok_or(Error::new(EBADFD))?.len() as u64, + HandleKind::ShutdownPipe => 1, ++ HandleKind::SleepControl => sleep::available_sleep_states().len() as u64, + HandleKind::TopLevel => 0, + HandleKind::SchemeRoot => return Err(Error::new(EBADF))?, + }) +@@ -253,6 +275,7 @@ 
impl KernelScheme for AcpiScheme { + + return dst_buf.copy_exactly(&[0x42]).map(|()| 1); + } ++ HandleKind::SleepControl => sleep::available_sleep_states(), + HandleKind::Rxsdt => DATA.get().ok_or(Error::new(EBADFD))?, + HandleKind::TopLevel => return Err(Error::new(EISDIR)), + HandleKind::SchemeRoot => return Err(Error::new(EBADF)), +@@ -295,11 +318,45 @@ impl KernelScheme for AcpiScheme { + kind: DirentKind::Socket, + name: "kstop", + inode: 0, ++ next_opaque_id: 2, ++ })?; ++ } ++ if opaque <= 2 { ++ buf.entry(DirEntry { ++ kind: DirentKind::Regular, ++ name: "sleep", ++ inode: 0, + next_opaque_id: u64::MAX, + })?; + } + Ok(buf.finalize()) + } ++ fn kwrite( ++ &self, ++ id: usize, ++ buf: crate::syscall::usercopy::UserSliceRo, ++ _flags: u32, ++ _stored_flags: u32, ++ token: &mut CleanLockToken, ++ ) -> Result { ++ let handle = *HANDLES.read(token.token()).get(id)?; ++ ++ if handle.stat { ++ return Err(Error::new(EBADF)); ++ } ++ ++ match handle.kind { ++ HandleKind::SleepControl => { ++ let mut tmp = [0_u8; 16]; ++ let len = buf.copy_common_bytes_to_slice(&mut tmp)?; ++ let request = core::str::from_utf8(&tmp[..len]).map_err(|_| Error::new(EINVAL))?; ++ sleep::trigger_sleep_request(request)?; ++ Ok(len) ++ } ++ HandleKind::SchemeRoot => Err(Error::new(EBADF)), ++ _ => Err(Error::new(EBADF)), ++ } ++ } + fn kfpath(&self, _id: usize, buf: UserSliceWo, _token: &mut CleanLockToken) -> Result { + //TODO: construct useful path? 
+ buf.copy_common_bytes_from_slice("/scheme/kernel.acpi/".as_bytes()) +@@ -328,6 +385,11 @@ impl KernelScheme for AcpiScheme { + st_size: 1, + ..Default::default() + }, ++ HandleKind::SleepControl => Stat { ++ st_mode: MODE_FILE, ++ st_size: sleep::available_sleep_states().len().try_into().unwrap_or(u64::MAX), ++ ..Default::default() ++ }, + HandleKind::SchemeRoot => return Err(Error::new(EBADF)), + })?; + +diff --git a/src/arch/x86_shared/sleep.rs b/src/arch/x86_shared/sleep.rs +new file mode 100644 +index 0000000..9f98c0d +--- /dev/null ++++ b/src/arch/x86_shared/sleep.rs +@@ -0,0 +1,712 @@ ++use alloc::{sync::Arc, vec::Vec}; ++use core::{ ++ ptr::NonNull, ++ str::FromStr, ++ sync::atomic::{AtomicU32, Ordering}, ++}; ++ ++use acpi_ext::{ ++ aml::{namespace::AmlName, object::Object, Interpreter}, ++ registers::FixedRegisters, ++ sdt::{facs::Facs, fadt::Fadt, SdtHeader}, ++ AcpiTables, Handle, Handler, PhysicalMapping, ++}; ++use spin::Mutex; ++use syscall::error::{Error, EINVAL, EIO}; ++use x86::{segmentation::SegmentSelector, task, Ring}; ++ ++use crate::{ ++ acpi::ACPI_ROOT_INFO, ++ arch::interrupt, ++ memory::{ ++ round_down_pages, round_up_pages, KernelMapper, Page, PageFlags, PhysicalAddress, RmmA, ++ RmmArch, VirtualAddress, PAGE_SIZE, ++ }, ++ syscall::io::{Io, Pio}, ++}; ++ ++const ACPI_SLP_TYP_SHIFT: u16 = 10; ++const ACPI_SLP_TYP_MASK: u16 = 0x1C00; ++const ACPI_SLP_EN: u16 = 1 << 13; ++const WAKE_TRAMPOLINE_PHYS: usize = 0x8000; ++const SLEEP_RETURN_OK: usize = 0; ++ ++#[cfg(target_arch = "x86_64")] ++static WAKE_TRAMPOLINE_DATA: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/s3_wakeup")); ++ ++#[repr(C, packed)] ++#[derive(Clone, Copy, Debug, Default)] ++struct DescriptorTableRegister { ++ limit: u16, ++ base: u64, ++} ++ ++#[repr(C, align(64))] ++#[derive(Clone, Copy, Debug)] ++struct FpuState { ++ bytes: [u8; 4096], ++} ++ ++impl Default for FpuState { ++ fn default() -> Self { ++ Self { bytes: [0; 4096] } ++ } ++} ++ ++#[derive(Clone, Copy, 
Debug, Eq, PartialEq)] ++pub enum SleepState { ++ S3, ++ S5, ++} ++ ++#[derive(Clone, Copy, Debug, Eq, PartialEq)] ++pub enum SleepError { ++ UnsupportedArch, ++ MissingAcpi, ++ MissingFadt, ++ MissingFacs, ++ MissingSleepObject, ++ InvalidSleepObject, ++ UnsupportedPmControl, ++ UnsupportedAmlOperation, ++ SleepDidNotEnter, ++} ++ ++impl SleepError { ++ fn code(self) -> usize { ++ match self { ++ Self::UnsupportedArch => EINVAL as usize, ++ Self::MissingAcpi ++ | Self::MissingFadt ++ | Self::MissingFacs ++ | Self::MissingSleepObject ++ | Self::UnsupportedAmlOperation => EIO as usize, ++ Self::InvalidSleepObject | Self::UnsupportedPmControl | Self::SleepDidNotEnter => { ++ EINVAL as usize ++ } ++ } ++ } ++ ++ fn from_code(code: usize) -> Self { ++ match code as i32 { ++ x if x == EINVAL => Self::InvalidSleepObject, ++ _ => Self::MissingAcpi, ++ } ++ } ++} ++ ++#[derive(Clone, Copy, Debug, Default)] ++struct SavedCpuContext { ++ entry_rsp: usize, ++ runtime_rsp: usize, ++ facs_address: usize, ++ cr0: usize, ++ cr2: usize, ++ cr3: usize, ++ cr4: usize, ++ rflags: usize, ++ gdtr: DescriptorTableRegister, ++ idtr: DescriptorTableRegister, ++ efer: u64, ++ fs_base: u64, ++ gs_base: u64, ++ kernel_gs_base: u64, ++ fpu: FpuState, ++} ++ ++static SAVED_CONTEXT: Mutex> = Mutex::new(None); ++static AML_MUTEX_IDS: AtomicU32 = AtomicU32::new(1); ++ ++#[derive(Clone, Copy, Debug)] ++struct SleepTypeData { ++ a: u16, ++ b: u16, ++} ++ ++#[derive(Clone, Copy)] ++struct KernelAcpiHandler; ++ ++impl KernelAcpiHandler { ++ fn map_range(physical_address: usize, size: usize) -> (*mut u8, usize) { ++ let map_base = round_down_pages(physical_address); ++ let map_offset = physical_address - map_base; ++ let mapped_length = round_up_pages(size + map_offset); ++ ++ // SAFETY: The ACPI interpreter only requests firmware-described physical regions. 
++ unsafe { ++ let mut mapper = KernelMapper::lock_rw(); ++ for page_index in 0..mapped_length / PAGE_SIZE { ++ let (_, flush) = mapper ++ .map_linearly( ++ PhysicalAddress::new(map_base + page_index * PAGE_SIZE), ++ PageFlags::new(), ++ ) ++ .expect("failed to linearly map ACPI physical region"); ++ flush.flush(); ++ } ++ } ++ ++ let virtual_base = RmmA::phys_to_virt(PhysicalAddress::new(map_base)).data(); ++ ((virtual_base + map_offset) as *mut u8, mapped_length) ++ } ++} ++ ++impl Handler for KernelAcpiHandler { ++ unsafe fn map_physical_region(&self, physical_address: usize, size: usize) -> PhysicalMapping { ++ let (virtual_start, mapped_length) = Self::map_range(physical_address, size); ++ PhysicalMapping { ++ physical_start: physical_address, ++ virtual_start: NonNull::new(virtual_start.cast::()) ++ .expect("expected mapped ACPI virtual address to be non-null"), ++ region_length: size, ++ mapped_length, ++ handler: *self, ++ } ++ } ++ ++ fn unmap_physical_region(_region: &PhysicalMapping) {} ++ ++ fn read_u8(&self, address: usize) -> u8 { ++ // SAFETY: AML system-memory accesses are byte-addressable firmware regions. ++ unsafe { core::ptr::read_volatile(RmmA::phys_to_virt(PhysicalAddress::new(address)).data() as *const u8) } ++ } ++ ++ fn read_u16(&self, address: usize) -> u16 { ++ // SAFETY: AML system-memory accesses are word-addressable firmware regions. ++ unsafe { ++ core::ptr::read_volatile(RmmA::phys_to_virt(PhysicalAddress::new(address)).data() as *const u16) ++ } ++ } ++ ++ fn read_u32(&self, address: usize) -> u32 { ++ // SAFETY: AML system-memory accesses are dword-addressable firmware regions. ++ unsafe { ++ core::ptr::read_volatile(RmmA::phys_to_virt(PhysicalAddress::new(address)).data() as *const u32) ++ } ++ } ++ ++ fn read_u64(&self, address: usize) -> u64 { ++ // SAFETY: AML system-memory accesses are qword-addressable firmware regions. 
++ unsafe { ++ core::ptr::read_volatile(RmmA::phys_to_virt(PhysicalAddress::new(address)).data() as *const u64) ++ } ++ } ++ ++ fn write_u8(&self, address: usize, value: u8) { ++ // SAFETY: AML system-memory accesses are byte-addressable firmware regions. ++ unsafe { ++ core::ptr::write_volatile(RmmA::phys_to_virt(PhysicalAddress::new(address)).data() as *mut u8, value) ++ } ++ } ++ ++ fn write_u16(&self, address: usize, value: u16) { ++ // SAFETY: AML system-memory accesses are word-addressable firmware regions. ++ unsafe { ++ core::ptr::write_volatile( ++ RmmA::phys_to_virt(PhysicalAddress::new(address)).data() as *mut u16, ++ value, ++ ) ++ } ++ } ++ ++ fn write_u32(&self, address: usize, value: u32) { ++ // SAFETY: AML system-memory accesses are dword-addressable firmware regions. ++ unsafe { ++ core::ptr::write_volatile( ++ RmmA::phys_to_virt(PhysicalAddress::new(address)).data() as *mut u32, ++ value, ++ ) ++ } ++ } ++ ++ fn write_u64(&self, address: usize, value: u64) { ++ // SAFETY: AML system-memory accesses are qword-addressable firmware regions. 
++ unsafe { ++ core::ptr::write_volatile( ++ RmmA::phys_to_virt(PhysicalAddress::new(address)).data() as *mut u64, ++ value, ++ ) ++ } ++ } ++ ++ fn read_io_u8(&self, port: u16) -> u8 { ++ Pio::::new(port).read() ++ } ++ ++ fn read_io_u16(&self, port: u16) -> u16 { ++ Pio::::new(port).read() ++ } ++ ++ fn read_io_u32(&self, port: u16) -> u32 { ++ Pio::::new(port).read() ++ } ++ ++ fn write_io_u8(&self, port: u16, value: u8) { ++ Pio::::new(port).write(value) ++ } ++ ++ fn write_io_u16(&self, port: u16, value: u16) { ++ Pio::::new(port).write(value) ++ } ++ ++ fn write_io_u32(&self, port: u16, value: u32) { ++ Pio::::new(port).write(value) ++ } ++ ++ fn read_pci_u8(&self, _address: acpi_ext::PciAddress, _offset: u16) -> u8 { ++ 0 ++ } ++ ++ fn read_pci_u16(&self, _address: acpi_ext::PciAddress, _offset: u16) -> u16 { ++ 0 ++ } ++ ++ fn read_pci_u32(&self, _address: acpi_ext::PciAddress, _offset: u16) -> u32 { ++ 0 ++ } ++ ++ fn write_pci_u8(&self, _address: acpi_ext::PciAddress, _offset: u16, _value: u8) {} ++ ++ fn write_pci_u16(&self, _address: acpi_ext::PciAddress, _offset: u16, _value: u16) {} ++ ++ fn write_pci_u32(&self, _address: acpi_ext::PciAddress, _offset: u16, _value: u32) {} ++ ++ fn nanos_since_boot(&self) -> u64 { ++ 0 ++ } ++ ++ fn stall(&self, microseconds: u64) { ++ for _ in 0..(microseconds.saturating_mul(64)) { ++ core::hint::spin_loop(); ++ } ++ } ++ ++ fn sleep(&self, milliseconds: u64) { ++ for _ in 0..(milliseconds.saturating_mul(64_000)) { ++ core::hint::spin_loop(); ++ } ++ } ++ ++ fn create_mutex(&self) -> Handle { ++ Handle(AML_MUTEX_IDS.fetch_add(1, Ordering::Relaxed)) ++ } ++ ++ fn acquire(&self, _mutex: Handle, _timeout: u16) -> Result<(), acpi_ext::aml::AmlError> { ++ Ok(()) ++ } ++ ++ fn release(&self, _mutex: Handle) {} ++} ++ ++fn sleep_state_name(state: SleepState) -> &'static str { ++ match state { ++ SleepState::S3 => "\\_S3", ++ SleepState::S5 => "\\_S5", ++ } ++} ++ ++fn encode_sleep_type(value: u16) -> u16 { ++ if value <= 
0x7 { ++ value << ACPI_SLP_TYP_SHIFT ++ } else { ++ value & ACPI_SLP_TYP_MASK ++ } ++} ++ ++fn load_interpreter() -> Result<( ++ Arc>, ++ PhysicalMapping, ++ Interpreter, ++), SleepError> { ++ let root = *ACPI_ROOT_INFO.get().ok_or(SleepError::MissingAcpi)?; ++ let handler = KernelAcpiHandler; ++ ++ // SAFETY: ACPI root info is captured from the firmware-provided, already validated root table. ++ let tables = unsafe { ++ AcpiTables::from_rsdt(handler, root.revision, root.root_sdt_address.data()) ++ .map_err(|_| SleepError::MissingAcpi)? ++ }; ++ let fadt = tables.find_table::().ok_or(SleepError::MissingFadt)?; ++ let registers = Arc::new( ++ FixedRegisters::new(&fadt, handler).map_err(|_| SleepError::UnsupportedPmControl)?, ++ ); ++ let facs_address = fadt.facs_address().map_err(|_| SleepError::MissingFacs)?; ++ ++ // SAFETY: The FADT-supplied FACS address is used exactly as described by the ACPI spec. ++ let facs = unsafe { handler.map_physical_region::(facs_address, core::mem::size_of::()) }; ++ // SAFETY: The AML interpreter only needs an owned mapping of the same firmware FACS table. ++ let interpreter_facs = unsafe { ++ handler.map_physical_region::(facs_address, core::mem::size_of::()) ++ }; ++ let dsdt = tables.dsdt().map_err(|_| SleepError::MissingFadt)?; ++ let interpreter = Interpreter::new(handler, dsdt.revision, Arc::clone(®isters), Some(interpreter_facs)); ++ ++ // SAFETY: Each AML table mapping is owned by the interpreter during table loading. 
++ unsafe { ++ let mapping = handler.map_physical_region::(dsdt.phys_address, dsdt.length as usize); ++ let stream = core::slice::from_raw_parts( ++ mapping.virtual_start.as_ptr().byte_add(core::mem::size_of::()) as *const u8, ++ dsdt.length as usize - core::mem::size_of::(), ++ ); ++ interpreter ++ .load_table(stream) ++ .map_err(|_| SleepError::UnsupportedAmlOperation)?; ++ ++ for ssdt in tables.ssdts() { ++ let mapping = handler.map_physical_region::(ssdt.phys_address, ssdt.length as usize); ++ let stream = core::slice::from_raw_parts( ++ mapping.virtual_start.as_ptr().byte_add(core::mem::size_of::()) as *const u8, ++ ssdt.length as usize - core::mem::size_of::(), ++ ); ++ interpreter ++ .load_table(stream) ++ .map_err(|_| SleepError::UnsupportedAmlOperation)?; ++ } ++ } ++ ++ Ok((registers, facs, interpreter)) ++} ++ ++fn sleep_type_data_from_interpreter( ++ interpreter: &Interpreter, ++ state: SleepState, ++) -> Result { ++ let name = AmlName::from_str(sleep_state_name(state)).map_err(|_| SleepError::MissingSleepObject)?; ++ let object = interpreter ++ .evaluate(name, Vec::new()) ++ .map_err(|_| SleepError::MissingSleepObject)?; ++ ++ let Object::Package(package) = &*object else { ++ return Err(SleepError::InvalidSleepObject); ++ }; ++ ++ let Some(typa_object) = package.first() else { ++ return Err(SleepError::InvalidSleepObject); ++ }; ++ let Some(typb_object) = package.get(1) else { ++ return Err(SleepError::InvalidSleepObject); ++ }; ++ ++ let Object::Integer(typa) = &**typa_object else { ++ return Err(SleepError::InvalidSleepObject); ++ }; ++ let Object::Integer(typb) = &**typb_object else { ++ return Err(SleepError::InvalidSleepObject); ++ }; ++ ++ Ok(SleepTypeData { ++ a: encode_sleep_type(*typa as u16), ++ b: encode_sleep_type(*typb as u16), ++ }) ++} ++ ++fn sleep_type_data(state: SleepState) -> Result { ++ let (_registers, _facs, interpreter) = load_interpreter()?; ++ sleep_type_data_from_interpreter(&interpreter, state) ++} ++ ++fn 
install_wake_trampoline(stack_rsp: usize, cr3: usize) { ++ let trampoline_page = Page::containing_address(VirtualAddress::new(WAKE_TRAMPOLINE_PHYS)); ++ let trampoline_frame = PhysicalAddress::new(WAKE_TRAMPOLINE_PHYS); ++ ++ // SAFETY: The 0x8000 low-memory trampoline page is reserved by the kernel for bootstrap stubs. ++ let (result, _) = unsafe { ++ let mut mapper = KernelMapper::lock_rw(); ++ let result = mapper ++ .map_phys( ++ trampoline_page.start_address(), ++ trampoline_frame, ++ PageFlags::new().execute(true).write(true), ++ ) ++ .expect("failed to map S3 wake trampoline page"); ++ (result, mapper.table().phys().data()) ++ }; ++ result.flush(); ++ ++ for (index, value) in WAKE_TRAMPOLINE_DATA.iter().enumerate() { ++ // SAFETY: The trampoline page is mapped writable at the same virtual address as the physical page. ++ unsafe { ++ core::ptr::write_volatile((WAKE_TRAMPOLINE_PHYS as *mut u8).add(index), *value); ++ } ++ } ++ ++ // SAFETY: The wake trampoline layout reserves three qword fields immediately after the jump. ++ unsafe { ++ let stack_slot = (WAKE_TRAMPOLINE_PHYS + 8) as *mut u64; ++ let page_table_slot = stack_slot.add(1); ++ let code_slot = stack_slot.add(2); ++ stack_slot.write(stack_rsp as u64); ++ page_table_slot.write(cr3 as u64); ++ #[expect(clippy::fn_to_numeric_cast)] ++ code_slot.write(resume_from_s3_trampoline as usize as u64); ++ } ++ ++ // SAFETY: The trampoline mapping is no longer needed once the physical page has been populated. ++ let (_frame, _, flush) = unsafe { ++ KernelMapper::lock_rw() ++ .unmap_phys(trampoline_page.start_address()) ++ .expect("failed to unmap S3 wake trampoline page") ++ }; ++ flush.flush(); ++} ++ ++fn save_descriptor_tables(context: &mut SavedCpuContext) { ++ // SAFETY: SGDT/SIDT only read the current CPU descriptor-table registers into the provided storage. 
++ unsafe { ++ core::arch::asm!("sgdt [{}]", in(reg) &mut context.gdtr, options(nostack, preserves_flags)); ++ core::arch::asm!("sidt [{}]", in(reg) &mut context.idtr, options(nostack, preserves_flags)); ++ } ++} ++ ++fn save_fpu_state(context: &mut SavedCpuContext) { ++ // SAFETY: The kernel owns the current CPU at suspend entry and the FXSAVE buffer is 64-byte aligned. ++ unsafe { ++ core::arch::asm!( ++ "fxsave64 [{}]", ++ in(reg) context.fpu.bytes.as_mut_ptr(), ++ ); ++ } ++} ++ ++fn restore_fpu_state(context: &SavedCpuContext) { ++ // SAFETY: The saved FXSAVE image belongs to the same CPU context and matches the restore instruction. ++ unsafe { ++ core::arch::asm!( ++ "fxrstor64 [{}]", ++ in(reg) context.fpu.bytes.as_ptr(), ++ ); ++ } ++} ++ ++fn save_cpu_context(entry_rsp: usize) -> SavedCpuContext { ++ let mut context = SavedCpuContext { ++ entry_rsp, ++ ..SavedCpuContext::default() ++ }; ++ ++ // SAFETY: Reading control registers and MSRs is required to reconstruct the CPU execution state on wake. 
++ unsafe { ++ core::arch::asm!( ++ "mov {}, cr0", ++ out(reg) context.cr0, ++ options(nostack, preserves_flags) ++ ); ++ core::arch::asm!( ++ "mov {}, cr2", ++ out(reg) context.cr2, ++ options(nostack, preserves_flags) ++ ); ++ core::arch::asm!( ++ "mov {}, cr3", ++ out(reg) context.cr3, ++ options(nostack, preserves_flags) ++ ); ++ core::arch::asm!( ++ "mov {}, cr4", ++ out(reg) context.cr4, ++ options(nostack, preserves_flags) ++ ); ++ core::arch::asm!( ++ "pushfq", ++ "pop {}", ++ out(reg) context.rflags, ++ options(preserves_flags) ++ ); ++ core::arch::asm!("mov {}, rsp", out(reg) context.runtime_rsp, options(nostack, preserves_flags)); ++ ++ context.efer = x86::msr::rdmsr(x86::msr::IA32_EFER); ++ context.fs_base = x86::msr::rdmsr(x86::msr::IA32_FS_BASE); ++ context.gs_base = x86::msr::rdmsr(x86::msr::IA32_GS_BASE); ++ context.kernel_gs_base = x86::msr::rdmsr(x86::msr::IA32_KERNEL_GSBASE); ++ } ++ ++ save_descriptor_tables(&mut context); ++ save_fpu_state(&mut context); ++ context ++} ++ ++fn set_firmware_waking_vector(facs: &mut PhysicalMapping, vector: usize) { ++ facs.firmware_waking_vector = vector as u32; ++ facs.x_firmware_waking_vector = vector as u64; ++} ++ ++fn write_pm1_control_block( ++ registers: &FixedRegisters, ++ sleep_type: SleepTypeData, ++) -> Result<(), SleepError> { ++ let current_a = registers ++ .pm1_control_registers ++ .pm1a ++ .read() ++ .map_err(|_| SleepError::UnsupportedPmControl)? as u16; ++ let armed_a = (current_a & !(ACPI_SLP_TYP_MASK | ACPI_SLP_EN)) | sleep_type.a; ++ ++ registers ++ .pm1_control_registers ++ .pm1a ++ .write(u64::from(armed_a)) ++ .map_err(|_| SleepError::UnsupportedPmControl)?; ++ ++ if let Some(pm1b) = ®isters.pm1_control_registers.pm1b { ++ let current_b = pm1b.read().map_err(|_| SleepError::UnsupportedPmControl)? 
as u16; ++ let armed_b = (current_b & !(ACPI_SLP_TYP_MASK | ACPI_SLP_EN)) | sleep_type.b; ++ pm1b.write(u64::from(armed_b)) ++ .map_err(|_| SleepError::UnsupportedPmControl)?; ++ pm1b.write(u64::from(armed_b | ACPI_SLP_EN)) ++ .map_err(|_| SleepError::UnsupportedPmControl)?; ++ } ++ ++ // SAFETY: WBINVD is required here to flush dirty cache lines before firmware powers down the CPU package. ++ unsafe { ++ core::arch::asm!("wbinvd", options(nostack, preserves_flags)); ++ } ++ ++ registers ++ .pm1_control_registers ++ .pm1a ++ .write(u64::from(armed_a | ACPI_SLP_EN)) ++ .map_err(|_| SleepError::UnsupportedPmControl)?; ++ ++ Ok(()) ++} ++ ++#[unsafe(naked)] ++unsafe extern "sysv64" fn enter_sleep_raw(state: usize) -> usize { ++ core::arch::naked_asm!( ++ "mov rsi, rsp", ++ "jmp {inner}", ++ inner = sym enter_sleep_raw_inner, ++ ); ++} ++ ++extern "C" fn enter_sleep_raw_inner(state: usize, entry_rsp: usize) -> usize { ++ let state = match state { ++ 3 => SleepState::S3, ++ 5 => SleepState::S5, ++ _ => return SleepError::InvalidSleepObject.code(), ++ }; ++ ++ let (registers, mut facs, interpreter) = match load_interpreter() { ++ Ok(tuple) => tuple, ++ Err(error) => return error.code(), ++ }; ++ let sleep_type = match sleep_type_data_from_interpreter(&interpreter, state) { ++ Ok(data) => data, ++ Err(error) => return error.code(), ++ }; ++ ++ let mut context = save_cpu_context(entry_rsp); ++ context.facs_address = facs.physical_start; ++ install_wake_trampoline(context.runtime_rsp, context.cr3); ++ set_firmware_waking_vector(&mut facs, WAKE_TRAMPOLINE_PHYS); ++ ++ { ++ let mut saved = SAVED_CONTEXT.lock(); ++ *saved = Some(context); ++ } ++ ++ // SAFETY: Suspend entry must not be interrupted while the wake vector and PM1 control block are being armed. 
++ unsafe { ++ interrupt::disable(); ++ } ++ ++ if let Err(error) = write_pm1_control_block(registers.as_ref(), sleep_type) { ++ return error.code(); ++ } ++ ++ // SAFETY: The final CLI+HLT sequence is the architectural handoff point after asserting SLP_EN. ++ unsafe { ++ core::arch::asm!("cli; hlt", options(nostack)); ++ } ++ ++ SleepError::SleepDidNotEnter.code() ++} ++ ++extern "C" fn resume_from_s3_trampoline() -> ! { ++ let mut saved = SAVED_CONTEXT.lock(); ++ let context = saved.take().expect("S3 wake trampoline resumed without saved CPU context"); ++ drop(saved); ++ ++ // SAFETY: The saved FACS physical address was captured from the validated FADT during suspend entry. ++ if context.facs_address != 0 { ++ let mut facs = unsafe { ++ KernelAcpiHandler.map_physical_region::( ++ context.facs_address, ++ core::mem::size_of::(), ++ ) ++ }; ++ set_firmware_waking_vector(&mut facs, 0); ++ } ++ ++ // SAFETY: The wake trampoline already switched to the saved kernel CR3 and long mode, so the remaining restores are architectural register state only. 
++ unsafe { ++ x86::msr::wrmsr(x86::msr::IA32_EFER, context.efer); ++ core::arch::asm!("mov cr3, {}", in(reg) context.cr3, options(nostack)); ++ core::arch::asm!("mov cr4, {}", in(reg) context.cr4, options(nostack)); ++ core::arch::asm!("mov cr2, {}", in(reg) context.cr2, options(nostack)); ++ core::arch::asm!("mov cr0, {}", in(reg) context.cr0, options(nostack)); ++ core::arch::asm!("lgdt [{}]", in(reg) &context.gdtr, options(nostack)); ++ core::arch::asm!("lidt [{}]", in(reg) &context.idtr, options(nostack)); ++ ++ task::load_tr(SegmentSelector::new(crate::arch::gdt::GDT_TSS as u16, Ring::Ring0)); ++ ++ x86::msr::wrmsr(x86::msr::IA32_FS_BASE, context.fs_base); ++ x86::msr::wrmsr(x86::msr::IA32_GS_BASE, context.gs_base); ++ x86::msr::wrmsr(x86::msr::IA32_KERNEL_GSBASE, context.kernel_gs_base); ++ } ++ ++ restore_fpu_state(&context); ++ ++ // SAFETY: Returning with the original entry stack and RFLAGS completes the suspend call as a successful function return. ++ unsafe { ++ core::arch::asm!( ++ "mov rsp, {entry_rsp}", ++ "push {rflags}", ++ "popfq", ++ "xor eax, eax", ++ "ret", ++ entry_rsp = in(reg) context.entry_rsp, ++ rflags = in(reg) context.rflags, ++ options(noreturn) ++ ); ++ } ++} ++ ++pub fn enter_sleep_state(state: SleepState) -> core::result::Result<(), SleepError> { ++ #[cfg(not(target_arch = "x86_64"))] ++ { ++ let _ = state; ++ return Err(SleepError::UnsupportedArch); ++ } ++ ++ #[cfg(target_arch = "x86_64")] ++ { ++ let raw = unsafe { ++ enter_sleep_raw(match state { ++ SleepState::S3 => 3, ++ SleepState::S5 => 5, ++ }) ++ }; ++ if raw == SLEEP_RETURN_OK { ++ Ok(()) ++ } else { ++ Err(SleepError::from_code(raw)) ++ } ++ } ++} ++ ++pub fn available_sleep_states() -> &'static [u8] { ++ if sleep_type_data(SleepState::S3).is_ok() { ++ b"S3\nS5\n" ++ } else { ++ b"S5\n" ++ } ++} ++ ++pub fn trigger_sleep_request(request: &str) -> Result<(), Error> { ++ match request.trim() { ++ "S3" => enter_sleep_state(SleepState::S3).map_err(|_| Error::new(EIO)), ++ 
"S5" => enter_sleep_state(SleepState::S5).map_err(|_| Error::new(EIO)), ++ _ => Err(Error::new(EINVAL)), ++ } ++} +diff --git a/src/asm/x86_64/s3_wakeup.asm b/src/asm/x86_64/s3_wakeup.asm +new file mode 100644 +index 0000000..7beeccf +--- /dev/null ++++ b/src/asm/x86_64/s3_wakeup.asm +@@ -0,0 +1,110 @@ ++; ACPI S3 wake trampoline ++; compiled with nasm by build.rs, copied to physical 0x8000 before S3 entry ++ ++ORG 0x8000 ++SECTION .text ++USE16 ++ ++trampoline: ++ jmp short startup_wake ++ times 8 - ($ - trampoline) nop ++ .stack: dq 0 ++ .page_table: dq 0 ++ .code: dq 0 ++ ++startup_wake: ++ cli ++ ++ xor ax, ax ++ mov ds, ax ++ mov es, ax ++ mov ss, ax ++ mov sp, 0 ++ ++ mov edi, [trampoline.page_table] ++ mov cr3, edi ++ ++ mov eax, cr0 ++ and al, 11110011b ++ or al, 00100010b ++ mov cr0, eax ++ ++ mov eax, cr4 ++ or eax, 1 << 9 | 1 << 7 | 1 << 5 | 1 << 4 ++ mov cr4, eax ++ ++ fninit ++ ++ lgdt [gdtr] ++ ++ mov ecx, 0xC0000080 ++ rdmsr ++ or eax, 1 << 11 | 1 << 8 ++ wrmsr ++ ++ mov ebx, cr0 ++ or ebx, 1 << 31 | 1 << 16 | 1 ++ mov cr0, ebx ++ ++ jmp gdt.kernel_code:long_mode_wake ++ ++USE64 ++long_mode_wake: ++ mov rax, gdt.kernel_data ++ mov ds, rax ++ mov es, rax ++ mov fs, rax ++ mov gs, rax ++ mov ss, rax ++ ++ mov rsp, [trampoline.stack] ++ mov rax, [trampoline.code] ++ jmp rax ++ ++struc GDTEntry ++ .limitl resw 1 ++ .basel resw 1 ++ .basem resb 1 ++ .attribute resb 1 ++ .flags__limith resb 1 ++ .baseh resb 1 ++endstruc ++ ++attrib: ++ .present equ 1 << 7 ++ .user equ 1 << 4 ++ .code equ 1 << 3 ++ .writable equ 1 << 1 ++ ++flags: ++ .long_mode equ 1 << 5 ++ ++gdtr: ++ dw gdt.end + 1 ++ dq gdt ++ ++gdt: ++.null equ $ - gdt ++ dq 0 ++ ++.kernel_code equ $ - gdt ++istruc GDTEntry ++ at GDTEntry.limitl, dw 0 ++ at GDTEntry.basel, dw 0 ++ at GDTEntry.basem, db 0 ++ at GDTEntry.attribute, db attrib.present | attrib.user | attrib.code ++ at GDTEntry.flags__limith, db flags.long_mode ++ at GDTEntry.baseh, db 0 ++iend ++ ++.kernel_data equ $ - gdt ++istruc GDTEntry 
++ at GDTEntry.limitl, dw 0 ++ at GDTEntry.basel, dw 0 ++ at GDTEntry.basem, db 0 ++ at GDTEntry.attribute, db attrib.present | attrib.user | attrib.writable ++ at GDTEntry.flags__limith, db 0 ++ at GDTEntry.baseh, db 0 ++iend ++ ++.end equ $ - gdt diff --git a/sources/redbear-0.1.0/patches/P4-scheme-failure-modes.patch b/sources/redbear-0.1.0/patches/P4-scheme-failure-modes.patch new file mode 100644 index 00000000..c3453b86 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P4-scheme-failure-modes.patch @@ -0,0 +1,913 @@ +diff --git a/src/context/file.rs b/src/context/file.rs +index 2d3790f..150f483 100644 +--- a/src/context/file.rs ++++ b/src/context/file.rs +@@ -4,7 +4,7 @@ use crate::{ + event, + scheme::{self, SchemeId}, + sync::{CleanLockToken, RwLock, L6}, +- syscall::error::Result, ++ syscall::error::{Error, Result, ESTALE}, + }; + use alloc::sync::Arc; + use syscall::{schemev2::NewFdFlags, RwFlags, O_APPEND, O_NONBLOCK}; +@@ -18,6 +18,7 @@ pub struct FileDescription { + pub offset: u64, + /// The scheme that this file refers to + pub scheme: SchemeId, ++ pub scheme_generation: Option, + /// The number the scheme uses to refer to this file + pub number: usize, + /// The flags passed to open or fcntl(SETFL) +@@ -32,6 +33,52 @@ bitflags! 
{ + } + } + impl FileDescription { ++ pub fn with_generation( ++ scheme: SchemeId, ++ scheme_generation: Option, ++ number: usize, ++ offset: u64, ++ flags: u32, ++ internal_flags: InternalFlags, ++ ) -> Self { ++ Self { ++ offset, ++ scheme, ++ scheme_generation, ++ number, ++ flags, ++ internal_flags, ++ } ++ } ++ ++ pub fn new( ++ scheme: SchemeId, ++ number: usize, ++ offset: u64, ++ flags: u32, ++ internal_flags: InternalFlags, ++ token: &mut CleanLockToken, ++ ) -> Self { ++ Self::with_generation( ++ scheme, ++ Some(scheme::current_scheme_generation(token.token(), scheme)), ++ number, ++ offset, ++ flags, ++ internal_flags, ++ ) ++ } ++ ++ pub fn get_scheme(&self, token: &mut CleanLockToken) -> Result { ++ if let Some(expected_generation) = self.scheme_generation ++ && expected_generation != scheme::current_scheme_generation(token.token(), self.scheme) ++ { ++ return Err(Error::new(ESTALE)); ++ } ++ ++ scheme::get_scheme(token.token(), self.scheme) ++ } ++ + pub fn rw_flags(&self, rw: RwFlags) -> u32 { + let mut ret = self.flags & !(O_NONBLOCK | O_APPEND) as u32; + if rw.contains(RwFlags::APPEND) { +@@ -76,7 +123,7 @@ impl FileDescription { + pub fn try_close(self, token: &mut CleanLockToken) -> Result<()> { + event::unregister_file(self.scheme, self.number, token); + +- let scheme = scheme::get_scheme(token.token(), self.scheme)?; ++ let scheme = self.get_scheme(token)?; + + scheme.close(self.number, token) + } +@@ -85,12 +132,12 @@ impl FileDescription { + impl FileDescriptor { + pub fn close(self, token: &mut CleanLockToken) -> Result<()> { + { +- let (scheme_id, number, internal_flags) = { ++ let (desc, number, internal_flags) = { + let desc = self.description.read(token.token()); +- (desc.scheme, desc.number, desc.internal_flags) ++ (*desc, desc.number, desc.internal_flags) + }; + if internal_flags.contains(InternalFlags::NOTIFY_ON_NEXT_DETACH) { +- let scheme = scheme::get_scheme(token.token(), scheme_id)?; ++ let scheme = desc.get_scheme(token)?; + 
scheme.detach(number, token)?; + } + } +diff --git a/src/context/memory.rs b/src/context/memory.rs +index 93446ba..a862b35 100644 +--- a/src/context/memory.rs ++++ b/src/context/memory.rs +@@ -64,14 +64,13 @@ impl UnmapResult { + return Ok(()); + }; + +- let (scheme_id, number) = { +- let desc = description.write(token.token()); +- (desc.scheme, desc.number) ++ let (scheme, number) = { ++ let desc = *description.read(token.token()); ++ (desc.get_scheme(token)?, desc.number) + }; + +- let scheme_opt = scheme::get_scheme(token.token(), scheme_id); +- let funmap_result = scheme_opt +- .and_then(|scheme| scheme.kfunmap(number, base_offset, self.size, self.flags, token)); ++ let funmap_result = scheme ++ .kfunmap(number, base_offset, self.size, self.flags, token); + + if let Ok(fd) = Arc::try_unwrap(description) { + fd.into_inner().try_close(token)?; +@@ -2687,20 +2686,13 @@ fn correct_inner<'l>( + // XXX: This is cheating, but guaranteed we won't deadlock because we've dropped addr_space_guard + let mut token = unsafe { CleanLockToken::new() }; + +- let (scheme_id, scheme_number) = { +- let desc = &file_ref.description.read(token.token()); +- (desc.scheme, desc.number) ++ let desc = *file_ref.description.read(token.token()); ++ let scheme = desc.get_scheme(&mut token).map_err(|_| PfError::Segv)?; ++ let scheme_number = desc.number; ++ let user_inner = match scheme { ++ KernelSchemes::User(user) => user.inner, ++ _ => return Err(PfError::Segv), + }; +- let user_inner = scheme::get_scheme(token.token(), scheme_id) +- .ok() +- .and_then(|s| { +- if let KernelSchemes::User(user) = s { +- Some(user.inner) +- } else { +- None +- } +- }) +- .ok_or(PfError::Segv)?; + + let offset = file_ref.base_offset as u64 + (pages_from_grant_start * PAGE_SIZE) as u64; + user_inner +diff --git a/src/scheme/mod.rs b/src/scheme/mod.rs +index d30272c..765e547 100644 +--- a/src/scheme/mod.rs ++++ b/src/scheme/mod.rs +@@ -14,7 +14,7 @@ use alloc::{ + }; + use core::{ + str, +- 
sync::atomic::{AtomicUsize, Ordering}, ++ sync::atomic::{AtomicU64, AtomicUsize, Ordering}, + }; + use hashbrown::hash_map::{self, DefaultHashBuilder, HashMap}; + use spin::Once; +@@ -169,6 +169,7 @@ enum Handle { + + /// Schemes list + static HANDLES: Once>> = Once::new(); ++static SCHEME_GENERATIONS: Once>> = Once::new(); + static SCHEME_LIST_NEXT_ID: AtomicUsize = AtomicUsize::new(MAX_GLOBAL_SCHEMES); + static SCHEME_LIST_ID: AtomicUsize = AtomicUsize::new(0); + +@@ -204,6 +205,10 @@ fn init_schemes() -> RwLock> { + RwLock::new(handles) + } + ++fn init_scheme_generations() -> RwLock> { ++ RwLock::new(HashMap::new()) ++} ++ + /// Get a handle to a scheme. + pub fn get_scheme(token: LockToken<'_, L0>, scheme_id: SchemeId) -> Result { + match handles().read(token).get(&scheme_id) { +@@ -212,10 +217,33 @@ pub fn get_scheme(token: LockToken<'_, L0>, scheme_id: SchemeId) -> Result, scheme_id: SchemeId) -> u64 { ++ scheme_generations() ++ .read(token) ++ .get(&scheme_id) ++ .map(|generation| generation.load(Ordering::Acquire)) ++ .unwrap_or(0) ++} ++ + fn handles<'a>() -> &'a RwLock> { + HANDLES.call_once(init_schemes) + } + ++fn scheme_generations<'a>() -> &'a RwLock> { ++ SCHEME_GENERATIONS.call_once(init_scheme_generations) ++} ++ ++fn increment_scheme_generation(scheme_id: SchemeId, token: &mut CleanLockToken) { ++ match scheme_generations().write(token.token()).entry(scheme_id) { ++ hash_map::Entry::Occupied(entry) => { ++ entry.get().fetch_add(1, Ordering::AcqRel); ++ } ++ hash_map::Entry::Vacant(entry) => { ++ entry.insert(AtomicU64::new(1)); ++ } ++ } ++} ++ + /// Scheme list type + pub struct SchemeList; + +@@ -260,9 +288,14 @@ impl SchemeList { + + /// Remove a scheme + fn remove(&self, id: usize, token: &mut CleanLockToken) { +- let scheme = handles().write(token.token()).remove(&SchemeId(id)); ++ let scheme_id = SchemeId(id); ++ let scheme = handles().write(token.token()).remove(&scheme_id); + + assert!(scheme.is_some()); ++ if let 
Some(Handle::Scheme(KernelSchemes::User(user))) = scheme.as_ref() { ++ user.inner.fail_pending_calls(token); ++ } ++ increment_scheme_generation(scheme_id, token); + if let Some(Handle::Scheme(KernelSchemes::User(user))) = scheme + && let Some(user) = Arc::into_inner(user.inner) + { +@@ -287,32 +320,32 @@ impl KernelScheme for SchemeList { + token: &mut CleanLockToken, + ) -> Result { + let scheme_id = SchemeId(scheme_id); +- match handles() +- .read(token.token()) +- .get(&scheme_id) +- .ok_or(Error::new(EBADF))? +- { +- Handle::Scheme(KernelSchemes::User(UserScheme { inner })) => { +- let inner = inner.clone(); +- assert!(scheme_id == inner.scheme_id); +- let scheme = scheme_id; +- let params = unsafe { user_buf.read_exact::()? }; +- +- return Ok(OpenResult::External(Arc::new(RwLock::new( +- FileDescription { +- scheme, +- number: params.number, +- offset: params.offset, +- flags: params.flags as u32, +- internal_flags: InternalFlags::from_extra0(params.internal_flags) +- .ok_or(Error::new(EINVAL))?, +- }, +- )))); ++ let maybe_inner = { ++ let handles = handles().read(token.token()); ++ match handles.get(&scheme_id).ok_or(Error::new(EBADF))? { ++ Handle::Scheme(KernelSchemes::User(UserScheme { inner })) => Some(inner.clone()), ++ Handle::SchemeCreationCapability => None, ++ _ => return Err(Error::new(EBADF)), + } +- Handle::SchemeCreationCapability => (), +- _ => return Err(Error::new(EBADF)), + }; + ++ if let Some(inner) = maybe_inner { ++ assert!(scheme_id == inner.scheme_id); ++ let params = unsafe { user_buf.read_exact::()? 
}; ++ ++ return Ok(OpenResult::External(Arc::new(RwLock::new( ++ FileDescription::new( ++ scheme_id, ++ params.number, ++ params.offset, ++ params.flags as u32, ++ InternalFlags::from_extra0(params.internal_flags) ++ .ok_or(Error::new(EINVAL))?, ++ token, ++ ), ++ )))); ++ } ++ + const EXPECTED: &[u8] = b"create-scheme"; + let mut buf = [0u8; EXPECTED.len()]; + +diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs +index 47588e1..1bdd6cc 100644 +--- a/src/scheme/proc.rs ++++ b/src/scheme/proc.rs +@@ -849,17 +873,17 @@ impl KernelScheme for ProcScheme { + } + } + fn extract_scheme_number(fd: usize, token: &mut CleanLockToken) -> Result<(KernelSchemes, usize)> { +- let (scheme_id, number) = { ++ let desc = { + let current_lock = context::current(); + let mut current = current_lock.read(token.token()); +- let (context, mut token) = current.token_split(); ++ let (context, mut context_token) = current.token_split(); + let file_descriptor = context +- .get_file(FileHandle::from(fd), &mut token) ++ .get_file(FileHandle::from(fd), &mut context_token) + .ok_or(Error::new(EBADF))?; +- let desc = file_descriptor.description.read(token.token()); +- (desc.scheme, desc.number) ++ *file_descriptor.description.read(context_token.token()) + }; +- let scheme = scheme::get_scheme(token.token(), scheme_id)?; ++ let scheme = desc.get_scheme(token)?; ++ let number = desc.number; + + Ok((scheme, number)) + } +diff --git a/src/scheme/user.rs b/src/scheme/user.rs +index b901302..dfbf66b 100644 +--- a/src/scheme/user.rs ++++ b/src/scheme/user.rs +@@ -80,6 +80,7 @@ const ONE: NonZeroUsize = match NonZeroUsize::new(1) { + Some(one) => one, + None => unreachable!(), + }; ++const MAX_SPURIOUS_WAKEUPS: usize = 100; + + enum ParsedCqe { + TriggerFevent { +@@ -209,6 +210,8 @@ impl UserInner { + caller_responsible: &mut PageSpan, + token: &mut CleanLockToken, + ) -> Result { ++ let mut remaining_spurious_wakeups = MAX_SPURIOUS_WAKEUPS; ++ + { + // Disable preemption to avoid context switches 
between setting the + // process state and sending the scheme request. The process is made +@@ -261,7 +264,10 @@ impl UserInner { + }; + + let states = self.states.lock(token.token()); +- let (mut states, mut token) = states.into_split(); ++ let (mut states, mut state_token) = states.into_split(); ++ let mut timed_out_descriptions = None; ++ let mut remove_state = false; ++ let mut timed_out = false; + match states.get_mut(sqe.tag as usize) { + // invalid state + None => return Err(Error::new(EBADFD)), +@@ -274,24 +280,35 @@ impl UserInner { + fds, + } => { + let maybe_eintr = +- eintr_if_sigkill(&mut callee_responsible, &mut token.token()); +- *o = State::Waiting { +- canceling: true, +- callee_responsible, +- context, +- fds, +- }; ++ eintr_if_sigkill(&mut callee_responsible, &mut state_token.token()); + +- maybe_eintr?; ++ if maybe_eintr.is_ok() { ++ remaining_spurious_wakeups = ++ remaining_spurious_wakeups.saturating_sub(1); ++ } ++ ++ if maybe_eintr.is_ok() && remaining_spurious_wakeups == 0 { ++ timed_out_descriptions = Some(Self::collect_descriptions_to_close(fds)); ++ remove_state = true; ++ } else { ++ *o = State::Waiting { ++ canceling: true, ++ callee_responsible, ++ context, ++ fds, ++ }; ++ } + +- context::current() +- .write(token.token()) +- .block("UserInner::call (woken up after cancelation request)"); ++ maybe_eintr?; + +- // We do not want to drop the lock before blocking +- // as if we get preempted in between we might miss a +- // wakeup. 
+- drop(states); ++ if remove_state { ++ states.remove(sqe.tag as usize); ++ timed_out = true; ++ } else { ++ context::current() ++ .write(state_token.token()) ++ .block("UserInner::call (woken up after cancelation request)"); ++ } + } + // spurious wakeup + State::Waiting { +@@ -300,60 +317,76 @@ impl UserInner { + context, + mut callee_responsible, + } => { +- let maybe_eintr = eintr_if_sigkill(&mut callee_responsible, &mut token); + let current_context = context::current(); ++ let maybe_eintr = ++ eintr_if_sigkill(&mut callee_responsible, &mut state_token); ++ ++ if maybe_eintr.is_ok() { ++ remaining_spurious_wakeups = ++ remaining_spurious_wakeups.saturating_sub(1); ++ } + +- *o = State::Waiting { +- // Currently we treat all spurious wakeups to have the same behavior +- // as signals (i.e., we send a cancellation request). It is not something +- // that should happen, but it certainly can happen, for example if a context +- // is awoken through its thread handle without setting any sig bits, or if the +- // caller clears its own sig bits. If it actually is a signal, then it is the +- // intended behavior. +- canceling: true, +- fds, +- context, +- callee_responsible, +- }; ++ if maybe_eintr.is_ok() && remaining_spurious_wakeups == 0 { ++ timed_out_descriptions = Some(Self::collect_descriptions_to_close(fds)); ++ remove_state = true; ++ } else { ++ *o = State::Waiting { ++ // Currently we treat all spurious wakeups to have the same behavior ++ // as signals (i.e., we send a cancellation request). It is not something ++ // that should happen, but it certainly can happen, for example if a context ++ // is awoken through its thread handle without setting any sig bits, or if the ++ // caller clears its own sig bits. If it actually is a signal, then it is the ++ // intended behavior. 
++ canceling: true, ++ fds, ++ context, ++ callee_responsible, ++ }; ++ } + + maybe_eintr?; + +- // We do not want to preempt between sending the +- // cancellation and blocking again where we might +- // miss a wakeup. +- let mut preempt = PreemptGuardL1::new(¤t_context, &mut token); +- let token = preempt.token(); +- +- self.todo.send_locked( +- Sqe { +- opcode: Opcode::Cancel as u8, +- sqe_flags: SqeFlags::ONEWAY, +- tag: sqe.tag, +- ..Default::default() +- }, +- token.token(), +- ); +- event::trigger_locked( +- self.root_id, +- self.scheme_id.get(), +- EVENT_READ, +- token.token(), +- ); +- +- // 1. If cancellation was requested and arrived +- // before the scheme processed the request, an +- // acknowledgement will be sent back after the +- // cancellation is processed and we will be woken up +- // again. State will be State::Responded then. +- // +- // 2. If cancellation was requested but the scheme +- // already processed the request, we will receive +- // the actual response next and woken up again. +- // State will be State::Responded then. +- context::current() +- .write(token.token()) +- .block("UserInner::call (spurious wakeup)"); +- drop(states); ++ if remove_state { ++ states.remove(sqe.tag as usize); ++ timed_out = true; ++ } else { ++ // We do not want to preempt between sending the ++ // cancellation and blocking again where we might ++ // miss a wakeup. ++ let mut preempt = ++ PreemptGuardL1::new(¤t_context, &mut state_token); ++ let token = preempt.token(); ++ ++ self.todo.send_locked( ++ Sqe { ++ opcode: Opcode::Cancel as u8, ++ sqe_flags: SqeFlags::ONEWAY, ++ tag: sqe.tag, ++ ..Default::default() ++ }, ++ token.token(), ++ ); ++ event::trigger_locked( ++ self.root_id, ++ self.scheme_id.get(), ++ EVENT_READ, ++ token.token(), ++ ); ++ ++ // 1. 
If cancellation was requested and arrived ++ // before the scheme processed the request, an ++ // acknowledgement will be sent back after the ++ // cancellation is processed and we will be woken up ++ // again. State will be State::Responded then. ++ // ++ // 2. If cancellation was requested but the scheme ++ // already processed the request, we will receive ++ // the actual response next and woken up again. ++ // State will be State::Responded then. ++ context::current() ++ .write(token.token()) ++ .block("UserInner::call (spurious wakeup)"); ++ } + } + + // invalid state +@@ -368,6 +401,68 @@ impl UserInner { + } + }, + } ++ ++ if let Some(descriptions) = timed_out_descriptions { ++ drop(states); ++ for desc in descriptions { ++ let _ = desc.try_close(token); ++ } ++ } ++ ++ if timed_out { ++ return Err(Error::new(ETIMEDOUT)); ++ } ++ } ++ } ++ } ++ ++ fn collect_descriptions_to_close( ++ fds: Vec>, ++ ) -> Vec { ++ fds.into_iter() ++ .filter_map(|fd| Arc::try_unwrap(fd).ok()) ++ .map(RwLock::into_inner) ++ .collect() ++ } ++ ++ pub fn fail_pending_calls(&self, token: &mut CleanLockToken) { ++ let descriptions_to_close = { ++ let mut states_lock = self.states.lock(token.token()); ++ let (states, mut lock_token) = states_lock.token_split(); ++ let mut descriptions_to_close = Vec::new(); ++ let mut states_to_remove = Vec::new(); ++ ++ for (id, state) in states.iter_mut() { ++ match mem::replace(state, State::Placeholder) { ++ State::Waiting { context, fds, .. 
} => { ++ descriptions_to_close.extend(Self::collect_descriptions_to_close(fds)); ++ ++ match context.upgrade() { ++ Some(context) => { ++ *state = State::Responded(Response::Regular( ++ Err(Error::new(ENODEV)), ++ 0, ++ false, ++ )); ++ context.write(lock_token.token()).unblock(); ++ } ++ None => states_to_remove.push(id), ++ } ++ } ++ old_state => *state = old_state, ++ } ++ } ++ ++ for id in states_to_remove { ++ states.remove(id); ++ } ++ ++ descriptions_to_close ++ }; ++ ++ for desc in descriptions_to_close { ++ let _ = desc.try_close(token); ++ } + } + } + } +@@ -1283,6 +1376,7 @@ impl UserInner { + } + + pub fn into_drop(self, token: &mut CleanLockToken) { ++ self.fail_pending_calls(token); + self.todo.condition.into_drop(token); + } + } +diff --git a/src/syscall/fs.rs b/src/syscall/fs.rs +index bf98464..10c6a92 100644 +--- a/src/syscall/fs.rs ++++ b/src/syscall/fs.rs +@@ -12,7 +12,7 @@ use crate::{ + memory::{AddrSpace, GenericFlusher, Grant, PageSpan, TlbShootdownActions}, + }, + memory::{Page, VirtualAddress, PAGE_SIZE}, +- scheme::{self, FileHandle, KernelScheme, OpenResult, StrOrBytes}, ++ scheme::{FileHandle, KernelScheme, OpenResult, StrOrBytes}, + sync::{CleanLockToken, RwLock}, + syscall::{data::Stat, error::*, flag::*}, + }; +@@ -45,7 +45,7 @@ pub fn file_op_generic_ext( + (file, desc) + }; + +- let scheme = scheme::get_scheme(token.token(), desc.scheme)?; ++ let scheme = desc.get_scheme(token)?; + + op(&*scheme, file.description, desc, token) + } +@@ -73,14 +73,18 @@ pub fn openat( + ) -> Result { + let path_buf = copy_path_to_buf(raw_path, PATH_MAX)?; + +- let (scheme_id, number) = { ++ let desc = { + let current_lock = context::current(); + let mut current = current_lock.read(token.token()); +- let (context, mut token) = current.token_split(); +- let pipe = context.get_file(fh, &mut token).ok_or(Error::new(EBADF))?; +- let desc = pipe.description.read(token.token()); +- (desc.scheme, desc.number) ++ let (context, mut context_token) = 
current.token_split(); ++ let pipe = context ++ .get_file(fh, &mut context_token) ++ .ok_or(Error::new(EBADF))?; ++ *pipe.description.read(context_token.token()) + }; ++ let scheme = desc.get_scheme(token)?; ++ let number = desc.number; ++ let scheme_id = desc.scheme; + + let caller_ctx = context::current() + .read(token.token()) +@@ -88,8 +92,6 @@ pub fn openat( + .filter_uid_gid(euid, egid); + + let new_description = { +- let scheme = scheme::get_scheme(token.token(), scheme_id)?; +- + let res = scheme.kopenat( + number, + StrOrBytes::from_str(&path_buf), +@@ -101,13 +103,14 @@ pub fn openat( + + match res? { + OpenResult::SchemeLocal(number, internal_flags) => { +- Arc::new(RwLock::new(FileDescription { +- offset: 0, +- internal_flags, +- scheme: scheme_id, ++ Arc::new(RwLock::new(FileDescription::new( ++ scheme_id, + number, +- flags: (flags & !O_CLOEXEC) as u32, +- })) ++ 0, ++ (flags & !O_CLOEXEC) as u32, ++ internal_flags, ++ token, ++ ))) + } + OpenResult::External(desc) => desc, + } +@@ -137,16 +140,17 @@ pub fn unlinkat( + ) -> Result<()> { + let path_buf = copy_path_to_buf(raw_path, PATH_MAX)?; + +- let (number, scheme_id) = { ++ let desc = { + let current_lock = context::current(); + let mut current = current_lock.read(token.token()); +- let (context, mut token) = current.token_split(); +- let pipe = context.get_file(fh, &mut token).ok_or(Error::new(EBADF))?; +- let desc = pipe.description.read(token.token()); +- (desc.number, desc.scheme) ++ let (context, mut context_token) = current.token_split(); ++ let pipe = context ++ .get_file(fh, &mut context_token) ++ .ok_or(Error::new(EBADF))?; ++ *pipe.description.read(context_token.token()) + }; +- +- let scheme = scheme::get_scheme(token.token(), scheme_id)?; ++ let number = desc.number; ++ let scheme = desc.get_scheme(token)?; + + let caller_ctx = context::current() + .read(token.token()) +@@ -199,17 +203,18 @@ fn duplicate_file( + let description = { *file.description.read(token.token()) }; + + let 
new_description = { +- let scheme = scheme::get_scheme(token.token(), description.scheme)?; ++ let scheme = description.get_scheme(token)?; + + match scheme.kdup(description.number, user_buf, caller_ctx, token)? { + OpenResult::SchemeLocal(number, internal_flags) => { +- Arc::new(RwLock::new(FileDescription { +- offset: 0, +- internal_flags, +- scheme: description.scheme, ++ Arc::new(RwLock::new(FileDescription::new( ++ description.scheme, + number, +- flags: description.flags, +- })) ++ 0, ++ description.flags, ++ internal_flags, ++ token, ++ ))) + } + OpenResult::External(desc) => desc, + } +@@ -296,11 +301,10 @@ fn call_normal( + } + .ok_or(Error::new(EBADF))?; + +- let (scheme_id, number) = { +- let desc = file.description.read(token.token()); +- (desc.scheme, desc.number) ++ let (scheme, number) = { ++ let desc = *file.description.read(token.token()); ++ (desc.get_scheme(token)?, desc.number) + }; +- let scheme = scheme::get_scheme(token.token(), scheme_id)?; + + if flags.contains(CallFlags::STD_FS) { + scheme.translate_std_fs_call(number, file.description, payload, flags, metadata, token) +@@ -341,28 +345,28 @@ fn fdwrite_inner( + ) -> Result { + // TODO: Ensure deadlocks can't happen + let (scheme, number, descs_to_send) = { +- let (scheme, number) = { ++ let desc = { + let current_lock = context::current(); + let mut current = current_lock.read(token.token()); +- let (context, mut token) = current.token_split(); ++ let (context, mut context_token) = current.token_split(); + let file_descriptor = context +- .get_file(socket, &mut token) ++ .get_file(socket, &mut context_token) + .ok_or(Error::new(EBADF))?; +- let desc = &file_descriptor.description.read(token.token()); +- (desc.scheme, desc.number) ++ *file_descriptor.description.read(context_token.token()) + }; +- let scheme = scheme::get_scheme(token.token(), scheme)?; ++ let scheme = desc.get_scheme(token)?; ++ let number = desc.number; + + let current_lock = context::current(); + let mut current = 
current_lock.read(token.token()); +- let (context, mut token) = current.token_split(); ++ let (context, mut context_token) = current.token_split(); + ( + scheme, + number, + if flags.contains(CallFlags::FD_CLONE) { +- context.bulk_get_files(&target_fds, &mut token) ++ context.bulk_get_files(&target_fds, &mut context_token) + } else { +- context.bulk_remove_files(&target_fds, &mut token) ++ context.bulk_remove_files(&target_fds, &mut context_token) + }? + .into_iter() + .map(|f| f.description) +@@ -395,18 +399,22 @@ fn call_fdread( + metadata: &[u64], + token: &mut CleanLockToken, + ) -> Result { ++ let desc = { ++ let current_lock = context::current(); ++ let mut current = current_lock.read(token.token()); ++ let (context, mut context_token) = current.token_split(); ++ let file_descriptor = context ++ .get_file(fd, &mut context_token) ++ .ok_or(Error::new(EBADF))?; ++ *file_descriptor.description.read(context_token.token()) ++ }; + let (scheme, number) = { +- let (scheme, number) = { +- let current_lock = context::current(); +- let mut current = current_lock.read(token.token()); +- let (context, mut token) = current.token_split(); +- let file_descriptor = context.get_file(fd, &mut token).ok_or(Error::new(EBADF))?; +- let desc = file_descriptor.description.read(token.token()); +- (desc.scheme, desc.number) +- }; +- let scheme = scheme::get_scheme(token.token(), scheme)?; +- +- (scheme, number) ++ let scheme = desc.get_scheme(token)?; ++ let number = desc.number; ++ ( ++ scheme, ++ number, ++ ) + }; + + scheme.kfdread(number, payload, flags, metadata, token) +@@ -440,9 +448,9 @@ pub fn fcntl(fd: FileHandle, cmd: usize, arg: usize, token: &mut CleanLockToken) + } + .ok_or(Error::new(EBADF))?; + +- let (scheme_id, number, flags) = { +- let desc = file.description.write(token.token()); +- (desc.scheme, desc.number, desc.flags) ++ let (number, flags, desc) = { ++ let desc = *file.description.read(token.token()); ++ (desc.number, desc.flags, desc) + }; + + if cmd == 
F_DUPFD || cmd == F_DUPFD_CLOEXEC { +@@ -460,7 +468,7 @@ pub fn fcntl(fd: FileHandle, cmd: usize, arg: usize, token: &mut CleanLockToken) + + // Communicate fcntl with scheme + if cmd != F_GETFD && cmd != F_SETFD { +- let scheme = scheme::get_scheme(token.token(), scheme_id)?; ++ let scheme = desc.get_scheme(token)?; + + scheme.fcntl(number, cmd, arg, token)?; + }; +@@ -518,13 +526,11 @@ pub fn flink(fd: FileHandle, raw_path: UserSliceRo, token: &mut CleanLockToken) + let path = RedoxPath::from_absolute(&path_buf).ok_or(Error::new(EINVAL))?; + let (_, reference) = path.as_parts().ok_or(Error::new(EINVAL))?; + +- let (number, scheme_id) = { +- let desc = file.description.read(token.token()); +- (desc.number, desc.scheme) ++ let (number, scheme) = { ++ let desc = *file.description.read(token.token()); ++ (desc.number, desc.get_scheme(token)?) + }; + +- let scheme = scheme::get_scheme(token.token(), scheme_id)?; +- + // TODO: Check EXDEV. + /* + if scheme_id != description.scheme { +@@ -554,13 +560,11 @@ pub fn frename(fd: FileHandle, raw_path: UserSliceRo, token: &mut CleanLockToken + let path = RedoxPath::from_absolute(&path_buf).ok_or(Error::new(EINVAL))?; + let (_, reference) = path.as_parts().ok_or(Error::new(EINVAL))?; + +- let (number, scheme_id) = { +- let desc = file.description.read(token.token()); +- (desc.number, desc.scheme) ++ let (number, scheme) = { ++ let desc = *file.description.read(token.token()); ++ (desc.number, desc.get_scheme(token)?) + }; + +- let scheme = scheme::get_scheme(token.token(), scheme_id)?; +- + // TODO: Check EXDEV. 
+ /* + if scheme_id != description.scheme { +diff --git a/src/syscall/process.rs b/src/syscall/process.rs +index e83da42..78eed9d 100644 +--- a/src/syscall/process.rs ++++ b/src/syscall/process.rs +@@ -271,23 +274,26 @@ unsafe fn bootstrap_mem(bootstrap: &crate::startup::Bootstrap) -> &'static [u8] + } + + fn insert_fd(scheme: SchemeId, number: usize, cloexec: bool, token: &mut CleanLockToken) -> usize { ++ let description = Arc::new(RwLock::new(FileDescription::new( ++ scheme, ++ number, ++ 0, ++ (O_CREAT | O_RDWR) as u32, ++ InternalFlags::empty(), ++ token, ++ ))); ++ + let current_lock = context::current(); + let mut current = current_lock.read(token.token()); +- let (context, mut token) = current.token_split(); ++ let (context, mut context_token) = current.token_split(); + context + .add_file_min( + FileDescriptor { +- description: Arc::new(RwLock::new(FileDescription { +- scheme, +- number, +- offset: 0, +- flags: (O_CREAT | O_RDWR) as u32, +- internal_flags: InternalFlags::empty(), +- })), ++ description, + cloexec, + }, + syscall::flag::UPPER_FDTBL_TAG + scheme.get(), +- &mut token, ++ &mut context_token, + ) + .expect("failed to insert fd to current context") + .get() diff --git a/sources/redbear-0.1.0/patches/P4-supplementary-groups.patch b/sources/redbear-0.1.0/patches/P4-supplementary-groups.patch new file mode 100644 index 00000000..1ff1e78f --- /dev/null +++ b/sources/redbear-0.1.0/patches/P4-supplementary-groups.patch @@ -0,0 +1,137 @@ +diff --git a/src/context/context.rs b/src/context/context.rs +index c97c516..6d723f4 100644 +--- a/src/context/context.rs ++++ b/src/context/context.rs +@@ -148,6 +148,8 @@ pub struct Context { + pub euid: u32, + pub egid: u32, + pub pid: usize, ++ /// Supplementary group IDs for access control decisions. 
++ pub groups: Vec, + + // See [`PreemptGuard`] + // +@@ -204,6 +206,7 @@ impl Context { + euid: 0, + egid: 0, + pid: 0, ++ groups: Vec::new(), + + #[cfg(feature = "syscall_debug")] + syscall_debug_info: crate::syscall::debug::SyscallDebugInfo::default(), +@@ -479,6 +482,7 @@ impl Context { + uid: self.euid, + gid: self.egid, + pid: self.pid, ++ groups: self.groups.clone(), + } + } + } +diff --git a/src/scheme/mod.rs b/src/scheme/mod.rs +index d30272c..9da2b28 100644 +--- a/src/scheme/mod.rs ++++ b/src/scheme/mod.rs +@@ -777,6 +777,7 @@ pub struct CallerCtx { + pub pid: usize, + pub uid: u32, + pub gid: u32, ++ pub groups: alloc::vec::Vec, + } + impl CallerCtx { + pub fn filter_uid_gid(self, euid: u32, egid: u32) -> Self { +@@ -785,6 +786,7 @@ impl CallerCtx { + pid: self.pid, + uid: euid, + gid: egid, ++ groups: self.groups, + } + } else { + self +diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs +index 47588e1..6ffb256 100644 +--- a/src/scheme/proc.rs ++++ b/src/scheme/proc.rs +@@ -105,6 +105,7 @@ enum ContextHandle { + // Attr handles, to set ens/euid/egid/pid. 
+ Authority, + Attr, ++ Groups, + + Status { + privileged: bool, +@@ -261,6 +262,7 @@ impl ProcScheme { + let handle = match actual_name { + "attrs" => ContextHandle::Attr, + "status" => ContextHandle::Status { privileged: true }, ++ "groups" => ContextHandle::Groups, + _ => return Err(Error::new(ENOENT)), + }; + +@@ -306,6 +308,11 @@ impl ProcScheme { + let id = NonZeroUsize::new(NEXT_ID.fetch_add(1, Ordering::Relaxed)) + .ok_or(Error::new(EMFILE))?; + let context = context::spawn(true, Some(id), ret, token)?; ++ { ++ let parent_groups = ++ context::current().read(token.token()).groups.clone(); ++ context.write(token.token()).groups = parent_groups; ++ } + HANDLES.write(token.token()).insert( + id.get(), + Handle { +@@ -1271,6 +1278,39 @@ impl ContextHandle { + guard.prio = (info.prio as usize).min(39); + Ok(size_of::()) + } ++ Self::Groups => { ++ const NGROUPS_MAX: usize = 65536; ++ if buf.len() % size_of::() != 0 { ++ return Err(Error::new(EINVAL)); ++ } ++ let count = buf.len() / size_of::(); ++ if count > NGROUPS_MAX { ++ return Err(Error::new(EINVAL)); ++ } ++ let mut groups = Vec::with_capacity(count); ++ for chunk in buf.in_exact_chunks(size_of::()).take(count) { ++ groups.push(chunk.read_u32()?); ++ } ++ let proc_id = { ++ let guard = context.read(token.token()); ++ guard.owner_proc_id ++ }; ++ { ++ let mut guard = context.write(token.token()); ++ guard.groups = groups.clone(); ++ } ++ if let Some(pid) = proc_id { ++ let mut contexts = context::contexts(token.downgrade()); ++ let (contexts, mut t) = contexts.token_split(); ++ for context_ref in contexts.iter() { ++ let mut ctx = context_ref.write(t.token()); ++ if ctx.owner_proc_id == Some(pid) { ++ ctx.groups = groups.clone(); ++ } ++ } ++ } ++ Ok(count * size_of::()) ++ } + ContextHandle::OpenViaDup => { + let mut args = buf.usizes(); + +@@ -1475,6 +1515,15 @@ impl ContextHandle { + debug_name, + }) + } ++ Self::Groups => { ++ let c = &context.read(token.token()); ++ let max = buf.len() / size_of::(); 
++ let count = c.groups.len().min(max); ++ for (chunk, gid) in buf.in_exact_chunks(size_of::()).zip(&c.groups).take(count) { ++ chunk.copy_from_slice(&gid.to_ne_bytes())?; ++ } ++ Ok(count * size_of::()) ++ } + ContextHandle::Sighandler => { + let data = match context.read(token.token()).sig { + Some(ref sig) => SetSighandlerData { diff --git a/sources/redbear-0.1.0/patches/P5-boot-path-hardening.patch b/sources/redbear-0.1.0/patches/P5-boot-path-hardening.patch new file mode 100644 index 00000000..c676d3d8 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-boot-path-hardening.patch @@ -0,0 +1,422 @@ +diff --git a/src/acpi/madt/arch/x86.rs b/src/acpi/madt/arch/x86.rs +index 4dc2388..f472c08 100644 +--- a/src/acpi/madt/arch/x86.rs ++++ b/src/acpi/madt/arch/x86.rs +@@ -18,6 +18,7 @@ use crate::{ + + use super::{Madt, MadtEntry}; + ++const AP_SPIN_LIMIT: u32 = 1_000_000; + const TRAMPOLINE: usize = 0x8000; + static TRAMPOLINE_DATA: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/trampoline")); + +@@ -42,13 +43,17 @@ pub(super) fn init(madt: Madt) { + //TODO: do not have writable and executable! 
+ let mut mapper = KernelMapper::lock_rw(); + +- let result = mapper +- .map_phys( +- trampoline_page.start_address(), +- trampoline_frame.base(), +- PageFlags::new().execute(true).write(true), +- ) +- .expect("failed to map trampoline"); ++ let result = match mapper.map_phys( ++ trampoline_page.start_address(), ++ trampoline_frame.base(), ++ PageFlags::new().execute(true).write(true), ++ ) { ++ Some(result) => result, ++ None => { ++ println!("KERNEL AP: failed to map trampoline page, AP bring-up disabled"); ++ return; ++ } ++ }; + + (result, mapper.table().phys().data()) + }; +@@ -72,17 +77,27 @@ pub(super) fn init(madt: Madt) { + if u32::from(ap_local_apic.id) == me.get() { + debug!(" This is my local APIC"); + } else if ap_local_apic.flags & 1 == 1 { +- let cpu_id = LogicalCpuId::next(); +- + // Allocate a stack +- let stack_start = RmmA::phys_to_virt( +- allocate_p2frame(4) +- .expect("no more frames in acpi stack_start") +- .base(), +- ) +- .data(); ++ let alloc = match allocate_p2frame(4) { ++ Some(frame) => frame, ++ None => { ++ println!("KERNEL AP: CPU {} no memory for stack, skipping", ap_local_apic.id); ++ continue; ++ } ++ }; ++ let stack_start = RmmA::phys_to_virt(alloc.base()).data(); + let stack_end = stack_start + (PAGE_SIZE << 4); + ++ let next_cpu = crate::CPU_COUNT.load(Ordering::Relaxed); ++ if next_cpu >= crate::cpu_set::MAX_CPU_COUNT { ++ println!( ++ "KERNEL AP: CPU {} exceeds logical CPU limit, skipping", ++ ap_local_apic.id ++ ); ++ continue; ++ } ++ let cpu_id = LogicalCpuId::new(next_cpu); ++ + let pcr_ptr = crate::arch::gdt::allocate_and_init_pcr(cpu_id, stack_end); + + let idt_ptr = crate::arch::idt::allocate_and_init_idt(cpu_id); +@@ -137,13 +152,34 @@ pub(super) fn init(madt: Madt) { + local_apic.set_icr(icr); + } + +- // Wait for trampoline ready +- while unsafe { (*ap_ready.cast::()).load(Ordering::SeqCst) } == 0 { ++ // Wait for trampoline ready with timeout ++ let mut trampoline_ready = false; ++ for _ in 0..AP_SPIN_LIMIT { ++ if 
unsafe { (*ap_ready.cast::()).load(Ordering::SeqCst) } != 0 { ++ trampoline_ready = true; ++ break; ++ } + hint::spin_loop(); + } +- while !AP_READY.load(Ordering::SeqCst) { ++ if !trampoline_ready { ++ println!("KERNEL AP: CPU {} trampoline timeout, skipping", ap_local_apic.id); ++ continue; ++ } ++ ++ let mut kernel_ready = false; ++ for _ in 0..AP_SPIN_LIMIT { ++ if AP_READY.load(Ordering::SeqCst) { ++ kernel_ready = true; ++ break; ++ } + hint::spin_loop(); + } ++ if !kernel_ready { ++ println!("KERNEL AP: CPU {} AP_READY timeout, skipping", ap_local_apic.id); ++ continue; ++ } ++ ++ crate::CPU_COUNT.fetch_add(1, Ordering::Relaxed); + + RmmA::invalidate_all(); + } +@@ -151,10 +187,12 @@ pub(super) fn init(madt: Madt) { + } + + // Unmap trampoline +- let (_frame, _, flush) = unsafe { ++ if let Some((_frame, _, flush)) = unsafe { + KernelMapper::lock_rw() + .unmap_phys(trampoline_page.start_address()) +- .expect("failed to unmap trampoline page") +- }; +- flush.flush(); ++ } { ++ flush.flush(); ++ } else { ++ println!("KERNEL AP: failed to unmap trampoline page (non-fatal)"); ++ } + } +diff --git a/src/allocator/mod.rs b/src/allocator/mod.rs +index 4fdb0ba..aaa7196 100644 +--- a/src/allocator/mod.rs ++++ b/src/allocator/mod.rs +@@ -7,26 +7,40 @@ mod linked_list; + /// Size of kernel heap + const KERNEL_HEAP_SIZE: usize = ::rmm::MEGABYTE; + ++#[cold] ++fn halt_kernel_heap_init(message: &str) -> ! { ++ print!("{message}"); ++ println!("Kernel heap initialization cannot continue. 
Halting."); ++ loop { ++ core::hint::spin_loop(); ++ } ++} ++ + unsafe fn map_heap(mapper: &mut KernelMapper, offset: usize, size: usize) { + let mut flush_all = PageFlushAll::new(); + + let heap_start_page = Page::containing_address(VirtualAddress::new(offset)); + let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size - 1)); + for page in Page::range_inclusive(heap_start_page, heap_end_page) { +- let phys = mapper +- .allocator_mut() +- .allocate_one() +- .expect("failed to allocate kernel heap"); ++ let phys = match mapper.allocator_mut().allocate_one() { ++ Some(phys) => phys, ++ None => halt_kernel_heap_init( ++ "FATAL: failed to allocate physical frame for kernel heap\n", ++ ), ++ }; + let flush = unsafe { +- mapper +- .map_phys( +- page.start_address(), +- phys, +- PageFlags::new() +- .write(true) +- .global(cfg!(not(feature = "pti"))), +- ) +- .expect("failed to map kernel heap") ++ match mapper.map_phys( ++ page.start_address(), ++ phys, ++ PageFlags::new() ++ .write(true) ++ .global(cfg!(not(feature = "pti"))), ++ ) { ++ Some(flush) => flush, ++ None => halt_kernel_heap_init( ++ "FATAL: failed to map kernel heap virtual page\n", ++ ), ++ } + }; + flush_all.consume(flush); + } +diff --git a/src/arch/x86_shared/gdt.rs b/src/arch/x86_shared/gdt.rs +index cad344f..f7acae3 100644 +--- a/src/arch/x86_shared/gdt.rs ++++ b/src/arch/x86_shared/gdt.rs +@@ -192,6 +192,15 @@ impl ProcessorControlRegion { + } + } + ++#[cold] ++fn halt_pcr_init() -> ! { ++ println!("FATAL: failed to allocate physical memory for Processor Control Region"); ++ println!("Processor startup cannot continue. 
Halting."); ++ loop { ++ core::hint::spin_loop(); ++ } ++} ++ + pub unsafe fn pcr() -> *mut ProcessorControlRegion { + unsafe { + // Primitive benchmarking of RDFSBASE and RDGSBASE in userspace, appears to indicate that +@@ -375,7 +384,10 @@ pub fn allocate_and_init_pcr( + .next_power_of_two() + .trailing_zeros(); + +- let pcr_frame = crate::memory::allocate_p2frame(alloc_order).expect("failed to allocate PCR"); ++ let pcr_frame = match crate::memory::allocate_p2frame(alloc_order) { ++ Some(frame) => frame, ++ None => halt_pcr_init(), ++ }; + let pcr_ptr = RmmA::phys_to_virt(pcr_frame.base()).data() as *mut ProcessorControlRegion; + unsafe { core::ptr::write(pcr_ptr, ProcessorControlRegion::new_partial_init(cpu_id)) }; + +diff --git a/src/arch/x86_shared/idt.rs b/src/arch/x86_shared/idt.rs +index 5006458..47f692f 100644 +--- a/src/arch/x86_shared/idt.rs ++++ b/src/arch/x86_shared/idt.rs +@@ -78,6 +78,15 @@ static INIT_BSP_IDT: SyncUnsafeCell = SyncUnsafeCell::new(Idt::new()); + pub(crate) static IDTS: RwLock> = + RwLock::new(HashMap::with_hasher(DefaultHashBuilder::new())); + ++#[cold] ++fn halt_idt_init() -> ! { ++ println!("FATAL: failed to allocate physical pages for backup interrupt stack"); ++ println!("Interrupt setup cannot continue. Halting."); ++ loop { ++ core::hint::spin_loop(); ++ } ++} ++ + #[inline] + pub fn is_reserved(cpu_id: LogicalCpuId, index: u8) -> bool { + if cpu_id == LogicalCpuId::BSP { +@@ -161,8 +170,10 @@ pub fn allocate_and_init_idt(cpu_id: LogicalCpuId) -> *mut Idt { + .or_insert_with(|| Box::leak(Box::new(Idt::new()))); + + use crate::memory::{RmmA, RmmArch}; +- let frames = crate::memory::allocate_p2frame(4) +- .expect("failed to allocate pages for backup interrupt stack"); ++ let frames = match crate::memory::allocate_p2frame(4) { ++ Some(frames) => frames, ++ None => halt_idt_init(), ++ }; + + // Physical pages are mapped linearly. So is the linearly mapped virtual memory. 
+ let base_address = RmmA::phys_to_virt(frames.base()); +diff --git a/src/memory/mod.rs b/src/memory/mod.rs +index 393ae7e..b4a1aa3 100644 +--- a/src/memory/mod.rs ++++ b/src/memory/mod.rs +@@ -754,7 +754,8 @@ pub fn init_mm(allocator: BumpAllocator) { + init_sections(allocator); + + unsafe { +- let the_frame = allocate_frame().expect("failed to allocate static zeroed frame"); ++ let the_frame = allocate_frame() ++ .expect("KERNEL MEM: failed to allocate static zeroed frame during init_mm - physical memory exhausted"); + let the_info = get_page_info(the_frame).expect("static zeroed frame had no PageInfo"); + the_info + .refcount +@@ -1027,9 +1028,13 @@ pub fn page_fault_handler( + let mut token = unsafe { CleanLockToken::new() }; + match context::memory::try_correcting_page_tables(faulting_page, mode, &mut token) { + Ok(()) => return Ok(()), +- Err(PfError::Oom) => todo!("oom"), ++ Err(PfError::Oom) => { ++ debug!("KERNEL PF: OOM during page table correction for {:#x}", faulting_address.data()); ++ } + Err(PfError::Segv | PfError::RecursionLimitExceeded) => (), +- Err(PfError::NonfatalInternalError) => todo!(), ++ Err(PfError::NonfatalInternalError) => { ++ debug!("KERNEL PF: internal error during page table correction for {:#x}", faulting_address.data()); ++ } + } + } + +@@ -1038,6 +1043,17 @@ pub fn page_fault_handler( + return Ok(()); + } + ++ debug!( ++ "KERNEL PF: addr={:#x} ip={:#x} mode={:?} kernel={} user={} write={} instr={}", ++ faulting_address.data(), ++ stack.ip(), ++ mode, ++ caused_by_kernel, ++ caused_by_user, ++ caused_by_write, ++ caused_by_instr_fetch, ++ ); ++ + Err(Segv) + } + static THE_ZEROED_FRAME: SyncUnsafeCell> = +diff --git a/src/startup/memory.rs b/src/startup/memory.rs +index 26922dd..f271200 100644 +--- a/src/startup/memory.rs ++++ b/src/startup/memory.rs +@@ -323,7 +323,16 @@ unsafe fn map_memory(areas: &[MemoryArea], mut bump_allocator: &mut Bum + } + } + +- let kernel_area = (*MEMORY_MAP.get()).kernel().unwrap(); ++ let kernel_area 
= match (*MEMORY_MAP.get()).kernel() { ++ Some(area) => area, ++ None => { ++ println!("FATAL: kernel memory area not found in boot memory map"); ++ println!("Cannot determine kernel base address. Halting."); ++ loop { ++ core::hint::spin_loop(); ++ } ++ } ++ }; + let kernel_base = kernel_area.start; + let kernel_size = kernel_area.end.saturating_sub(kernel_area.start); + // Map kernel at KERNEL_OFFSET +diff --git a/src/startup/mod.rs b/src/startup/mod.rs +index 8ad3cdf..86aabc2 100644 +--- a/src/startup/mod.rs ++++ b/src/startup/mod.rs +@@ -149,6 +149,15 @@ static BOOTSTRAP: spin::Once = spin::Once::new(); + pub(crate) static AP_READY: AtomicBool = AtomicBool::new(false); + static BSP_READY: AtomicBool = AtomicBool::new(false); + ++#[cold] ++fn halt_boot(message: &str) -> ! { ++ print!("{message}"); ++ println!("Kernel boot cannot continue. Halting."); ++ loop { ++ hint::spin_loop(); ++ } ++} ++ + /// This is the kernel entry point for the primary CPU. The arch crate is responsible for calling this + pub(crate) fn kmain(bootstrap: Bootstrap) -> ! { + let mut token = unsafe { CleanLockToken::new() }; +@@ -180,9 +189,7 @@ pub(crate) fn kmain(bootstrap: Bootstrap) -> ! 
{ + context.euid = 0; + context.egid = 0; + } +- Err(err) => { +- panic!("failed to spawn userspace_init: {:?}", err); +- } ++ Err(_err) => halt_boot("FATAL: failed to spawn first userspace process userspace_init\n"), + } + + run_userspace(&mut token) +diff --git a/src/syscall/process.rs b/src/syscall/process.rs +index e83da42..084b64e 100644 +--- a/src/syscall/process.rs ++++ b/src/syscall/process.rs +@@ -33,6 +33,8 @@ pub fn exit_this_context(excp: Option, token: &mut CleanLock + let mut close_files; + let addrspace_opt; + ++ super::futex::cleanup_current_robust_futexes(token); ++ + let context_lock = context::current(); + { + let mut context = context_lock.write(token.token()); +@@ -44,6 +46,7 @@ pub fn exit_this_context(excp: Option, token: &mut CleanLock + addrspace_opt = context + .set_addr_space(None, token.downgrade()) + .and_then(|a| Arc::try_unwrap(a).ok()); ++ context.robust_list_head = None; + drop(mem::replace(&mut context.syscall_head, SyscallFrame::Dummy)); + drop(mem::replace(&mut context.syscall_tail, SyscallFrame::Dummy)); + } +@@ -244,7 +247,11 @@ pub unsafe fn usermode_bootstrap(bootstrap: &Bootstrap, token: &mut CleanLockTok + .copy_from_slice(bootstrap_slice) + .expect("failed to copy memory to bootstrap"); + +- let bootstrap_entry = u64::from_le_bytes(bootstrap_slice[0x1a..0x22].try_into().unwrap()); ++ let bootstrap_entry = if bootstrap_slice.len() >= 0x22 { ++ u64::from_le_bytes(bootstrap_slice[0x1a..0x22].try_into().unwrap()) ++ } else { ++ panic!("KERNEL BOOT: bootstrap initfs too small ({} bytes, need at least 34) - cannot determine entry point", bootstrap_slice.len()); ++ }; + debug!("Bootstrap entry point: {:X}", bootstrap_entry); + assert_ne!(bootstrap_entry, 0); + +@@ -271,23 +278,26 @@ unsafe fn bootstrap_mem(bootstrap: &crate::startup::Bootstrap) -> &'static [u8] + } + + fn insert_fd(scheme: SchemeId, number: usize, cloexec: bool, token: &mut CleanLockToken) -> usize { ++ let description = Arc::new(RwLock::new(FileDescription::new( 
++ scheme, ++ number, ++ 0, ++ (O_CREAT | O_RDWR) as u32, ++ InternalFlags::empty(), ++ token, ++ ))); ++ + let current_lock = context::current(); + let mut current = current_lock.read(token.token()); +- let (context, mut token) = current.token_split(); ++ let (context, mut context_token) = current.token_split(); + context + .add_file_min( + FileDescriptor { +- description: Arc::new(RwLock::new(FileDescription { +- scheme, +- number, +- offset: 0, +- flags: (O_CREAT | O_RDWR) as u32, +- internal_flags: InternalFlags::empty(), +- })), ++ description, + cloexec, + }, + syscall::flag::UPPER_FDTBL_TAG + scheme.get(), +- &mut token, ++ &mut context_token, + ) + .expect("failed to insert fd to current context") + .get() diff --git a/sources/redbear-0.1.0/patches/P5-context-mod-sched.patch b/sources/redbear-0.1.0/patches/P5-context-mod-sched.patch new file mode 100644 index 00000000..58a60a58 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-context-mod-sched.patch @@ -0,0 +1,13 @@ +diff --git a/src/context/mod.rs b/src/context/mod.rs +index 37c73f5..4f5d60f 100644 +--- a/src/context/mod.rs ++++ b/src/context/mod.rs +@@ -22,7 +22,7 @@ use crate::{ + + use self::context::Kstack; + pub use self::{ +- context::{BorrowedHtBuf, Context, Status}, ++ context::{BorrowedHtBuf, Context, SchedPolicy, Status}, + switch::switch, + }; + diff --git a/sources/redbear-0.1.0/patches/P5-fatal-handler-diagnostics.patch b/sources/redbear-0.1.0/patches/P5-fatal-handler-diagnostics.patch new file mode 100644 index 00000000..f9fcc1c0 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-fatal-handler-diagnostics.patch @@ -0,0 +1,188 @@ +diff --git a/src/lib.rs b/src/lib.rs +--- a/src/lib.rs ++++ b/src/lib.rs +@@ -57,16 +57,151 @@ pub mod start; + pub mod sync; + +-use crate::platform::{Allocator, NEWALLOCATOR}; ++use crate::platform::{Allocator, NEWALLOCATOR, Pal, Sys}; + + #[global_allocator] + static ALLOCATOR: Allocator = NEWALLOCATOR; ++ ++const MAX_FATAL_BACKTRACE_FRAMES: usize = 16; 
++const MAX_FATAL_FRAME_STRIDE: usize = 1024 * 1024; ++ ++#[inline(never)] ++fn write_process_thread_identity(w: &mut platform::FileWriter) { ++ use core::fmt::Write; ++ ++ let pid = Sys::getpid(); ++ let tid = Sys::gettid(); ++ ++ match crate::pthread::current_thread() { ++ Some(thread) => { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC CONTEXT: pid={} tid={} pthread={:#x}\n", ++ pid, ++ tid, ++ thread as *const _ as usize, ++ )); ++ } ++ None => { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC CONTEXT: pid={} tid={} pthread=\n", ++ pid, tid, ++ )); ++ } ++ } ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++#[inline(never)] ++fn current_frame_pointer() -> *const usize { ++ let frame: *const usize; ++ ++ #[cfg(target_arch = "x86_64")] ++ unsafe { ++ core::arch::asm!("mov {}, rbp", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ #[cfg(target_arch = "x86")] ++ unsafe { ++ core::arch::asm!("mov {}, ebp", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ #[cfg(target_arch = "aarch64")] ++ unsafe { ++ core::arch::asm!("mov {}, x29", out(reg) frame, options(nomem, nostack, preserves_flags)); ++ } ++ ++ frame ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++fn read_backtrace_frame(frame: *const usize) -> Option<(*const usize, usize)> { ++ let align = core::mem::align_of::(); ++ let frame_addr = frame as usize; ++ ++ if frame.is_null() || frame_addr % align != 0 { ++ return None; ++ } ++ ++ let next_frame = unsafe { frame.read() } as *const usize; ++ let return_address = unsafe { frame.add(1).read() }; ++ ++ if return_address == 0 { ++ return None; ++ } ++ ++ Some((next_frame, return_address)) ++} ++ ++#[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++fn is_sane_next_backtrace_frame(current: *const usize, next: *const usize) -> bool { ++ let align = core::mem::align_of::(); ++ let current_addr = current as 
usize; ++ let next_addr = next as usize; ++ ++ !next.is_null() ++ && next_addr % align == 0 ++ && next_addr > current_addr ++ && next_addr - current_addr <= MAX_FATAL_FRAME_STRIDE ++} ++ ++#[inline(never)] ++fn write_best_effort_backtrace(w: &mut platform::FileWriter) { ++ use core::fmt::Write; ++ ++ let _ = w.write_str("RELIBC: attempting best-effort backtrace\n"); ++ ++ #[cfg(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"))] ++ { ++ let mut frame = current_frame_pointer(); ++ let mut wrote_frame = false; ++ ++ for frame_index in 0..MAX_FATAL_BACKTRACE_FRAMES { ++ let Some((next_frame, return_address)) = read_backtrace_frame(frame) else { ++ break; ++ }; ++ ++ wrote_frame = true; ++ let _ = w.write_fmt(format_args!( ++ "RELIBC BACKTRACE[{frame_index:02}]: {:#x}\n", ++ return_address, ++ )); ++ ++ if !is_sane_next_backtrace_frame(frame, next_frame) { ++ break; ++ } ++ ++ frame = next_frame; ++ } ++ ++ if !wrote_frame { ++ let _ = w.write_str("RELIBC: backtrace attempt produced no frames\n"); ++ } ++ } ++ ++ #[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))] ++ { ++ let _ = w.write_str("RELIBC: backtrace unavailable on this architecture\n"); ++ } ++} + + #[unsafe(no_mangle)] + pub extern "C" fn relibc_panic(pi: &::core::panic::PanicInfo) -> ! { + use core::fmt::Write; + + let mut w = platform::FileWriter::new(2); +- let _ = w.write_fmt(format_args!("RELIBC PANIC: {}\n", pi)); ++ ++ if let Some(location) = pi.location() { ++ let _ = w.write_fmt(format_args!( ++ "RELIBC PANIC LOCATION: {}:{}:{}\n", ++ location.file(), ++ location.line(), ++ location.column(), ++ )); ++ } else { ++ let _ = w.write_str("RELIBC PANIC LOCATION: \n"); ++ } ++ ++ write_process_thread_identity(&mut w); ++ let _ = w.write_fmt(format_args!("RELIBC PANIC: {}\n", pi)); + + core::intrinsics::abort(); + } +@@ -95,23 +235,28 @@ pub extern "C" fn rust_oom(layout: ::core::alloc::Layout) -> ! 
{ + + let mut w = platform::FileWriter::new(2); + let _ = w.write_fmt(format_args!( +- "RELIBC OOM: {} bytes aligned to {} bytes\n", ++ "RELIBC OOM: {} bytes aligned to {} bytes - process will abort\n", + layout.size(), + layout.align() + )); ++ write_process_thread_identity(&mut w); ++ write_best_effort_backtrace(&mut w); + + core::intrinsics::abort(); + } + + #[cfg(not(test))] + #[allow(non_snake_case)] + #[linkage = "weak"] + #[unsafe(no_mangle)] + pub extern "C" fn _Unwind_Resume() -> ! { + use core::fmt::Write; + + let mut w = platform::FileWriter::new(2); +- let _ = w.write_str("_Unwind_Resume\n"); ++ let _ = w.write_str( ++ "RELIBC: _Unwind_Resume called - exception propagation failed, aborting\n", ++ ); ++ write_process_thread_identity(&mut w); + + core::intrinsics::abort(); + } diff --git a/sources/redbear-0.1.0/patches/P5-init-daemon-panic-hardening.patch b/sources/redbear-0.1.0/patches/P5-init-daemon-panic-hardening.patch new file mode 100644 index 00000000..efcd5d81 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-init-daemon-panic-hardening.patch @@ -0,0 +1,685 @@ +diff --git a/drivers/acpid/src/main.rs b/drivers/acpid/src/main.rs +index 059254b3..a3f5f996 100644 +--- a/drivers/acpid/src/main.rs ++++ b/drivers/acpid/src/main.rs +@@ -3,6 +3,7 @@ use std::fs::File; + use std::mem; + use std::ops::ControlFlow; + use std::os::unix::io::AsRawFd; ++use std::process; + use std::sync::Arc; + + use ::acpi::aml::op_region::{RegionHandler, RegionSpace}; +@@ -17,6 +18,58 @@ mod ec; + + mod scheme; + ++fn parse_physaddrs(sdt: &self::acpi::Sdt) -> Vec { ++ match &sdt.signature { ++ b"RSDT" => { ++ let chunks = sdt.data().chunks_exact(mem::size_of::()); ++ if !chunks.remainder().is_empty() { ++ eprintln!( ++ "acpid: malformed RSDT length {}: expected 4-byte entries", ++ sdt.data().len() ++ ); ++ process::exit(1); ++ } ++ ++ chunks ++ .map(|chunk| match <[u8; mem::size_of::()]>::try_from(chunk) { ++ Ok(bytes) => u32::from_le_bytes(bytes) as u64, ++ Err(_) => { ++ 
eprintln!("acpid: failed to decode RSDT physical address entry"); ++ process::exit(1); ++ } ++ }) ++ .collect() ++ } ++ b"XSDT" => { ++ let chunks = sdt.data().chunks_exact(mem::size_of::()); ++ if !chunks.remainder().is_empty() { ++ eprintln!( ++ "acpid: malformed XSDT length {}: expected 8-byte entries", ++ sdt.data().len() ++ ); ++ process::exit(1); ++ } ++ ++ chunks ++ .map(|chunk| match <[u8; mem::size_of::()]>::try_from(chunk) { ++ Ok(bytes) => u64::from_le_bytes(bytes), ++ Err(_) => { ++ eprintln!("acpid: failed to decode XSDT physical address entry"); ++ process::exit(1); ++ } ++ }) ++ .collect() ++ } ++ signature => { ++ eprintln!( ++ "acpid: expected [RX]SDT from kernel, got {:?}", ++ String::from_utf8_lossy(signature) ++ ); ++ process::exit(1); ++ } ++ } ++} ++ + fn daemon(daemon: daemon::Daemon) -> ! { + common::setup_logging( + "misc", +@@ -29,7 +82,10 @@ fn daemon(daemon: daemon::Daemon) -> ! { + log::info!("acpid start"); + + let rxsdt_raw_data: Arc<[u8]> = std::fs::read("/scheme/kernel.acpi/rxsdt") +- .expect("acpid: failed to read `/scheme/kernel.acpi/rxsdt`") ++ .unwrap_or_else(|err| { ++ eprintln!("acpid: failed to read `/scheme/kernel.acpi/rxsdt`: {err}"); ++ process::exit(1); ++ }) + .into(); + + if rxsdt_raw_data.is_empty() { +@@ -38,84 +94,84 @@ fn daemon(daemon: daemon::Daemon) -> ! { + std::process::exit(0); + } + +- let sdt = self::acpi::Sdt::new(rxsdt_raw_data).expect("acpid: failed to parse [RX]SDT"); +- +- let mut thirty_two_bit; +- let mut sixty_four_bit; +- +- let physaddrs_iter = match &sdt.signature { +- b"RSDT" => { +- thirty_two_bit = sdt +- .data() +- .chunks(mem::size_of::()) +- // TODO: With const generics, the compiler has some way of doing this for static sizes. 
+- .map(|chunk| <[u8; mem::size_of::()]>::try_from(chunk).unwrap()) +- .map(|chunk| u32::from_le_bytes(chunk)) +- .map(u64::from); +- +- &mut thirty_two_bit as &mut dyn Iterator +- } +- b"XSDT" => { +- sixty_four_bit = sdt +- .data() +- .chunks(mem::size_of::()) +- .map(|chunk| <[u8; mem::size_of::()]>::try_from(chunk).unwrap()) +- .map(|chunk| u64::from_le_bytes(chunk)); +- +- &mut sixty_four_bit as &mut dyn Iterator +- } +- _ => panic!("acpid: expected [RX]SDT from kernel to be either of those"), +- }; ++ let sdt = self::acpi::Sdt::new(rxsdt_raw_data).unwrap_or_else(|err| { ++ eprintln!("acpid: failed to parse [RX]SDT: {err}"); ++ process::exit(1); ++ }); ++ let physaddrs = parse_physaddrs(&sdt); + + let region_handlers: Vec<(RegionSpace, Box)> = vec![ + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + (RegionSpace::EmbeddedControl, Box::new(ec::Ec::new())), + ]; +- let acpi_context = self::acpi::AcpiContext::init(physaddrs_iter, region_handlers); ++ let acpi_context = self::acpi::AcpiContext::init(physaddrs.into_iter(), region_handlers); + + // TODO: I/O permission bitmap? 
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +- common::acquire_port_io_rights().expect("acpid: failed to set I/O privilege level to Ring 3"); ++ common::acquire_port_io_rights().unwrap_or_else(|err| { ++ eprintln!("acpid: failed to set I/O privilege level to Ring 3: {err}"); ++ process::exit(1); ++ }); + + let shutdown_pipe = File::open("/scheme/kernel.acpi/kstop") +- .expect("acpid: failed to open `/scheme/kernel.acpi/kstop`"); +- +- let mut event_queue = RawEventQueue::new().expect("acpid: failed to create event queue"); +- let socket = Socket::nonblock().expect("acpid: failed to create disk scheme"); ++ .unwrap_or_else(|err| { ++ eprintln!("acpid: failed to open `/scheme/kernel.acpi/kstop`: {err}"); ++ process::exit(1); ++ }); ++ ++ let mut event_queue = RawEventQueue::new().unwrap_or_else(|err| { ++ eprintln!("acpid: failed to create event queue: {err}"); ++ process::exit(1); ++ }); ++ let socket = Socket::nonblock().unwrap_or_else(|err| { ++ eprintln!("acpid: failed to create disk scheme: {err}"); ++ process::exit(1); ++ }); + + let mut scheme = self::scheme::AcpiScheme::new(&acpi_context, &socket); + let mut handler = Blocking::new(&socket, 16); + + event_queue + .subscribe(shutdown_pipe.as_raw_fd() as usize, 0, EventFlags::READ) +- .expect("acpid: failed to register shutdown pipe for event queue"); ++ .unwrap_or_else(|err| { ++ eprintln!("acpid: failed to register shutdown pipe for event queue: {err}"); ++ process::exit(1); ++ }); + event_queue + .subscribe(socket.inner().raw(), 1, EventFlags::READ) +- .expect("acpid: failed to register scheme socket for event queue"); ++ .unwrap_or_else(|err| { ++ eprintln!("acpid: failed to register scheme socket for event queue: {err}"); ++ process::exit(1); ++ }); + + register_sync_scheme(&socket, "acpi", &mut scheme) +- .expect("acpid: failed to register acpi scheme to namespace"); ++ .unwrap_or_else(|err| { ++ eprintln!("acpid: failed to register acpi scheme to namespace: {err}"); ++ process::exit(1); ++ 
}); + + daemon.ready(); + +- libredox::call::setrens(0, 0).expect("acpid: failed to enter null namespace"); ++ libredox::call::setrens(0, 0).unwrap_or_else(|err| { ++ eprintln!("acpid: failed to enter null namespace: {err}"); ++ process::exit(1); ++ }); + + let mut mounted = true; + while mounted { +- let Some(event) = event_queue +- .next() +- .transpose() +- .expect("acpid: failed to read event file") +- else { ++ let Some(event) = event_queue.next().transpose().unwrap_or_else(|err| { ++ eprintln!("acpid: failed to read event file: {err}"); ++ process::exit(1); ++ }) else { + break; + }; + + if event.fd == socket.inner().raw() { + loop { +- match handler +- .process_requests_nonblocking(&mut scheme) +- .expect("acpid: failed to process requests") +- { ++ match handler.process_requests_nonblocking(&mut scheme).unwrap_or_else(|err| { ++ eprintln!("acpid: failed to process requests: {err}"); ++ process::exit(1); ++ }) { + ControlFlow::Continue(()) => {} + ControlFlow::Break(()) => break, + } +@@ -134,7 +190,8 @@ fn daemon(daemon: daemon::Daemon) -> ! 
{ + + acpi_context.set_global_s_state(5); + +- unreachable!("System should have shut down before this is entered"); ++ eprintln!("acpid: system did not shut down after requesting S5"); ++ process::exit(1); + } + + fn main() { +diff --git a/drivers/pcid/src/main.rs b/drivers/pcid/src/main.rs +index 61cd9a78..18ee18ab 100644 +--- a/drivers/pcid/src/main.rs ++++ b/drivers/pcid/src/main.rs +@@ -3,6 +3,7 @@ + #![feature(non_exhaustive_omitted_patterns_lint)] + + use std::collections::BTreeMap; ++use std::process; + + use log::{debug, info, trace, warn}; + use pci_types::capability::PciCapability; +@@ -42,7 +43,16 @@ fn handle_parsed_header( + continue; + } + match endpoint_header.bar(i, pcie) { +- Some(TyBar::Io { port }) => bars[i as usize] = PciBar::Port(port.try_into().unwrap()), ++ Some(TyBar::Io { port }) => match port.try_into() { ++ Ok(port) => bars[i as usize] = PciBar::Port(port), ++ Err(_) => { ++ warn!( ++ "pcid: skipping invalid I/O BAR port {port:#x} on {}", ++ endpoint_header.header().address() ++ ); ++ bars[i as usize] = PciBar::None; ++ } ++ }, + Some(TyBar::Memory32 { + address, + size, +@@ -251,7 +261,10 @@ fn daemon(daemon: daemon::Daemon) -> ! { + info!("PCI SG-BS:DV.F VEND:DEVI CL.SC.IN.RV"); + + let mut scheme = scheme::PciScheme::new(pcie); +- let socket = redox_scheme::Socket::create().expect("failed to open pci scheme socket"); ++ let socket = redox_scheme::Socket::create().unwrap_or_else(|err| { ++ eprintln!("pcid: failed to open pci scheme socket: {err}"); ++ process::exit(1); ++ }); + let handler = Blocking::new(&socket, 16); + + { +@@ -259,17 +272,27 @@ fn daemon(daemon: daemon::Daemon) -> ! 
{ + Ok(register_pci) => { + let access_id = scheme.access(); + +- let access_fd = socket +- .create_this_scheme_fd(0, access_id, syscall::O_RDWR, 0) +- .expect("failed to issue this resource"); +- let access_bytes = access_fd.to_ne_bytes(); +- let _ = register_pci +- .call_wo( +- &access_bytes, +- syscall::CallFlags::WRITE | syscall::CallFlags::FD, +- &[], +- ) +- .expect("failed to send pci_fd to acpid"); ++ match socket.create_this_scheme_fd(0, access_id, syscall::O_RDWR, 0) { ++ Ok(access_fd) => { ++ let access_bytes = access_fd.to_ne_bytes(); ++ if let Err(err) = register_pci.call_wo( ++ &access_bytes, ++ syscall::CallFlags::WRITE | syscall::CallFlags::FD, ++ &[], ++ ) { ++ warn!( ++ "pcid: failed to send pci_fd to acpid (error: {}). Running without ACPI integration.", ++ err ++ ); ++ } ++ } ++ Err(err) => { ++ warn!( ++ "pcid: failed to issue acpid registration resource (error: {}). Running without ACPI integration.", ++ err ++ ); ++ } ++ } + } + Err(err) => { + if err.errno() == libredox::errno::ENODEV { +@@ -305,13 +328,20 @@ fn daemon(daemon: daemon::Daemon) -> ! 
{ + debug!("Enumeration complete, now starting pci scheme"); + + register_sync_scheme(&socket, "pci", &mut scheme) +- .expect("failed to register pci scheme to namespace"); ++ .unwrap_or_else(|err| { ++ eprintln!("pcid: failed to register pci scheme to namespace: {err}"); ++ process::exit(1); ++ }); + + let _ = daemon.ready(); + +- handler +- .process_requests_blocking(scheme) +- .expect("pcid: failed to process requests"); ++ match handler.process_requests_blocking(scheme) { ++ Ok(never) => match never {}, ++ Err(err) => { ++ eprintln!("pcid: failed to process requests: {err}"); ++ process::exit(1); ++ } ++ } + } + + fn scan_device( +@@ -323,6 +353,7 @@ fn scan_device( + ) { + for func_num in 0..8 { + let header = TyPciHeader::new(PciAddress::new(0, bus_num, dev_num, func_num)); ++ let header_address = header.address(); + + let (vendor_id, device_id) = header.id(pcie); + if vendor_id == 0xffff && device_id == 0xffff { +@@ -344,21 +375,40 @@ fn scan_device( + revision, + }; + +- info!("PCI {} {}", header.address(), full_device_id.display()); ++ info!("PCI {} {}", header_address, full_device_id.display()); + + let has_multiple_functions = header.has_multiple_functions(pcie); + + match header.header_type(pcie) { + HeaderType::Endpoint => { ++ let endpoint_header = match EndpointHeader::from_header(header, pcie) { ++ Some(endpoint_header) => endpoint_header, ++ None => { ++ warn!( ++ "pcid: failed to parse endpoint header for {}", ++ header_address, ++ ); ++ continue; ++ } ++ }; + handle_parsed_header( + pcie, + tree, +- EndpointHeader::from_header(header, pcie).unwrap(), ++ endpoint_header, + full_device_id, + ); + } + HeaderType::PciPciBridge => { +- let bridge_header = PciPciBridgeHeader::from_header(header, pcie).unwrap(); ++ let bridge_header = match PciPciBridgeHeader::from_header(header, pcie) { ++ Some(bridge_header) => bridge_header, ++ None => { ++ warn!( ++ "pcid: failed to parse bridge header for {}", ++ header_address, ++ ); ++ continue; ++ } ++ }; + 
bus_nums.push(bridge_header.secondary_bus_number(pcie)); + } + ty => { +diff --git a/init/src/main.rs b/init/src/main.rs +index 5682cf44..cd270a6e 100644 +--- a/init/src/main.rs ++++ b/init/src/main.rs +@@ -1,6 +1,7 @@ + use std::collections::BTreeMap; + use std::ffi::OsString; + use std::path::Path; ++use std::time::Duration; + use std::{env, fs, io}; + + use libredox::flag::{O_RDONLY, O_WRONLY}; +@@ -166,19 +167,36 @@ fn main() { + } + }; + for entry in entries { ++ let Some(unit_name) = entry.file_name().and_then(|name| name.to_str()) else { ++ eprintln!( ++ "init: skipping config entry with invalid filename: {}", ++ entry.display() ++ ); ++ continue; ++ }; + scheduler.schedule_start_and_report_errors( + &mut unit_store, +- UnitId(entry.file_name().unwrap().to_str().unwrap().to_owned()), ++ UnitId(unit_name.to_owned()), + ); + } + }; + + scheduler.step(&mut unit_store, &mut init_config); + +- libredox::call::setrens(0, 0).expect("init: failed to enter null namespace"); ++ if let Err(err) = libredox::call::setrens(0, 0) { ++ eprintln!("init: failed to enter null namespace: {err}"); ++ std::process::exit(1); ++ } + + loop { + let mut status = 0; +- libredox::call::waitpid(0, &mut status, 0).unwrap(); ++ match libredox::call::waitpid(0, &mut status, 0) { ++ Ok(_) => {} ++ Err(err) if err.errno() == libredox::errno::EINTR => continue, ++ Err(err) => { ++ eprintln!("init: waitpid failed: {err}"); ++ std::thread::sleep(Duration::from_millis(100)); ++ } ++ } + } + } +diff --git a/init/src/scheduler.rs b/init/src/scheduler.rs +index d42a4e57..3b8d10b0 100644 +--- a/init/src/scheduler.rs ++++ b/init/src/scheduler.rs +@@ -43,7 +43,10 @@ impl Scheduler { + ) { + let loaded_units = unit_store.load_units(unit_id.clone(), errors); + for unit_id in loaded_units { +- if !unit_store.unit(&unit_id).conditions_met() { ++ if unit_store ++ .try_unit(&unit_id) ++ .is_ok_and(|unit| !unit.conditions_met()) ++ { + continue; + } + +@@ -62,7 +65,10 @@ impl Scheduler { + + match job.kind { 
+ JobKind::Start => { +- let unit = unit_store.unit_mut(&job.unit); ++ let Ok(unit) = unit_store.try_unit_mut(&job.unit) else { ++ eprintln!("init: unit {} not found in store, skipping", job.unit.0); ++ continue 'a; ++ }; + + for dep in &unit.info.requires_weak { + for pending_job in &self.pending { +diff --git a/init/src/service.rs b/init/src/service.rs +index ed0023e9..827ae275 100644 +--- a/init/src/service.rs ++++ b/init/src/service.rs +@@ -3,13 +3,24 @@ use std::ffi::OsString; + use std::io::Read; + use std::os::fd::{AsRawFd, OwnedFd}; + use std::os::unix::process::CommandExt; +-use std::process::Command; ++use std::process::{Child, Command}; + use std::{env, io}; + + use serde::Deserialize; + + use crate::script::subst_env; + ++fn terminate_child(child: &mut Child, command: &str) { ++ if let Err(err) = child.kill() { ++ if err.kind() != io::ErrorKind::InvalidInput { ++ eprintln!("init: failed to terminate {command}: {err}"); ++ } ++ } ++ if let Err(err) = child.wait() { ++ eprintln!("init: failed to reap {command}: {err}"); ++ } ++} ++ + #[derive(Clone, Debug, Deserialize)] + #[serde(deny_unknown_fields)] + pub struct Service { +@@ -37,7 +48,8 @@ pub enum ServiceType { + impl Service { + pub fn spawn(&self, base_envs: &BTreeMap) { + let mut command = Command::new(&self.cmd); +- command.args(self.args.iter().map(|arg| subst_env(arg))); ++ let resolved_args: Vec = self.args.iter().map(|arg| subst_env(arg)).collect(); ++ command.args(&resolved_args); + command.env_clear(); + for env in &self.inherit_envs { + if let Some(value) = env::var_os(env) { +@@ -45,14 +57,25 @@ impl Service { + } + } + command.envs(base_envs).envs(&self.envs); ++ let command_display = if resolved_args.is_empty() { ++ self.cmd.clone() ++ } else { ++ format!("{} {}", self.cmd, resolved_args.join(" ")) ++ }; + +- let (mut read_pipe, write_pipe) = io::pipe().unwrap(); ++ let (mut read_pipe, write_pipe) = match io::pipe().map_err(|err| { ++ eprintln!("init: failed to create readiness pipe for 
{command_display}: {err}"); ++ err ++ }) { ++ Ok(pair) => pair, ++ Err(_) => return, ++ }; + unsafe { pass_fd(&mut command, "INIT_NOTIFY", write_pipe.into()) }; + + let mut child = match command.spawn() { + Ok(child) => child, + Err(err) => { +- eprintln!("init: failed to execute {:?}: {}", command, err); ++ eprintln!("init: failed to execute {command_display}: {err}"); + return; + } + }; +@@ -61,10 +84,10 @@ impl Service { + ServiceType::Notify => match read_pipe.read_exact(&mut [0]) { + Ok(()) => {} + Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => { +- eprintln!("init: {command:?} exited without notifying readiness"); ++ eprintln!("init: {command_display} exited without notifying readiness"); + } + Err(err) => { +- eprintln!("init: failed to wait for {command:?}: {err}"); ++ eprintln!("init: failed to wait for {command_display}: {err}"); + } + }, + ServiceType::Scheme(scheme) => { +@@ -80,7 +103,7 @@ impl Service { + errno: syscall::EINTR, + }) => continue, + Ok(0) => { +- eprintln!("init: {command:?} exited without notifying readiness"); ++ eprintln!("init: {command_display} exited without notifying readiness"); + return; + } + Ok(1) => break, +@@ -89,26 +112,40 @@ impl Service { + return; + } + Err(err) => { +- eprintln!("init: failed to wait for {command:?}: {err}"); ++ eprintln!("init: failed to wait for {command_display}: {err}"); + return; + } + } + } + +- let current_namespace_fd = libredox::call::getns().expect("TODO"); +- libredox::call::register_scheme_to_ns(current_namespace_fd, scheme, new_fd) +- .expect("TODO"); ++ let current_namespace_fd = match libredox::call::getns() { ++ Ok(fd) => fd, ++ Err(err) => { ++ eprintln!("init: failed to get current namespace for {command_display}: {err}"); ++ terminate_child(&mut child, &command_display); ++ return; ++ } ++ }; ++ if let Err(err) = ++ libredox::call::register_scheme_to_ns(current_namespace_fd, scheme, new_fd) ++ { ++ eprintln!( ++ "init: failed to register scheme {scheme:?} for 
{command_display}: {err}" ++ ); ++ terminate_child(&mut child, &command_display); ++ return; ++ } + } + ServiceType::Oneshot => { + drop(read_pipe); + match child.wait() { + Ok(exit_status) => { + if !exit_status.success() { +- eprintln!("init: {command:?} failed with {exit_status}"); ++ eprintln!("init: {command_display} failed with {exit_status}"); + } + } + Err(err) => { +- eprintln!("init: failed to wait for {:?}: {}", command, err) ++ eprintln!("init: failed to wait for {command_display}: {err}") + } + } + } +diff --git a/init/src/unit.rs b/init/src/unit.rs +index 98053cb2..bd998394 100644 +--- a/init/src/unit.rs ++++ b/init/src/unit.rs +@@ -23,8 +23,14 @@ impl UnitStore { + } + + pub fn set_runtime_target(&mut self, unit_id: UnitId) { +- assert!(self.runtime_target.is_none()); +- assert!(self.units.contains_key(&unit_id)); ++ if self.runtime_target.is_some() { ++ eprintln!("init: runtime target already set, ignoring {}", unit_id.0); ++ return; ++ } ++ if !self.units.contains_key(&unit_id) { ++ eprintln!("init: runtime target {} not found in unit store", unit_id.0); ++ return; ++ } + self.runtime_target = Some(unit_id); + } + +@@ -85,8 +91,15 @@ impl UnitStore { + let unit = self.load_single_unit(unit_id, errors); + if let Some(unit) = unit { + loaded_units.push(unit.clone()); +- for dep in &self.unit(&unit).info.requires_weak { +- pending_units.push(dep.clone()); ++ match self.try_unit(&unit) { ++ Ok(unit) => { ++ for dep in &unit.info.requires_weak { ++ pending_units.push(dep.clone()); ++ } ++ } ++ Err(err) => { ++ errors.push(err); ++ } + } + } + } +@@ -94,12 +107,34 @@ impl UnitStore { + loaded_units + } + ++ pub fn try_unit(&self, unit: &UnitId) -> Result<&Unit, String> { ++ self.units ++ .get(unit) ++ .ok_or_else(|| format!("unit {} not found in store", unit.0)) ++ } ++ ++ // Keep the legacy infallible accessors for compatibility while scheduler/load paths ++ // use the fallible helpers to avoid panicking on missing units. 
++ #[allow(dead_code)] + pub fn unit(&self, unit: &UnitId) -> &Unit { +- self.units.get(unit).unwrap() ++ self.try_unit(unit).unwrap_or_else(|err| { ++ eprintln!("init: {err}"); ++ std::process::exit(1); ++ }) ++ } ++ ++ pub fn try_unit_mut(&mut self, unit: &UnitId) -> Result<&mut Unit, String> { ++ self.units ++ .get_mut(unit) ++ .ok_or_else(|| format!("unit {} not found in store", unit.0)) + } + ++ #[allow(dead_code)] + pub fn unit_mut(&mut self, unit: &UnitId) -> &mut Unit { +- self.units.get_mut(unit).unwrap() ++ self.try_unit_mut(unit).unwrap_or_else(|err| { ++ eprintln!("init: {err}"); ++ std::process::exit(1); ++ }) + } + } + +@@ -180,7 +215,7 @@ impl Unit { + ) -> io::Result { + let config = fs::read_to_string(config_path)?; + +- let Some(ext) = config_path.extension().map(|ext| ext.to_str().unwrap()) else { ++ let Some(ext) = config_path.extension().and_then(|ext| ext.to_str()) else { + let script = Script::from_str(&config, errors)?; + return Ok(Unit { + id, diff --git a/sources/redbear-0.1.0/patches/P5-init-supervisor-restart.patch b/sources/redbear-0.1.0/patches/P5-init-supervisor-restart.patch new file mode 100644 index 00000000..a5325b59 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-init-supervisor-restart.patch @@ -0,0 +1,633 @@ +diff --git a/drivers/acpid/src/acpi.rs b/drivers/acpid/src/acpi.rs +index 94a1eb17e..3521bfc7b 100644 +--- a/drivers/acpid/src/acpi.rs ++++ b/drivers/acpid/src/acpi.rs +@@ -136,9 +136,10 @@ impl Sdt { + let header = match plain::from_bytes::(&slice) { + Ok(header) => header, + Err(plain::Error::TooShort) => return Err(InvalidSdtError::InvalidSize), +- Err(plain::Error::BadAlignment) => panic!( +- "plain::from_bytes failed due to alignment, but SdtHeader is #[repr(packed)]!" 
+- ), ++ Err(plain::Error::BadAlignment) => { ++ log::error!("acpid: plain::from_bytes failed due to alignment, but SdtHeader is #[repr(packed)] - internal inconsistency"); ++ return Err(InvalidSdtError::InvalidSize); ++ } + }; + + if header.length() != slice.len() { +diff --git a/drivers/acpid/src/main.rs b/drivers/acpid/src/main.rs +index 059254b3e..8f99f2ea9 100644 +--- a/drivers/acpid/src/main.rs ++++ b/drivers/acpid/src/main.rs +@@ -28,9 +28,13 @@ fn daemon(daemon: daemon::Daemon) -> ! { + + log::info!("acpid start"); + +- let rxsdt_raw_data: Arc<[u8]> = std::fs::read("/scheme/kernel.acpi/rxsdt") +- .expect("acpid: failed to read `/scheme/kernel.acpi/rxsdt`") +- .into(); ++ let rxsdt_raw_data: Arc<[u8]> = match std::fs::read("/scheme/kernel.acpi/rxsdt") { ++ Ok(data) => data.into(), ++ Err(err) => { ++ log::error!("acpid: failed to read `/scheme/kernel.acpi/rxsdt`: {}", err); ++ std::process::exit(1); ++ } ++ }; + + if rxsdt_raw_data.is_empty() { + log::info!("System doesn't use ACPI"); +@@ -38,7 +42,13 @@ fn daemon(daemon: daemon::Daemon) -> ! { + std::process::exit(0); + } + +- let sdt = self::acpi::Sdt::new(rxsdt_raw_data).expect("acpid: failed to parse [RX]SDT"); ++ let sdt = match self::acpi::Sdt::new(rxsdt_raw_data) { ++ Ok(sdt) => sdt, ++ Err(err) => { ++ log::error!("acpid: failed to parse [RX]SDT: {:?}", err); ++ std::process::exit(1); ++ } ++ }; + + let mut thirty_two_bit; + let mut sixty_four_bit; +@@ -64,7 +74,10 @@ fn daemon(daemon: daemon::Daemon) -> ! { + + &mut sixty_four_bit as &mut dyn Iterator + } +- _ => panic!("acpid: expected [RX]SDT from kernel to be either of those"), ++ _ => { ++ log::error!("acpid: expected [RX]SDT from kernel to be RSDT or XSDT"); ++ std::process::exit(1); ++ } + }; + + let region_handlers: Vec<(RegionSpace, Box)> = vec![ +@@ -75,49 +88,84 @@ fn daemon(daemon: daemon::Daemon) -> ! { + + // TODO: I/O permission bitmap? 
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +- common::acquire_port_io_rights().expect("acpid: failed to set I/O privilege level to Ring 3"); ++ if let Err(err) = common::acquire_port_io_rights() { ++ log::error!("acpid: failed to set I/O privilege level to Ring 3: {:?}", err); ++ std::process::exit(1); ++ } + +- let shutdown_pipe = File::open("/scheme/kernel.acpi/kstop") +- .expect("acpid: failed to open `/scheme/kernel.acpi/kstop`"); ++ let shutdown_pipe = match File::open("/scheme/kernel.acpi/kstop") { ++ Ok(f) => f, ++ Err(err) => { ++ log::error!("acpid: failed to open `/scheme/kernel.acpi/kstop`: {}", err); ++ std::process::exit(1); ++ } ++ }; + +- let mut event_queue = RawEventQueue::new().expect("acpid: failed to create event queue"); +- let socket = Socket::nonblock().expect("acpid: failed to create disk scheme"); ++ let mut event_queue = match RawEventQueue::new() { ++ Ok(q) => q, ++ Err(err) => { ++ log::error!("acpid: failed to create event queue: {:?}", err); ++ std::process::exit(1); ++ } ++ }; ++ let socket = match Socket::nonblock() { ++ Ok(s) => s, ++ Err(err) => { ++ log::error!("acpid: failed to create scheme socket: {:?}", err); ++ std::process::exit(1); ++ } ++ }; + + let mut scheme = self::scheme::AcpiScheme::new(&acpi_context, &socket); + let mut handler = Blocking::new(&socket, 16); + +- event_queue ++ if let Err(err) = event_queue + .subscribe(shutdown_pipe.as_raw_fd() as usize, 0, EventFlags::READ) +- .expect("acpid: failed to register shutdown pipe for event queue"); +- event_queue ++ { ++ log::error!("acpid: failed to register shutdown pipe for event queue: {:?}", err); ++ std::process::exit(1); ++ } ++ if let Err(err) = event_queue + .subscribe(socket.inner().raw(), 1, EventFlags::READ) +- .expect("acpid: failed to register scheme socket for event queue"); ++ { ++ log::error!("acpid: failed to register scheme socket for event queue: {:?}", err); ++ std::process::exit(1); ++ } + +- register_sync_scheme(&socket, "acpi", 
&mut scheme) +- .expect("acpid: failed to register acpi scheme to namespace"); ++ if let Err(err) = register_sync_scheme(&socket, "acpi", &mut scheme) { ++ log::error!("acpid: failed to register acpi scheme to namespace: {:?}", err); ++ std::process::exit(1); ++ } + + daemon.ready(); + +- libredox::call::setrens(0, 0).expect("acpid: failed to enter null namespace"); ++ if let Err(err) = libredox::call::setrens(0, 0) { ++ log::error!("acpid: failed to enter null namespace: {}", err); ++ std::process::exit(1); ++ } + + let mut mounted = true; + while mounted { +- let Some(event) = event_queue +- .next() +- .transpose() +- .expect("acpid: failed to read event file") +- else { +- break; ++ let event = match event_queue.next().transpose() { ++ Ok(Some(ev)) => ev, ++ Ok(None) => break, ++ Err(err) => { ++ log::error!("acpid: failed to read event file: {:?}", err); ++ break; ++ } + }; + + if event.fd == socket.inner().raw() { + loop { +- match handler +- .process_requests_nonblocking(&mut scheme) +- .expect("acpid: failed to process requests") +- { +- ControlFlow::Continue(()) => {} +- ControlFlow::Break(()) => break, ++ match handler.process_requests_nonblocking(&mut scheme) { ++ Ok(flow) => match flow { ++ ControlFlow::Continue(()) => {} ++ ControlFlow::Break(()) => break, ++ }, ++ Err(err) => { ++ log::error!("acpid: failed to process requests: {:?}", err); ++ break; ++ } + } + } + } else if event.fd == shutdown_pipe.as_raw_fd() as usize { +diff --git a/drivers/pcid/src/main.rs b/drivers/pcid/src/main.rs +index 61cd9a787..cad33114b 100644 +--- a/drivers/pcid/src/main.rs ++++ b/drivers/pcid/src/main.rs +@@ -4,7 +4,7 @@ + + use std::collections::BTreeMap; + +-use log::{debug, info, trace, warn}; ++use log::{debug, error, info, trace, warn}; + use pci_types::capability::PciCapability; + use pci_types::{ + Bar as TyBar, CommandRegister, EndpointHeader, HeaderType, PciAddress, +@@ -259,17 +259,25 @@ fn daemon(daemon: daemon::Daemon) -> ! 
{ + Ok(register_pci) => { + let access_id = scheme.access(); + +- let access_fd = socket ++ let access_fd = match socket + .create_this_scheme_fd(0, access_id, syscall::O_RDWR, 0) +- .expect("failed to issue this resource"); +- let access_bytes = access_fd.to_ne_bytes(); +- let _ = register_pci +- .call_wo( ++ { ++ Ok(fd) => Some(fd), ++ Err(err) => { ++ warn!("pcid: failed to issue acpi resource fd: {:?}", err); ++ None ++ } ++ }; ++ if let Some(access_fd) = access_fd { ++ let access_bytes = access_fd.to_ne_bytes(); ++ if let Err(err) = register_pci.call_wo( + &access_bytes, + syscall::CallFlags::WRITE | syscall::CallFlags::FD, + &[], +- ) +- .expect("failed to send pci_fd to acpid"); ++ ) { ++ warn!("pcid: failed to send pci_fd to acpid: {:?}", err); ++ } ++ } + } + Err(err) => { + if err.errno() == libredox::errno::ENODEV { +@@ -304,14 +312,17 @@ fn daemon(daemon: daemon::Daemon) -> ! { + } + debug!("Enumeration complete, now starting pci scheme"); + +- register_sync_scheme(&socket, "pci", &mut scheme) +- .expect("failed to register pci scheme to namespace"); ++ if let Err(err) = register_sync_scheme(&socket, "pci", &mut scheme) { ++ error!("pcid: failed to register pci scheme to namespace: {:?}", err); ++ std::process::exit(1); ++ } + + let _ = daemon.ready(); + +- handler +- .process_requests_blocking(scheme) +- .expect("pcid: failed to process requests"); ++ handler.process_requests_blocking(scheme).unwrap_or_else(|err| { ++ error!("pcid: failed to process requests: {:?}", err); ++ std::process::exit(1); ++ }); + } + + fn scan_device( +diff --git a/init/src/main.rs b/init/src/main.rs +index 5682cf445..72c97f53c 100644 +--- a/init/src/main.rs ++++ b/init/src/main.rs +@@ -166,19 +166,29 @@ fn main() { + } + }; + for entry in entries { ++ let Some(file_name) = entry.file_name().and_then(|n| n.to_str()) else { ++ eprintln!("init: skipping entry with invalid filename: {}", entry.display()); ++ continue; ++ }; + scheduler.schedule_start_and_report_errors( + &mut 
unit_store, +- UnitId(entry.file_name().unwrap().to_str().unwrap().to_owned()), ++ UnitId(file_name.to_owned()), + ); + } + }; + + scheduler.step(&mut unit_store, &mut init_config); + +- libredox::call::setrens(0, 0).expect("init: failed to enter null namespace"); ++ if let Err(err) = libredox::call::setrens(0, 0) { ++ eprintln!("init: failed to enter null namespace: {}", err); ++ return; ++ } + + loop { + let mut status = 0; +- libredox::call::waitpid(0, &mut status, 0).unwrap(); ++ match libredox::call::waitpid(0, &mut status, 0) { ++ Ok(()) => {} ++ Err(err) => eprintln!("init: waitpid error: {}", err), ++ } + } + } +diff --git a/init/src/scheduler.rs b/init/src/scheduler.rs +index d42a4e570..333e0e20e 100644 +--- a/init/src/scheduler.rs ++++ b/init/src/scheduler.rs +@@ -1,7 +1,16 @@ + use std::collections::VecDeque; ++use std::io::Read; ++use std::os::fd::AsRawFd; ++use std::os::unix::process::CommandExt; ++use std::process::Command; ++use std::time::Duration; ++use std::{env, io}; + + use crate::InitConfig; +-use crate::unit::{Unit, UnitId, UnitKind, UnitStore}; ++use crate::service::ServiceType; ++use crate::unit::{RestartPolicy, UnitId, UnitKind, UnitStore}; ++ ++const MAX_DEPENDENCY_WAIT_RETRIES: u32 = 1000; + + pub struct Scheduler { + pending: VecDeque, +@@ -10,10 +19,12 @@ pub struct Scheduler { + struct Job { + unit: UnitId, + kind: JobKind, ++ dep_retries: u32, + } + + enum JobKind { + Start, ++ Restart { backoff: Duration }, + } + + impl Scheduler { +@@ -50,37 +61,97 @@ impl Scheduler { + self.pending.push_back(Job { + unit: unit_id, + kind: JobKind::Start, ++ dep_retries: 0, + }); + } + } + + pub fn step(&mut self, unit_store: &mut UnitStore, init_config: &mut InitConfig) { + 'a: loop { +- let Some(job) = self.pending.pop_front() else { ++ let Some(mut job) = self.pending.pop_front() else { + return; + }; + + match job.kind { + JobKind::Start => { +- let unit = unit_store.unit_mut(&job.unit); ++ let unit = unit_store.unit(&job.unit); + ++ let 
timeout_secs = unit.info.dependency_timeout_secs; ++ let mut deps_pending = false; + for dep in &unit.info.requires_weak { + for pending_job in &self.pending { + if &pending_job.unit == dep { +- self.pending.push_back(job); +- continue 'a; ++ deps_pending = true; ++ break; + } + } ++ if deps_pending { ++ break; ++ } + } + +- run(unit, init_config); ++ if deps_pending { ++ if timeout_secs > 0 { ++ job.dep_retries += 1; ++ let max_retries = timeout_secs * 100; // ~10ms per retry ++ if job.dep_retries > max_retries as u32 { ++ eprintln!( ++ "init: {}: dependency timeout after {}s, failing", ++ job.unit.0, timeout_secs ++ ); ++ continue; ++ } ++ } else if job.dep_retries >= MAX_DEPENDENCY_WAIT_RETRIES { ++ eprintln!( ++ "init: {}: dependency wait exceeded {} retries, failing", ++ job.unit.0, MAX_DEPENDENCY_WAIT_RETRIES ++ ); ++ continue; ++ } ++ job.dep_retries += 1; ++ self.pending.push_back(job); ++ continue 'a; ++ } ++ ++ if let Err(restart) = run(unit_store, &job.unit, init_config) { ++ if let Some(backoff) = restart { ++ self.pending.push_back(Job { ++ unit: job.unit.clone(), ++ kind: JobKind::Restart { backoff }, ++ dep_retries: 0, ++ }); ++ } ++ } ++ } ++ JobKind::Restart { backoff } => { ++ std::thread::sleep(backoff); ++ let next_backoff = (backoff * 2).min(Duration::from_secs(60)); ++ if let Err(restart) = run(unit_store, &job.unit, init_config) { ++ if let Some(_next) = restart { ++ self.pending.push_back(Job { ++ unit: job.unit, ++ kind: JobKind::Restart { ++ backoff: next_backoff, ++ }, ++ dep_retries: 0, ++ }); ++ } ++ } + } + } + } + } + } + +-fn run(unit: &mut Unit, config: &mut InitConfig) { ++fn run( ++ unit_store: &UnitStore, ++ unit_id: &UnitId, ++ config: &mut InitConfig, ++) -> Result<(), Option> { ++ let unit = unit_store.unit(unit_id); ++ ++ let restart_policy = unit.info.restart; ++ + match &unit.kind { + UnitKind::LegacyScript { script } => { + for cmd in script.clone() { +@@ -89,11 +160,12 @@ fn run(unit: &mut Unit, config: &mut InitConfig) { 
+ } + cmd.run(config); + } ++ Ok(()) + } + UnitKind::Service { service } => { + if config.skip_cmd.contains(&service.cmd) { + eprintln!("Skipping '{} {}'", service.cmd, service.args.join(" ")); +- return; ++ return Ok(()); + } + if config.log_debug { + eprintln!( +@@ -102,7 +174,44 @@ fn run(unit: &mut Unit, config: &mut InitConfig) { + service.cmd, + ); + } +- service.spawn(&config.envs); ++ ++ let mut command = Command::new(&service.cmd); ++ command.args(&service.args); ++ command.env_clear(); ++ for env in &service.inherit_envs { ++ if let Some(value) = env::var_os(env) { ++ command.env(env, value); ++ } ++ } ++ command.envs(config.envs.iter().map(|(k, v)| (k.as_str(), v.as_os_str()))); ++ ++ let (read_pipe, write_pipe) = match io::pipe() { ++ Ok(p) => p, ++ Err(err) => { ++ eprintln!("init: pipe failed for {}: {}", service.cmd, err); ++ return Err(restart_signal(restart_policy)); ++ } ++ }; ++ ++ let write_fd: std::os::fd::OwnedFd = write_pipe.into(); ++ unsafe { ++ command.env("INIT_NOTIFY", format!("{}", write_fd.as_raw_fd())); ++ command.pre_exec(move || { ++ if unsafe { libc::fcntl(write_fd.as_raw_fd(), libc::F_SETFD, 0) } == -1 { ++ Err(io::Error::last_os_error()) ++ } else { ++ Ok(()) ++ } ++ }); ++ } ++ ++ let status = service_spawn_status(read_pipe, command, &service.type_, &service.cmd); ++ ++ match status { ++ SpawnStatus::Success => Ok(()), ++ SpawnStatus::Failed => Err(restart_signal(restart_policy)), ++ SpawnStatus::Async => Ok(()), ++ } + } + UnitKind::Target {} => { + if config.log_debug { +@@ -111,6 +220,113 @@ fn run(unit: &mut Unit, config: &mut InitConfig) { + unit.info.description.as_ref().unwrap_or(&unit.id.0), + ); + } ++ Ok(()) ++ } ++ } ++} ++ ++enum SpawnStatus { ++ Success, ++ Failed, ++ Async, ++} ++ ++fn restart_signal(policy: RestartPolicy) -> Option { ++ match policy { ++ RestartPolicy::No => None, ++ RestartPolicy::OnFailure | RestartPolicy::Always => Some(Duration::from_secs(1)), ++ } ++} ++ ++fn service_spawn_status( ++ mut 
read_pipe: impl Read + AsRawFd, ++ mut command: Command, ++ service_type: &ServiceType, ++ cmd: &str, ++) -> SpawnStatus { ++ let mut child = match command.spawn() { ++ Ok(child) => child, ++ Err(err) => { ++ eprintln!("init: failed to execute {}: {}", cmd, err); ++ return SpawnStatus::Failed; ++ } ++ }; ++ ++ match service_type { ++ ServiceType::Notify => match read_pipe.read_exact(&mut [0]) { ++ Ok(()) => SpawnStatus::Success, ++ Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => { ++ eprintln!("init: {cmd} exited without notifying readiness"); ++ SpawnStatus::Failed ++ } ++ Err(err) => { ++ eprintln!("init: failed to wait for {cmd}: {err}"); ++ SpawnStatus::Failed ++ } ++ }, ++ ServiceType::Scheme(scheme) => { ++ let scheme = scheme.clone(); ++ let mut new_fd = usize::MAX; ++ let res = loop { ++ match syscall::call_ro( ++ read_pipe.as_raw_fd() as usize, ++ unsafe { plain::as_mut_bytes(&mut new_fd) }, ++ syscall::CallFlags::FD | syscall::CallFlags::FD_UPPER, ++ &[], ++ ) { ++ Err(syscall::Error { ++ errno: syscall::EINTR, ++ }) => continue, ++ Ok(0) => break SpawnStatus::Failed, ++ Ok(1) => break SpawnStatus::Success, ++ Ok(n) => { ++ eprintln!("init: incorrect amount of fds {n} returned from {cmd}"); ++ break SpawnStatus::Failed; ++ } ++ Err(err) => { ++ eprintln!("init: failed to wait for {cmd}: {err}"); ++ break SpawnStatus::Failed; ++ } ++ } ++ }; ++ ++ if matches!(res, SpawnStatus::Success) { ++ match libredox::call::getns() { ++ Ok(current_namespace_fd) => { ++ if let Err(err) = libredox::call::register_scheme_to_ns( ++ current_namespace_fd, ++ &scheme, ++ new_fd, ++ ) { ++ eprintln!("init: scheme registration failed for {cmd}: {err}"); ++ return SpawnStatus::Failed; ++ } ++ } ++ Err(err) => { ++ eprintln!("init: getns failed for {cmd}: {err}"); ++ return SpawnStatus::Failed; ++ } ++ } ++ } ++ res ++ } ++ ServiceType::Oneshot => { ++ drop(read_pipe); ++ match child.wait() { ++ Ok(exit_status) => { ++ if !exit_status.success() { ++ eprintln!("init: 
{cmd} failed with {exit_status}"); ++ SpawnStatus::Failed ++ } else { ++ SpawnStatus::Success ++ } ++ } ++ Err(err) => { ++ eprintln!("init: failed to wait for {cmd}: {err}"); ++ SpawnStatus::Failed ++ } ++ } + } ++ ServiceType::OneshotAsync => SpawnStatus::Async, + } + } +diff --git a/init/src/unit.rs b/init/src/unit.rs +index 98053cb2d..414b92d17 100644 +--- a/init/src/unit.rs ++++ b/init/src/unit.rs +@@ -125,6 +125,25 @@ pub struct UnitInfo { + pub condition_architecture: Option>, + // FIXME replace this with hwd reading from the devicetree + pub condition_board: Option>, ++ /// Restart policy for the service (only applies to Service units) ++ #[serde(default)] ++ pub restart: RestartPolicy, ++ /// Maximum time in seconds to wait for dependencies before failing (0 = no timeout) ++ #[serde(default)] ++ pub dependency_timeout_secs: u64, ++} ++ ++/// Restart policy for managed services ++#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)] ++#[serde(rename_all = "kebab-case")] ++pub enum RestartPolicy { ++ /// Never restart the service (default) ++ #[default] ++ No, ++ /// Restart on failure (non-zero exit or crash) ++ OnFailure, ++ /// Always restart (on any exit) ++ Always, + } + + fn true_bool() -> bool { +@@ -190,6 +209,8 @@ impl Unit { + requires_weak: script.1, + condition_architecture: None, + condition_board: None, ++ restart: RestartPolicy::No, ++ dependency_timeout_secs: 0, + }, + kind: UnitKind::LegacyScript { script: script.0 }, + }); diff --git a/sources/redbear-0.1.0/patches/P5-proc-setschedpolicy.patch b/sources/redbear-0.1.0/patches/P5-proc-setschedpolicy.patch new file mode 100644 index 00000000..07e234a1 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-proc-setschedpolicy.patch @@ -0,0 +1,152 @@ +diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs +index 47588e1..6578761 100644 +--- a/src/scheme/proc.rs ++++ b/src/scheme/proc.rs +@@ -1,7 +1,7 @@ + use crate::{ + context::{ + self, +- context::{HardBlockedReason, LockedFdTbl, 
SignalState}, ++ context::{HardBlockedReason, LockedFdTbl, SchedPolicy, SignalState}, + file::InternalFlags, + memory::{handle_notify_files, AddrSpace, AddrSpaceWrapper, Grant, PageSpan}, + Context, ContextLock, Status, +@@ -105,6 +105,7 @@ enum ContextHandle { + // Attr handles, to set ens/euid/egid/pid. + Authority, + Attr, ++ Groups, + + Status { + privileged: bool, +@@ -145,6 +146,7 @@ enum ContextHandle { + // directory. + OpenViaDup, + SchedAffinity, ++ SchedPolicy, + + MmapMinAddr(Arc), + } +@@ -249,6 +251,9 @@ impl ProcScheme { + false, + ), + "sched-affinity" => (ContextHandle::SchedAffinity, true), ++ // TODO: Switch this kernel-local proc handle over to a stable upstream ++ // redox_syscall ProcCall::SetSchedPolicy opcode once that lands. ++ "sched-policy" => (ContextHandle::SchedPolicy, false), + "status" => (ContextHandle::Status { privileged: false }, false), + _ if path.starts_with("auth-") => { + let nonprefix = &path["auth-".len()..]; +@@ -261,6 +266,7 @@ impl ProcScheme { + let handle = match actual_name { + "attrs" => ContextHandle::Attr, + "status" => ContextHandle::Status { privileged: true }, ++ "groups" => ContextHandle::Groups, + _ => return Err(Error::new(ENOENT)), + }; + +@@ -306,6 +312,11 @@ impl ProcScheme { + let id = NonZeroUsize::new(NEXT_ID.fetch_add(1, Ordering::Relaxed)) + .ok_or(Error::new(EMFILE))?; + let context = context::spawn(true, Some(id), ret, token)?; ++ { ++ let parent_groups = ++ context::current().read(token.token()).groups.clone(); ++ context.write(token.token()).groups = parent_groups; ++ } + HANDLES.write(token.token()).insert( + id.get(), + Handle { +@@ -1165,6 +1176,20 @@ impl ContextHandle { + + Ok(size_of_val(&mask)) + } ++ Self::SchedPolicy => { ++ if buf.len() != 2 { ++ return Err(Error::new(EINVAL)); ++ } ++ ++ let [policy, rt_priority] = unsafe { buf.read_exact::<[u8; 2]>()? 
}; ++ let sched_policy = SchedPolicy::try_from_raw(policy).ok_or(Error::new(EINVAL))?; ++ ++ context ++ .write(token.token()) ++ .set_sched_policy(sched_policy, rt_priority); ++ ++ Ok(2) ++ } + ContextHandle::Status { privileged } => { + let mut args = buf.usizes(); + +@@ -1268,9 +1293,42 @@ impl ContextHandle { + guard.pid = info.pid as usize; + guard.euid = info.euid; + guard.egid = info.egid; +- guard.prio = (info.prio as usize).min(39); ++ guard.set_sched_other_prio(info.prio as usize); + Ok(size_of::()) + } ++ Self::Groups => { ++ const NGROUPS_MAX: usize = 65536; ++ if buf.len() % size_of::() != 0 { ++ return Err(Error::new(EINVAL)); ++ } ++ let count = buf.len() / size_of::(); ++ if count > NGROUPS_MAX { ++ return Err(Error::new(EINVAL)); ++ } ++ let mut groups = Vec::with_capacity(count); ++ for chunk in buf.in_exact_chunks(size_of::()).take(count) { ++ groups.push(chunk.read_u32()?); ++ } ++ let proc_id = { ++ let guard = context.read(token.token()); ++ guard.owner_proc_id ++ }; ++ { ++ let mut guard = context.write(token.token()); ++ guard.groups = groups.clone(); ++ } ++ if let Some(pid) = proc_id { ++ let mut contexts = context::contexts(token.downgrade()); ++ let (contexts, mut t) = contexts.token_split(); ++ for context_ref in contexts.iter() { ++ let mut ctx = context_ref.write(t.token()); ++ if ctx.owner_proc_id == Some(pid) { ++ ctx.groups = groups.clone(); ++ } ++ } ++ } ++ Ok(count * size_of::()) ++ } + ContextHandle::OpenViaDup => { + let mut args = buf.usizes(); + +@@ -1427,6 +1485,11 @@ impl ContextHandle { + + buf.copy_exactly(crate::cpu_set::mask_as_bytes(&mask))?; + Ok(size_of_val(&mask)) ++ } ++ ContextHandle::SchedPolicy => { ++ let context = context.read(token.token()); ++ let data = [context.sched_policy as u8, context.sched_rt_priority]; ++ buf.copy_common_bytes_from_slice(&data) + } // TODO: Replace write() with SYS_SENDFD? + ContextHandle::Status { .. 
} => { + let status = { +@@ -1475,6 +1538,15 @@ impl ContextHandle { + debug_name, + }) + } ++ Self::Groups => { ++ let c = &context.read(token.token()); ++ let max = buf.len() / size_of::(); ++ let count = c.groups.len().min(max); ++ for (chunk, gid) in buf.in_exact_chunks(size_of::()).zip(&c.groups).take(count) { ++ chunk.copy_from_slice(&gid.to_ne_bytes())?; ++ } ++ Ok(count * size_of::()) ++ } + ContextHandle::Sighandler => { + let data = match context.read(token.token()).sig { + Some(ref sig) => SetSighandlerData { diff --git a/sources/redbear-0.1.0/patches/P5-pthread-sigmask-race.patch b/sources/redbear-0.1.0/patches/P5-pthread-sigmask-race.patch new file mode 100644 index 00000000..b7b1677c --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-pthread-sigmask-race.patch @@ -0,0 +1,63 @@ +diff --git a/src/header/signal/mod.rs b/src/header/signal/mod.rs +index f049573..f3d665c 100644 +--- a/src/header/signal/mod.rs ++++ b/src/header/signal/mod.rs +@@ -2,7 +2,10 @@ + //! + //! See . + +-use core::{mem, ptr}; ++use core::{ ++ mem, ptr, ++ sync::atomic::Ordering, ++}; + + use cbitset::BitSet; + +@@ -32,6 +35,9 @@ pub mod sys; + #[path = "redox.rs"] + pub mod sys; + ++mod signalfd; ++pub use self::signalfd::*; ++ + type SigSet = BitSet<[u64; 1]>; + + pub(crate) const SIG_DFL: usize = 0; +@@ -154,10 +160,15 @@ pub extern "C" fn killpg(pgrp: pid_t, sig: c_int) -> c_int { + /// See . 
+ #[unsafe(no_mangle)] + pub unsafe extern "C" fn pthread_kill(thread: pthread_t, sig: c_int) -> c_int { +- let os_tid = { +- let pthread = unsafe { &*(thread as *const crate::pthread::Pthread) }; +- unsafe { pthread.os_tid.get().read() } +- }; ++ let pthread = unsafe { &*(thread as *const crate::pthread::Pthread) }; ++ let os_tid = unsafe { pthread.os_tid.get().read() }; ++ let flags = crate::pthread::PthreadFlags::from_bits_retain( ++ pthread.flags.load(Ordering::Acquire), ++ ); ++ if flags.contains(crate::pthread::PthreadFlags::FINISHED) { ++ return errno::ESRCH; ++ } ++ + crate::header::pthread::e(unsafe { Sys::rlct_kill(os_tid, sig as usize) }) + } + +@@ -168,12 +179,10 @@ pub unsafe extern "C" fn pthread_sigmask( + set: *const sigset_t, + oldset: *mut sigset_t, + ) -> c_int { +- // On Linux and Redox, pthread_sigmask and sigprocmask are equivalent +- if unsafe { sigprocmask(how, set, oldset) } == 0 { +- 0 +- } else { +- //TODO: Fix race +- platform::ERRNO.get() ++ let filtered_set = unsafe { set.as_ref().map(|&block| block & !RLCT_SIGNAL_MASK) }; ++ match unsafe { Sys::sigprocmask(how, filtered_set.as_ref(), oldset.as_mut()) } { ++ Ok(()) => 0, ++ Err(errno) => errno.0, + } + } + diff --git a/sources/redbear-0.1.0/patches/P5-robust-mutex-enotrec-fix.patch b/sources/redbear-0.1.0/patches/P5-robust-mutex-enotrec-fix.patch new file mode 100644 index 00000000..54388ec5 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-robust-mutex-enotrec-fix.patch @@ -0,0 +1,87 @@ +Fix ENOTRECOVERABLE returned for non-robust mutexes and register main +thread in OS_TID_TO_PTHREAD. + +The robust mutex liveness check (mutex_owner_id_is_live) was returning +ENOTRECOVERABLE for non-robust mutexes when the owner appeared dead. +Per POSIX, the behaviour of a non-robust mutex whose owner has died is +undefined; returning an error crashes every Rust std::sync::Mutex user. +For lock_inner, fall through to spin/futex-wait instead. For try_lock, +return EBUSY instead. 
+ +Additionally, pthread::init() never registered the main thread in +OS_TID_TO_PTHREAD, so any mutex owned by the main thread would always +appear to have a dead owner, making the liveness check unreliable. + +diff --git a/src/pthread/mod.rs b/src/pthread/mod.rs +index 8243a48..c455a67 100644 +--- a/src/pthread/mod.rs ++++ b/src/pthread/mod.rs +@@ -43,9 +43,13 @@ pub unsafe fn init() { + thread.stack_size = STACK_SIZE; + } + +- unsafe { Tcb::current() } +- .expect_notls("no TCB present for main thread") +- .pthread = thread; ++ let tcb = unsafe { Tcb::current() } ++ .expect_notls("no TCB present for main thread"); ++ tcb.pthread = thread; ++ ++ OS_TID_TO_PTHREAD ++ .lock() ++ .insert(Sys::current_os_tid(), ForceSendSync(tcb as *const Tcb as *mut Tcb)); + } + + //static NEXT_INDEX: AtomicU32 = AtomicU32::new(FIRST_THREAD_IDX + 1); +diff --git a/src/sync/pthread_mutex.rs b/src/sync/pthread_mutex.rs +index af0c429..1b2b3ca 100644 +--- a/src/sync/pthread_mutex.rs ++++ b/src/sync/pthread_mutex.rs +@@ -136,11 +136,7 @@ impl RlctMutex { + Err(thread) => { + let owner = thread & INDEX_MASK; + +- if !crate::pthread::mutex_owner_id_is_live(owner) { +- if !self.robust { +- return Err(Errno(ENOTRECOVERABLE)); +- } +- ++ if !crate::pthread::mutex_owner_id_is_live(owner) && self.robust { + let new_value = (thread & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; + match self.inner.compare_exchange( + thread, +@@ -152,6 +155,11 @@ impl RlctMutex { + Ok(_) => return self.finish_lock_acquire(true), + Err(_) => continue, + } ++ } else if !crate::pthread::mutex_owner_id_is_live(owner) { ++ // Non-robust mutex with apparently-dead owner: per POSIX the ++ // behaviour is undefined. We conservatively keep spinning / ++ // futex-waiting rather than returning ENOTRECOVERABLE, which ++ // would crash any Rust std::sync::Mutex user. 
+ } + + if spins_left > 0 { +@@ -241,9 +250,6 @@ impl RlctMutex { + + if current & FUTEX_OWNER_DIED != 0 || (owner != 0 && !crate::pthread::mutex_owner_id_is_live(owner)) { +- if !self.robust { +- return Err(Errno(ENOTRECOVERABLE)); +- } +- ++ if self.robust { + let new_value = (current & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; + match self.inner.compare_exchange( + current, +@@ -257,6 +269,11 @@ impl RlctMutex { + Ok(_) => return self.finish_lock_acquire(true), + Err(_) => continue, + } ++ } else { ++ // Non-robust mutex: owner appears dead but POSIX behaviour is ++ // undefined; report busy rather than ENOTRECOVERABLE. ++ return Err(Errno(EBUSY)); ++ } + } + + return Err(Errno(EBUSY)); diff --git a/sources/redbear-0.1.0/patches/P5-robust-mutexes.patch b/sources/redbear-0.1.0/patches/P5-robust-mutexes.patch new file mode 100644 index 00000000..1c5880a4 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-robust-mutexes.patch @@ -0,0 +1,380 @@ +diff --git a/src/sync/pthread_mutex.rs b/src/sync/pthread_mutex.rs +index 29bad63..af0c429 100644 +--- a/src/sync/pthread_mutex.rs ++++ b/src/sync/pthread_mutex.rs +@@ -1,3 +1,4 @@ ++use alloc::boxed::Box; + use core::{ + cell::Cell, + sync::atomic::{AtomicU32 as AtomicUint, Ordering}, +@@ -6,10 +7,9 @@ use core::{ + use crate::{ + error::Errno, + header::{bits_timespec::timespec, errno::*, pthread::*}, ++ platform::{Pal, Sys, types::c_int}, + }; + +-use crate::platform::{Pal, Sys, types::c_int}; +- + use super::FutexWaitResult; + + pub struct RlctMutex { +@@ -21,15 +21,22 @@ pub struct RlctMutex { + robust: bool, + } + ++pub struct RobustMutexNode { ++ pub next: *mut RobustMutexNode, ++ pub prev: *mut RobustMutexNode, ++ pub mutex: *const RlctMutex, ++} ++ + const STATE_UNLOCKED: u32 = 0; + const WAITING_BIT: u32 = 1 << 31; +-const INDEX_MASK: u32 = !WAITING_BIT; ++const FUTEX_OWNER_DIED: u32 = 1 << 30; ++const INDEX_MASK: u32 = !(WAITING_BIT | FUTEX_OWNER_DIED); + + // TODO: Lower limit is probably better. 
+ const RECURSIVE_COUNT_MAX_INCLUSIVE: u32 = u32::MAX; + // TODO: How many spins should we do before it becomes more time-economical to enter kernel mode + // via futexes? +-const SPIN_COUNT: usize = 0; ++const SPIN_COUNT: usize = 100; + + impl RlctMutex { + pub(crate) fn new(attr: &RlctMutexAttr) -> Result { +@@ -69,13 +76,25 @@ impl RlctMutex { + Ok(0) + } + pub fn make_consistent(&self) -> Result<(), Errno> { +- todo_skip!(0, "pthread robust mutexes: not implemented"); +- Ok(()) ++ debug_assert!(self.robust, "make_consistent called on non-robust mutex"); ++ ++ if !self.robust { ++ return Err(Errno(EINVAL)); ++ } ++ ++ let current = self.inner.load(Ordering::Relaxed); ++ let owner = current & INDEX_MASK; ++ ++ if owner == os_tid_invalid_after_fork() && current & FUTEX_OWNER_DIED != 0 { ++ self.inner.store(0, Ordering::Release); ++ Ok(()) ++ } else { ++ Err(Errno(EINVAL)) ++ } + } + fn lock_inner(&self, deadline: Option<×pec>) -> Result<(), Errno> { + let this_thread = os_tid_invalid_after_fork(); +- +- //let mut spins_left = SPIN_COUNT; ++ let mut spins_left = SPIN_COUNT; + + loop { + let result = self.inner.compare_exchange_weak( +@@ -86,45 +105,59 @@ impl RlctMutex { + ); + + match result { +- // CAS succeeded +- Ok(_) => { +- if self.ty == Ty::Recursive { +- self.increment_recursive_count()?; +- } +- return Ok(()); +- } +- // CAS failed, but the mutex was recursive and we already own the lock. ++ Ok(_) => return self.finish_lock_acquire(false), + Err(thread) if thread & INDEX_MASK == this_thread && self.ty == Ty::Recursive => { + self.increment_recursive_count()?; + return Ok(()); + } +- // CAS failed, but the mutex was error-checking and we already own the lock. + Err(thread) if thread & INDEX_MASK == this_thread && self.ty == Ty::Errck => { +- return Err(Errno(EAGAIN)); ++ return Err(Errno(EDEADLK)); + } +- // CAS spuriously failed, simply retry the CAS. TODO: Use core::hint::spin_loop()? 
+- Err(thread) if thread & INDEX_MASK == 0 => { +- continue; ++ Err(thread) if thread & FUTEX_OWNER_DIED != 0 && thread & INDEX_MASK == 0 => { ++ return Err(Errno(ENOTRECOVERABLE)); + } +- // CAS failed because some other thread owned the lock. We must now wait. ++ Err(thread) if thread & FUTEX_OWNER_DIED != 0 => { ++ if !self.robust { ++ return Err(Errno(ENOTRECOVERABLE)); ++ } ++ ++ let new_value = (thread & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; ++ match self.inner.compare_exchange( ++ thread, ++ new_value, ++ Ordering::Acquire, ++ Ordering::Relaxed, ++ ) { ++ Ok(_) => return self.finish_lock_acquire(true), ++ Err(_) => continue, ++ } ++ } ++ Err(thread) if thread & INDEX_MASK == 0 => continue, + Err(thread) => { +- /*if spins_left > 0 { +- // TODO: Faster to spin trying to load the flag, compared to CAS? ++ let owner = thread & INDEX_MASK; ++ ++ if !crate::pthread::mutex_owner_id_is_live(owner) { ++ if !self.robust { ++ return Err(Errno(ENOTRECOVERABLE)); ++ } ++ ++ let new_value = (thread & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; ++ match self.inner.compare_exchange( ++ thread, ++ new_value, ++ Ordering::Acquire, ++ Ordering::Relaxed, ++ ) { ++ Ok(_) => return self.finish_lock_acquire(true), ++ Err(_) => continue, ++ } ++ } ++ ++ if spins_left > 0 { + spins_left -= 1; + core::hint::spin_loop(); + continue; + } +- +- spins_left = SPIN_COUNT; +- +- let inner = self.inner.fetch_or(WAITING_BIT, Ordering::Relaxed); +- +- if inner == STATE_UNLOCKED { +- continue; +- }*/ +- +- // If the mutex is not robust, simply futex_wait until unblocked. 
+- //crate::sync::futex_wait(&self.inner, inner | WAITING_BIT, None); + if crate::sync::futex_wait(&self.inner, thread, deadline) + == FutexWaitResult::TimedOut + { +@@ -140,6 +173,20 @@ impl RlctMutex { + pub fn lock_with_timeout(&self, deadline: ×pec) -> Result<(), Errno> { + self.lock_inner(Some(deadline)) + } ++ fn finish_lock_acquire(&self, owner_dead: bool) -> Result<(), Errno> { ++ if self.ty == Ty::Recursive { ++ self.increment_recursive_count()?; ++ } ++ if self.robust { ++ add_to_robust_list(self); ++ } ++ ++ if owner_dead { ++ Err(Errno(EOWNERDEAD)) ++ } else { ++ Ok(()) ++ } ++ } + fn increment_recursive_count(&self) -> Result<(), Errno> { + // We don't have to worry about asynchronous signals here, since pthread_mutex_trylock + // is not async-signal-safe. +@@ -161,41 +208,65 @@ impl RlctMutex { + pub fn try_lock(&self) -> Result<(), Errno> { + let this_thread = os_tid_invalid_after_fork(); + +- // TODO: If recursive, omitting CAS may be faster if it is already owned by this thread. 
+- let result = self.inner.compare_exchange( +- STATE_UNLOCKED, +- this_thread, +- Ordering::Acquire, +- Ordering::Relaxed, +- ); ++ loop { ++ let current = self.inner.load(Ordering::Relaxed); ++ ++ if current == STATE_UNLOCKED { ++ match self.inner.compare_exchange( ++ STATE_UNLOCKED, ++ this_thread, ++ Ordering::Acquire, ++ Ordering::Relaxed, ++ ) { ++ Ok(_) => return self.finish_lock_acquire(false), ++ Err(_) => continue, ++ } ++ } + +- if self.ty == Ty::Recursive { +- match result { +- Err(index) if index & INDEX_MASK != this_thread => return Err(Errno(EBUSY)), +- _ => (), ++ let owner = current & INDEX_MASK; ++ ++ if owner == this_thread && self.ty == Ty::Recursive { ++ self.increment_recursive_count()?; ++ return Ok(()); + } + +- self.increment_recursive_count()?; ++ if owner == this_thread && self.ty == Ty::Errck { ++ return Err(Errno(EDEADLK)); ++ } + +- return Ok(()); +- } ++ if current & FUTEX_OWNER_DIED != 0 && owner == 0 { ++ return Err(Errno(ENOTRECOVERABLE)); ++ } + +- match result { +- Ok(_) => Ok(()), +- Err(index) if index & INDEX_MASK == this_thread && self.ty == Ty::Errck => { +- Err(Errno(EDEADLK)) ++ if current & FUTEX_OWNER_DIED != 0 || (owner != 0 && !crate::pthread::mutex_owner_id_is_live(owner)) { ++ if !self.robust { ++ return Err(Errno(ENOTRECOVERABLE)); ++ } ++ ++ let new_value = (current & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread; ++ match self.inner.compare_exchange( ++ current, ++ new_value, ++ Ordering::Acquire, ++ Ordering::Relaxed, ++ ) { ++ Ok(_) => return self.finish_lock_acquire(true), ++ Err(_) => continue, ++ } + } +- Err(_) => Err(Errno(EBUSY)), ++ ++ return Err(Errno(EBUSY)); + } + } + // Safe because we are not protecting any data. 
+ pub fn unlock(&self) -> Result<(), Errno> { ++ let current = self.inner.load(Ordering::Relaxed); ++ + if self.robust || matches!(self.ty, Ty::Recursive | Ty::Errck) { +- if self.inner.load(Ordering::Relaxed) & INDEX_MASK != os_tid_invalid_after_fork() { ++ if current & INDEX_MASK != os_tid_invalid_after_fork() { + return Err(Errno(EPERM)); + } + +- // TODO: Is this fence correct? + core::sync::atomic::fence(Ordering::Acquire); + } + +@@ -208,18 +279,47 @@ impl RlctMutex { + } + } + +- self.inner.store(STATE_UNLOCKED, Ordering::Release); +- crate::sync::futex_wake(&self.inner, i32::MAX); +- /*let was_waiting = self.inner.swap(STATE_UNLOCKED, Ordering::Release) & WAITING_BIT != 0; ++ if self.robust { ++ remove_from_robust_list(self); ++ } + +- if was_waiting { +- let _ = crate::sync::futex_wake(&self.inner, 1); +- }*/ ++ let new_state = if self.robust && current & FUTEX_OWNER_DIED != 0 { ++ FUTEX_OWNER_DIED ++ } else { ++ STATE_UNLOCKED ++ }; ++ ++ self.inner.store(new_state, Ordering::Release); ++ crate::sync::futex_wake(&self.inner, i32::MAX); + + Ok(()) + } + } + ++pub(crate) unsafe fn mark_robust_mutexes_dead(thread: &crate::pthread::Pthread) { ++ let head = thread.robust_list_head.get(); ++ let this_thread = os_tid_invalid_after_fork(); ++ let mut node = unsafe { *head }; ++ ++ unsafe { *head = core::ptr::null_mut() }; ++ ++ while !node.is_null() { ++ let next = unsafe { (*node).next }; ++ let mutex = unsafe { &*(*node).mutex }; ++ let current = mutex.inner.load(Ordering::Relaxed); ++ ++ if current & INDEX_MASK == this_thread { ++ mutex ++ .inner ++ .store((current & WAITING_BIT) | FUTEX_OWNER_DIED | this_thread, Ordering::Release); ++ crate::sync::futex_wake(&mutex.inner, i32::MAX); ++ } ++ ++ unsafe { drop(Box::from_raw(node)) }; ++ node = next; ++ } ++} ++ + #[repr(u8)] + #[derive(PartialEq)] + enum Ty { +@@ -237,6 +337,54 @@ enum Ty { + #[thread_local] + static CACHED_OS_TID_INVALID_AFTER_FORK: Cell = Cell::new(0); + ++fn add_to_robust_list(mutex: 
&RlctMutex) { ++ let thread = crate::pthread::current_thread().expect("current thread not present"); ++ let node_ptr = Box::into_raw(Box::new(RobustMutexNode { ++ next: core::ptr::null_mut(), ++ prev: core::ptr::null_mut(), ++ mutex: core::ptr::from_ref(mutex), ++ })); ++ ++ unsafe { ++ let head = thread.robust_list_head.get(); ++ if !(*head).is_null() { ++ (**head).prev = node_ptr; ++ } ++ (*node_ptr).next = *head; ++ *head = node_ptr; ++ } ++} ++ ++fn remove_from_robust_list(mutex: &RlctMutex) { ++ let thread = match crate::pthread::current_thread() { ++ Some(thread) => thread, ++ None => return, ++ }; ++ ++ unsafe { ++ let mut node = *thread.robust_list_head.get(); ++ ++ while !node.is_null() { ++ if core::ptr::eq((*node).mutex, core::ptr::from_ref(mutex)) { ++ if !(*node).prev.is_null() { ++ (*(*node).prev).next = (*node).next; ++ } else { ++ *thread.robust_list_head.get() = (*node).next; ++ } ++ ++ if !(*node).next.is_null() { ++ (*(*node).next).prev = (*node).prev; ++ } ++ ++ drop(Box::from_raw(node)); ++ return; ++ } ++ ++ node = (*node).next; ++ } ++ } ++} ++ + // Assumes TIDs are unique between processes, which I only know is true for Redox. + fn os_tid_invalid_after_fork() -> u32 { + // TODO: Coordinate better if using shared == PTHREAD_PROCESS_SHARED, with up to 2^32 separate diff --git a/sources/redbear-0.1.0/patches/P5-sched-api.patch b/sources/redbear-0.1.0/patches/P5-sched-api.patch new file mode 100644 index 00000000..b7bc9a27 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-sched-api.patch @@ -0,0 +1,130 @@ +diff --git a/src/header/sched/mod.rs b/src/header/sched/mod.rs +index bcdd346..6066550 100644 +--- a/src/header/sched/mod.rs ++++ b/src/header/sched/mod.rs +@@ -27,43 +27,110 @@ pub const SCHED_RR: c_int = 1; + pub const SCHED_OTHER: c_int = 2; + + /// See . 
+-// #[unsafe(no_mangle)] ++#[unsafe(no_mangle)] + pub extern "C" fn sched_get_priority_max(policy: c_int) -> c_int { +- todo!() ++ match policy { ++ SCHED_FIFO | SCHED_RR => 99, ++ SCHED_OTHER => 0, ++ _ => { ++ crate::platform::ERRNO.set(crate::header::errno::EINVAL); ++ -1 ++ } ++ } + } + +-/// See . +-// #[unsafe(no_mangle)] ++/// See . ++#[unsafe(no_mangle)] + pub extern "C" fn sched_get_priority_min(policy: c_int) -> c_int { +- todo!() ++ match policy { ++ SCHED_FIFO | SCHED_RR => 1, ++ SCHED_OTHER => 0, ++ _ => { ++ crate::platform::ERRNO.set(crate::header::errno::EINVAL); ++ -1 ++ } ++ } + } + + /// See . +-// #[unsafe(no_mangle)] ++#[unsafe(no_mangle)] + pub unsafe extern "C" fn sched_getparam(pid: pid_t, param: *mut sched_param) -> c_int { +- todo!() ++ if pid != 0 { ++ crate::platform::ERRNO.set(crate::header::errno::ESRCH); ++ return -1; ++ } ++ crate::platform::ERRNO.set(crate::header::errno::ENOSYS); ++ -1 ++} ++ ++/// See . ++#[unsafe(no_mangle)] ++pub extern "C" fn sched_getscheduler(pid: pid_t) -> c_int { ++ if pid != 0 { ++ crate::platform::ERRNO.set(crate::header::errno::ESRCH); ++ return -1; ++ } ++ crate::platform::ERRNO.set(crate::header::errno::ENOSYS); ++ -1 + } + + /// See . +-// #[unsafe(no_mangle)] +-pub extern "C" fn sched_rr_get_interval(pid: pid_t, time: *const timespec) -> c_int { +- todo!() ++#[unsafe(no_mangle)] ++pub extern "C" fn sched_rr_get_interval(pid: pid_t, tp: *mut timespec) -> c_int { ++ if pid != 0 { ++ crate::platform::ERRNO.set(crate::header::errno::ESRCH); ++ return -1; ++ } ++ if tp.is_null() { ++ crate::platform::ERRNO.set(crate::header::errno::EINVAL); ++ return -1; ++ } ++ unsafe { ++ (*tp).tv_sec = 0; ++ (*tp).tv_nsec = 100_000_000; // 100ms default SCHED_RR quantum ++ } ++ 0 + } + + /// See . 
+-// #[unsafe(no_mangle)] +-pub unsafe extern "C" fn sched_setparam(pid: pid_t, param: *const sched_param) -> c_int { +- todo!() ++#[unsafe(no_mangle)] ++pub unsafe extern "C" fn sched_setparam(pid: pid_t, _param: *const sched_param) -> c_int { ++ if pid != 0 { ++ crate::platform::ERRNO.set(crate::header::errno::ESRCH); ++ return -1; ++ } ++ crate::platform::ERRNO.set(crate::header::errno::ENOSYS); ++ -1 + } + + /// See . +-// #[unsafe(no_mangle)] ++#[unsafe(no_mangle)] + pub extern "C" fn sched_setscheduler( + pid: pid_t, + policy: c_int, + param: *const sched_param, + ) -> c_int { +- todo!() ++ if pid != 0 { ++ crate::platform::ERRNO.set(crate::header::errno::ESRCH); ++ return -1; ++ } ++ match policy { ++ SCHED_OTHER => { ++ if !param.is_null() && unsafe { (*param).sched_priority } != 0 { ++ crate::platform::ERRNO.set(crate::header::errno::EINVAL); ++ return -1; ++ } ++ SCHED_OTHER ++ } ++ SCHED_FIFO | SCHED_RR => { ++ crate::platform::ERRNO.set(crate::header::errno::ENOSYS); ++ -1 ++ } ++ _ => { ++ crate::platform::ERRNO.set(crate::header::errno::EINVAL); ++ -1 ++ } ++ } + } + + /// See . 
diff --git a/sources/redbear-0.1.0/patches/P5-sched-policy-context.patch b/sources/redbear-0.1.0/patches/P5-sched-policy-context.patch new file mode 100644 index 00000000..067565ac --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-sched-policy-context.patch @@ -0,0 +1,176 @@ +diff --git a/src/context/context.rs b/src/context/context.rs +index c97c516..8a8b078 100644 +--- a/src/context/context.rs ++++ b/src/context/context.rs +@@ -18,7 +18,8 @@ use crate::{ + cpu_stats, + ipi::{ipi, IpiKind, IpiTarget}, + memory::{ +- allocate_p2frame, deallocate_p2frame, Enomem, Frame, RaiiFrame, RmmA, RmmArch, PAGE_SIZE, ++ allocate_p2frame, deallocate_p2frame, Enomem, Frame, PhysicalAddress, RaiiFrame, RmmA, ++ RmmArch, PAGE_SIZE, + }, + percpu::PercpuBlock, + scheme::{CallerCtx, FileHandle, SchemeId}, +@@ -62,6 +63,38 @@ impl Status { + } + } + ++pub const SCHED_PRIORITY_LEVELS: usize = 40; ++pub const DEFAULT_SCHED_OTHER_PRIORITY: usize = 20; ++pub const DEFAULT_SCHED_RR_QUANTUM: u128 = 100_000_000; ++ ++#[repr(u8)] ++#[derive(Clone, Copy, Debug, PartialEq, Eq)] ++pub enum SchedPolicy { ++ Fifo = 0, ++ RoundRobin = 1, ++ Other = 2, ++} ++ ++impl SchedPolicy { ++ pub fn try_from_raw(raw: u8) -> Option { ++ match raw { ++ 0 => Some(Self::Fifo), ++ 1 => Some(Self::RoundRobin), ++ 2 => Some(Self::Other), ++ _ => None, ++ } ++ } ++} ++ ++pub fn rt_priority_to_kernel_prio(rt_priority: u8) -> usize { ++ (SCHED_PRIORITY_LEVELS - 1) ++ .saturating_sub((usize::from(rt_priority.min(99)) * (SCHED_PRIORITY_LEVELS - 1)) / 99) ++} ++ ++fn clamp_sched_other_prio(prio: usize) -> usize { ++ prio.min(SCHED_PRIORITY_LEVELS - 1) ++} ++ + #[derive(Clone, Debug)] + pub enum HardBlockedReason { + /// "SIGSTOP", only procmgr is allowed to switch contexts this state +@@ -140,6 +173,17 @@ pub struct Context { + pub fmap_ret: Option, + /// Priority + pub prio: usize, ++ pub sched_policy: SchedPolicy, ++ pub sched_rt_priority: u8, ++ pub sched_rr_ticks_consumed: u32, ++ pub sched_static_prio: usize, ++ 
pub sched_rr_quantum: u128, ++ #[allow(dead_code)] ++ pub futex_pi_boost: bool, ++ #[allow(dead_code)] ++ pub futex_pi_original_prio: usize, ++ #[allow(dead_code)] ++ pub futex_pi_waiters: Vec, + + // TODO: id can reappear after wraparound? + pub owner_proc_id: Option, +@@ -148,6 +192,8 @@ pub struct Context { + pub euid: u32, + pub egid: u32, + pub pid: usize, ++ /// Supplementary group IDs for access control decisions. ++ pub groups: Vec, + + // See [`PreemptGuard`] + // +@@ -197,13 +243,22 @@ impl Context { + files: Arc::new(RwLock::new(FdTbl::new())), + userspace: false, + fmap_ret: None, +- prio: 20, ++ prio: DEFAULT_SCHED_OTHER_PRIORITY, ++ sched_policy: SchedPolicy::Other, ++ sched_rt_priority: 0, ++ sched_rr_ticks_consumed: 0, ++ sched_static_prio: DEFAULT_SCHED_OTHER_PRIORITY, ++ sched_rr_quantum: DEFAULT_SCHED_RR_QUANTUM, ++ futex_pi_boost: false, ++ futex_pi_original_prio: DEFAULT_SCHED_OTHER_PRIORITY, ++ futex_pi_waiters: Vec::new(), + being_sigkilled: false, + owner_proc_id, + + euid: 0, + egid: 0, + pid: 0, ++ groups: Vec::new(), + + #[cfg(feature = "syscall_debug")] + syscall_debug_info: crate::syscall::debug::SyscallDebugInfo::default(), +@@ -218,11 +273,47 @@ impl Context { + self.preempt_locks == 0 + } + ++ fn base_sched_prio(&self) -> usize { ++ match self.sched_policy { ++ SchedPolicy::Other => clamp_sched_other_prio(self.sched_static_prio), ++ SchedPolicy::Fifo | SchedPolicy::RoundRobin => { ++ rt_priority_to_kernel_prio(self.sched_rt_priority) ++ } ++ } ++ } ++ ++ fn apply_sched_prio(&mut self) { ++ let base_prio = self.base_sched_prio(); ++ if self.futex_pi_boost { ++ self.futex_pi_original_prio = base_prio; ++ self.prio = self.prio.min(base_prio); ++ } else { ++ self.futex_pi_original_prio = base_prio; ++ self.prio = base_prio; ++ } ++ } ++ ++ pub fn set_sched_other_prio(&mut self, prio: usize) { ++ self.sched_static_prio = clamp_sched_other_prio(prio); ++ self.apply_sched_prio(); ++ } ++ ++ pub fn set_sched_policy(&mut self, sched_policy: 
SchedPolicy, rt_priority: u8) { ++ self.sched_policy = sched_policy; ++ self.sched_rt_priority = match sched_policy { ++ SchedPolicy::Other => 0, ++ SchedPolicy::Fifo | SchedPolicy::RoundRobin => rt_priority.min(99), ++ }; ++ self.sched_rr_ticks_consumed = 0; ++ self.apply_sched_prio(); ++ } ++ + /// Block the context, and return true if it was runnable before being blocked + pub fn block(&mut self, reason: &'static str) -> bool { + if self.status.is_runnable() { + self.status = Status::Blocked; + self.status_reason = reason; ++ self.sched_rr_ticks_consumed = 0; + true + } else { + false +@@ -232,6 +323,7 @@ impl Context { + pub fn hard_block(&mut self, reason: HardBlockedReason) -> bool { + if self.status.is_runnable() { + self.status = Status::HardBlocked { reason }; ++ self.sched_rr_ticks_consumed = 0; + + true + } else { +@@ -261,6 +353,7 @@ impl Context { + if self.status.is_soft_blocked() { + self.status = Status::Runnable; + self.status_reason = ""; ++ self.sched_rr_ticks_consumed = 0; + + true + } else { +@@ -479,6 +572,7 @@ impl Context { + uid: self.euid, + gid: self.egid, + pid: self.pid, ++ groups: self.groups.clone(), + } + } + } diff --git a/sources/redbear-0.1.0/patches/P5-sched-rt-policy.patch b/sources/redbear-0.1.0/patches/P5-sched-rt-policy.patch new file mode 100644 index 00000000..8d491afa --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-sched-rt-policy.patch @@ -0,0 +1,150 @@ +diff --git a/src/context/switch.rs b/src/context/switch.rs +index 86684c8..aeb29c9 100644 +--- a/src/context/switch.rs ++++ b/src/context/switch.rs +@@ -5,7 +5,7 @@ + use crate::{ + context::{ + self, arch, idle_contexts, idle_contexts_try, run_contexts, ArcContextLockWriteGuard, +- Context, ContextLock, WeakContextRef, ++ Context, ContextLock, SchedPolicy, WeakContextRef, + }, + cpu_set::LogicalCpuId, + cpu_stats::{self, CpuState}, +@@ -33,35 +33,17 @@ const SCHED_PRIO_TO_WEIGHT: [usize; 40] = [ + 70, 56, 45, 36, 29, 23, 18, 15, + ]; + +-/// Determines if a given 
context is eligible to be scheduled on a given CPU (in +-/// principle, the current CPU). +-/// +-/// # Safety +-/// This function is unsafe because it modifies the `context`'s state directly without synchronization. +-/// +-/// # Parameters +-/// - `context`: The context (process/thread) to be checked. +-/// - `cpu_id`: The logical ID of the CPU on which the context is being scheduled. +-/// +-/// # Returns +-/// - `UpdateResult::CanSwitch`: If the context can be switched to. +-/// - `UpdateResult::Skip`: If the context should be skipped (e.g., it's running on another CPU). + unsafe fn update_runnable( + context: &mut Context, + cpu_id: LogicalCpuId, + switch_time: u128, + ) -> UpdateResult { +- // Ignore contexts that are already running. + if context.running { + return UpdateResult::Skip; + } +- +- // Ignore contexts assigned to other CPUs. + if !context.sched_affinity.contains(cpu_id) { + return UpdateResult::Skip; + } +- +- // If context is soft-blocked and has a wake-up time, check if it should wake up. + if context.status.is_soft_blocked() + && let Some(wake) = context.wake + && switch_time >= wake +@@ -69,8 +51,6 @@ unsafe fn update_runnable( + context.wake = None; + context.unblock_no_ipi(); + } +- +- // If the context is runnable, indicate it can be switched to. + if context.status.is_runnable() { + UpdateResult::CanSwitch + } else { +@@ -95,7 +75,7 @@ pub fn tick(token: &mut CleanLockToken) { + let new_ticks = ticks_cell.get() + 1; + ticks_cell.set(new_ticks); + +- // Trigger a context switch after every 3 ticks (approx. 6.75 ms). ++ // Trigger a context switch after every 3 ticks. 
+ if new_ticks >= 3 { + switch(token); + crate::context::signal::signal_handler(token); +@@ -167,10 +147,7 @@ pub fn switch(token: &mut CleanLockToken) -> SwitchResult { + let mut prev_context_guard = unsafe { prev_context_lock.write_arc() }; + + if !prev_context_guard.is_preemptable() { +- // Unset global lock + arch::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst); +- +- // Pretend to have finished switching, so CPU is not idled + return SwitchResult::Switched; + } + +@@ -377,6 +354,71 @@ fn select_next_context( + let total_contexts: usize = contexts_list.iter().map(|q| q.len()).sum(); + let mut skipped_contexts = 0; + ++ // PASS 0: SCHED_FIFO and SCHED_RR — scan for RT contexts to schedule. ++ // When a runnable RT context is found, it takes priority over all SCHED_OTHER. ++ for prio in 0..40 { ++ let rt_contexts = contexts_list ++ .get_mut(prio) ++ .expect("prio should be between [0, 39]"); ++ let len = rt_contexts.len(); ++ for _ in 0..len { ++ let (rt_ref, rt_lock) = match rt_contexts.pop_front() { ++ Some(lock) => match lock.upgrade() { ++ Some(l) => (lock, l), ++ None => { ++ skipped_contexts += 1; ++ continue; ++ } ++ }, ++ None => break, ++ }; ++ if Arc::ptr_eq(&rt_lock, &idle_context) { ++ rt_contexts.push_back(rt_ref); ++ continue; ++ } ++ // Current RT thread: if runnable with no higher-prio RT found yet, ++ // keep it running (no demotion to SCHED_OTHER) ++ if Arc::ptr_eq(&rt_lock, &prev_context_lock) { ++ let mut rt_guard = unsafe { rt_lock.write_arc() }; ++ if rt_guard.status.is_runnable() ++ && (rt_guard.sched_policy == SchedPolicy::Fifo ++ || rt_guard.sched_policy == SchedPolicy::RoundRobin) ++ { ++ percpu.balance.set(balance); ++ percpu.last_queue.set(i); ++ return Ok(Some(rt_guard)); ++ } ++ rt_contexts.push_back(rt_ref); ++ continue; ++ } ++ let mut rt_guard = unsafe { rt_lock.write_arc() }; ++ if !rt_guard.status.is_runnable() || rt_guard.running ++ || !rt_guard.sched_affinity.contains(cpu_id) ++ { ++ rt_contexts.push_back(rt_ref); ++ 
continue; ++ } ++ if rt_guard.sched_policy == SchedPolicy::Fifo ++ || rt_guard.sched_policy == SchedPolicy::RoundRobin ++ { ++ percpu.balance.set(balance); ++ percpu.last_queue.set(i); ++ if !Arc::ptr_eq(&prev_context_lock, &idle_context) { ++ let prev_ctx = WeakContextRef(Arc::downgrade(&prev_context_lock)); ++ if prev_context_guard.status.is_runnable() { ++ contexts_list[prev_context_guard.prio].push_back(prev_ctx); ++ } else { ++ idle_contexts(token.token()).push_back(prev_ctx); ++ } ++ } ++ return Ok(Some(rt_guard)); ++ } ++ rt_contexts.push_back(rt_ref); ++ } ++ } ++ ++ // PASS 1: SCHED_OTHER — existing DWRR deficit tracking ++ + 'priority: loop { + i = (i + 1) % 40; + total_iters += 1; diff --git a/sources/redbear-0.1.0/patches/P5-scheme-sched-id.patch b/sources/redbear-0.1.0/patches/P5-scheme-sched-id.patch new file mode 100644 index 00000000..5554697b --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-scheme-sched-id.patch @@ -0,0 +1,20 @@ +diff --git a/src/scheme/mod.rs b/src/scheme/mod.rs +index d30272c..9da2b28 100644 +--- a/src/scheme/mod.rs ++++ b/src/scheme/mod.rs +@@ -777,6 +777,7 @@ pub struct CallerCtx { + pub pid: usize, + pub uid: u32, + pub gid: u32, ++ pub groups: alloc::vec::Vec, + } + impl CallerCtx { + pub fn filter_uid_gid(self, euid: u32, egid: u32) -> Self { +@@ -785,6 +786,7 @@ impl CallerCtx { + pid: self.pid, + uid: euid, + gid: egid, ++ groups: self.groups, + } + } else { + self diff --git a/sources/redbear-0.1.0/patches/P5-signal-handler-panic-hardening.patch b/sources/redbear-0.1.0/patches/P5-signal-handler-panic-hardening.patch new file mode 100644 index 00000000..944a0e11 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-signal-handler-panic-hardening.patch @@ -0,0 +1,112 @@ +diff --git a/redox-rt/src/signal.rs b/redox-rt/src/signal.rs +index 022f873..ab96dea 100644 +--- a/redox-rt/src/signal.rs ++++ b/redox-rt/src/signal.rs +@@ -1,4 +1,10 @@ +-use core::{ffi::c_int, ptr::NonNull, sync::atomic::Ordering}; ++use core::{ ++ 
ffi::c_int, ++ hint::unreachable_unchecked, ++ panic::AssertUnwindSafe, ++ ptr::NonNull, ++ sync::atomic::Ordering, ++}; + + use syscall::{ + CallFlags, EAGAIN, EINTR, EINVAL, ENOMEM, EPERM, Error, RawAction, Result, SenderInfo, +@@ -103,6 +109,47 @@ pub struct SiginfoAbi { + pub si_value: usize, // sigval + } + ++fn invoke_signal_handler(f: AssertUnwindSafe) -> bool { ++ fn do_call(data: *mut u8) { ++ let callback = unsafe { &mut *data.cast::>>() }; ++ if let Some(callback) = callback.take() { ++ callback.0(); ++ } ++ } ++ ++ fn do_catch(_data: *mut u8, _payload: *mut u8) {} ++ ++ let mut callback = Some(f); ++ unsafe { ++ core::intrinsics::catch_unwind( ++ do_call::, ++ (&mut callback as *mut Option>).cast(), ++ do_catch::, ++ ) != 0 ++ } ++} ++ ++#[inline(always)] ++unsafe fn return_ignored_signal( ++ os: &RtTcb, ++ stack: &SigStack, ++ signals_were_disabled: bool, ++) { ++ unsafe { ++ (*os.arch.get()).last_sig_was_restart = true; ++ (*os.arch.get()).last_sigstack = NonNull::new(stack.link); ++ } ++ ++ if !signals_were_disabled { ++ core::sync::atomic::compiler_fence(Ordering::Release); ++ let control_flags = &os.control.control_flags; ++ control_flags.store( ++ control_flags.load(Ordering::Relaxed) & !SigcontrolFlags::INHIBIT_DELIVERY.bits(), ++ Ordering::Relaxed, ++ ); ++ } ++} ++ + #[inline(always)] + unsafe fn inner(stack: &mut SigStack) { + let os = unsafe { &Tcb::current().unwrap().os_specific }; +@@ -168,7 +215,10 @@ unsafe fn inner(stack: &mut SigStack) { + // and reaching this code. If so, we do already know whether the signal is IGNORED *now*, + // and so we should return early ideally without even temporarily touching the signal mask. 
+ SigactionKind::Ignore => { +- panic!("ctl {:#x?} signal {}", os.control, stack.sig_num) ++ unsafe { ++ return_ignored_signal(os, stack, signals_were_disabled); ++ } ++ return; + } + // this case should be treated equally as the one above + // +@@ -183,7 +233,9 @@ unsafe fn inner(stack: &mut SigStack) { + CallFlags::empty(), + &[ProcCall::Exit as u64, u64::from(sig) << 8], + ); +- panic!() ++ // SAFETY: ProcCall::Exit terminates the current process when it succeeds, so reaching ++ // this point would violate the proc manager exit contract. ++ unsafe { unreachable_unchecked() } + } + SigactionKind::Handled { handler } => handler, + }; +@@ -224,15 +276,21 @@ unsafe fn inner(stack: &mut SigStack) { + si_uid: sender_uid as i32, + si_value: stack.sival, + }; +- unsafe { ++ if invoke_signal_handler(AssertUnwindSafe(|| unsafe { + sigaction( + stack.sig_num as c_int, + core::ptr::addr_of!(info).cast(), + stack as *mut SigStack as *mut (), + ) +- }; ++ })) { ++ let _ = syscall::write(2, b"redox-rt: sa_siginfo handler panicked; continuing\n"); ++ } + } else if let Some(handler) = unsafe { handler.handler } { +- handler(stack.sig_num as c_int); ++ if invoke_signal_handler(AssertUnwindSafe(|| { ++ handler(stack.sig_num as c_int); ++ })) { ++ let _ = syscall::write(2, b"redox-rt: sa_handler panicked; continuing\n"); ++ } + } + + // Disable signals while we modify the sigmask again diff --git a/sources/redbear-0.1.0/patches/P5-startup-init-panic-hardening.patch b/sources/redbear-0.1.0/patches/P5-startup-init-panic-hardening.patch new file mode 100644 index 00000000..c887c019 --- /dev/null +++ b/sources/redbear-0.1.0/patches/P5-startup-init-panic-hardening.patch @@ -0,0 +1,101 @@ +diff --git a/src/start.rs b/src/start.rs +--- a/src/start.rs ++++ b/src/start.rs +@@ -1,10 +1,7 @@ + //! Startup code. 
+ + use alloc::{boxed::Box, vec::Vec}; +-use core::{intrinsics, ptr}; +- +-#[cfg(target_os = "redox")] +-use generic_rt::ExpectTlsFree; ++use core::{fmt::Write, intrinsics, panic::AssertUnwindSafe, ptr}; + + use crate::{ + ALLOCATOR, +@@ -143,6 +141,29 @@ fn io_init() { + stdio::stderr = stdio::default_stderr().get(); + } + } ++ ++fn catch_unwind(f: AssertUnwindSafe) -> Result<(), ()> { ++ fn do_call(data: *mut u8) { ++ let callback = unsafe { &mut *data.cast::>>() }; ++ if let Some(callback) = callback.take() { ++ callback.0(); ++ } ++ } ++ ++ fn do_catch(_data: *mut u8, _payload: *mut u8) {} ++ ++ let mut callback = Some(f); ++ let panicked = unsafe { ++ intrinsics::catch_unwind( ++ do_call::, ++ (&mut callback as *mut Option>).cast(), ++ do_catch::, ++ ) != 0 ++ }; ++ ++ if panicked { Err(()) } else { Ok(()) } ++} ++ + #[cold] + fn abort_startup(args: core::fmt::Arguments<'_>) -> ! { + let mut w = platform::FileWriter::new(2); +@@ -164,14 +184,23 @@ pub unsafe extern "C" fn relibc_start_v1( + unsafe { relibc_verify_host() }; + + #[cfg(target_os = "redox")] +- let thr_fd = redox_rt::proc::FdGuard::new( +- unsafe { +- crate::platform::get_auxv_raw(sp.auxv().cast(), redox_rt::auxv_defs::AT_REDOX_THR_FD) +- } +- .expect_notls("no thread fd present"), +- ) +- .to_upper() +- .expect_notls("failed to move thread fd to upper table"); ++ let thr_fd = { ++ let thr_fd = match unsafe { ++ crate::platform::get_auxv_raw(sp.auxv().cast(), redox_rt::auxv_defs::AT_REDOX_THR_FD) ++ } { ++ Some(thr_fd) => thr_fd, ++ None => abort_startup(format_args!( ++ "relibc_start_v1: missing AT_REDOX_THR_FD auxv entry; no thread fd present\n" ++ )), ++ }; ++ ++ match redox_rt::proc::FdGuard::new(thr_fd).to_upper() { ++ Ok(thr_fd) => thr_fd, ++ Err(err) => abort_startup(format_args!( ++ "relibc_start_v1: failed to move thread fd to upper table: {err:?}\n" ++ )), ++ } ++ }; + + // Initialize TLS, if necessary + unsafe { +@@ -237,7 +266,10 @@ pub unsafe extern "C" fn relibc_start_v1( + let mut f 
= unsafe { &__preinit_array_start } as *const _; + #[allow(clippy::op_ref)] + while f < &raw const __preinit_array_end { +- (unsafe { *f })(); ++ let func = unsafe { *f }; ++ if catch_unwind(AssertUnwindSafe(|| unsafe { (*f)() })).is_err() { ++ log_initializer_panic(".preinit_array", func); ++ } + f = unsafe { f.offset(1) }; + } + } +@@ -247,7 +279,10 @@ pub unsafe extern "C" fn relibc_start_v1( + let mut f = unsafe { &__init_array_start } as *const _; + #[allow(clippy::op_ref)] + while f < &raw const __init_array_end { +- (unsafe { *f })(); ++ let func = unsafe { *f }; ++ if catch_unwind(AssertUnwindSafe(|| unsafe { (*f)() })).is_err() { ++ log_initializer_panic(".init_array", func); ++ } + f = unsafe { f.offset(1) }; + } + } diff --git a/src/bin/repo.rs b/src/bin/repo.rs index 77281693..a5a19036 100644 --- a/src/bin/repo.rs +++ b/src/bin/repo.rs @@ -715,6 +715,20 @@ fn handle_fetch( allow_offline: bool, logger: &PtyOut, ) -> anyhow::Result { + // In release mode, explicit fetch is forbidden. Cook's internal fetch + // (allow_offline=true) is still allowed since it respects COOKBOOK_OFFLINE. + if !allow_offline { + if let Ok(release) = env::var("REDBEAR_RELEASE") { + if !release.is_empty() { + bail!("{}", Error::Other(format!( + "Fetch is disabled in release mode (REDBEAR_RELEASE={}). \ + Sources are immutable. To refresh, run: provision-release.sh", + release + ))); + } + } + } + let source_dir = match config.cook.offline && allow_offline { true => fetch_offline(&recipe, logger), false => fetch(&recipe, !recipe.is_deps, logger), @@ -789,6 +803,17 @@ fn handle_clean( cached = false; } let dir = recipe.dir.join("source"); + if matches!(*command, CliCommand::Unfetch) { + // In release mode, unfetch is forbidden — sources are immutable + if let Ok(release) = std::env::var("REDBEAR_RELEASE") { + if !release.is_empty() { + anyhow::bail!( + "Unfetch is disabled in release mode (REDBEAR_RELEASE={}). 
Sources are immutable.", + release + ); + } + } + } if dir.exists() && matches!(*command, CliCommand::Unfetch) { if is_local_overlay(&recipe.dir) && !redbear_allow_local_unfetch() { eprintln!( diff --git a/src/cook/fetch.rs b/src/cook/fetch.rs index 0440d2ec..47ae22d9 100644 --- a/src/cook/fetch.rs +++ b/src/cook/fetch.rs @@ -226,13 +226,22 @@ pub fn fetch_offline(recipe: &CookRecipe, logger: &PtyOut) -> Result { offline_check_exists(&source_dir)?; let (head_rev, _) = get_git_head_rev(&source_dir)?; + if let Some(expected_rev) = rev { + if head_rev != *expected_rev { + bail_other_err!( + "source at {} has revision {} but recipe expects {}. \ + Source archives may be corrupted. Restore from release archives.", + source_dir.display(), head_rev, expected_rev + ); + } + } FetchResult::cached(source_dir, head_rev) } Some(SourceRecipe::Tar {