From 6c2643bd9c1d7c36b8a8395ea75eb390f998b1c6 Mon Sep 17 00:00:00 2001 From: Vasilito Date: Thu, 16 Apr 2026 13:52:09 +0100 Subject: [PATCH] Expand linux-kpi wireless scaffolding, consolidate desktop plan, remove historical report Add channel/band/rate/BSS/RX-TX structures to linux-kpi wireless scaffolding (mac80211.rs, wireless.rs, net.rs, C headers), extend redbear-iwlwifi linux_port.c with comprehensive PCIe transport, and create consolidated CONSOLE-TO-KDE-DESKTOP-PLAN.md as the canonical desktop path document. Remove stale INTEGRATION_REPORT.md (1388 lines) in favor of current local/docs/ references. Update AGENTS.md, README, and docs index to point to the new plan. --- .codex | 0 AGENTS.md | 2 +- INTEGRATION_REPORT.md | 1388 ---------- README.md | 1 + docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md | 7 +- docs/README.md | 9 +- local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md | 895 +++++++ local/docs/WIFI-IMPLEMENTATION-PLAN.md | 23 +- .../source/src/c_headers/linux/atomic.h | 25 +- .../source/src/c_headers/linux/dma-mapping.h | 13 +- .../source/src/c_headers/linux/firmware.h | 2 +- .../source/src/c_headers/linux/interrupt.h | 6 +- .../linux-kpi/source/src/c_headers/linux/io.h | 17 +- .../source/src/c_headers/linux/irq.h | 2 +- .../source/src/c_headers/linux/jiffies.h | 2 +- .../source/src/c_headers/linux/mutex.h | 2 +- .../source/src/c_headers/linux/pci.h | 17 +- .../source/src/c_headers/linux/skb.h | 6 + .../source/src/c_headers/linux/skbuff.h | 17 + .../source/src/c_headers/linux/spinlock.h | 2 +- .../source/src/c_headers/linux/timer.h | 4 +- .../source/src/c_headers/linux/wait.h | 6 +- .../source/src/c_headers/net/cfg80211.h | 5 + .../source/src/c_headers/net/mac80211.h | 45 + .../drivers/linux-kpi/source/src/lib.rs | 1 + .../linux-kpi/source/src/rust_impl/dma.rs | 321 ++- .../linux-kpi/source/src/rust_impl/io.rs | 81 +- .../linux-kpi/source/src/rust_impl/list.rs | 197 ++ .../source/src/rust_impl/mac80211.rs | 406 ++- .../linux-kpi/source/src/rust_impl/mod.rs | 5 +- 
.../linux-kpi/source/src/rust_impl/net.rs | 464 +++- .../linux-kpi/source/src/rust_impl/pci.rs | 193 ++ .../linux-kpi/source/src/rust_impl/sync.rs | 191 +- .../source/src/rust_impl/wireless.rs | 354 ++- .../redbear-iwlwifi/source/src/linux_port.c | 2277 ++++++++++++++--- .../core/relibc/source.pre-preservation-test | 1 + 36 files changed, 5230 insertions(+), 1757 deletions(-) create mode 100644 .codex delete mode 100644 INTEGRATION_REPORT.md create mode 100644 local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md create mode 100644 local/recipes/drivers/linux-kpi/source/src/c_headers/linux/skb.h create mode 100644 local/recipes/drivers/linux-kpi/source/src/rust_impl/list.rs create mode 160000 recipes/core/relibc/source.pre-preservation-test diff --git a/.codex b/.codex new file mode 100644 index 00000000..e69de29b diff --git a/AGENTS.md b/AGENTS.md index 2b6865da..efa6739f 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -277,7 +277,7 @@ All custom work goes in `local/` — see `local/AGENTS.md` for overlay usage. - QEMU used for testing (make qemu). 
VirtualBox also supported - The `repo` binary (cookbook CLI) may crash with TUI in non-interactive environments — use `CI=1` - No git submodules — external repos managed via recipe source URLs and repo manifests -- File `INTEGRATION_REPORT.md` contains detailed integration status from a previous analysis +- Historical integration report removed (2026-04-16); see `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` for current state ## SUBSYSTEM PRIORITY AND ORDER diff --git a/INTEGRATION_REPORT.md b/INTEGRATION_REPORT.md deleted file mode 100644 index c465aa7a..00000000 --- a/INTEGRATION_REPORT.md +++ /dev/null @@ -1,1388 +0,0 @@ -# Red Bear OS Integration Report: Wayland, KDE Plasma, and Linux Driver Support - -**Date**: April 11, 2026 -**Project**: Red Bear OS Build System (based on Redox OS) -**Status**: Assessment Complete - -> **Status correction (2026-04-14):** This report is a historical assessment snapshot and is no -> longer an accurate statement of current repository status. The repo now contains substantial work -> that this report still describes as missing, including `redox-driver-sys`, `linux-kpi`, -> `firmware-loader`, `redox-drm`, the AMD display path, the Qt6 stack, `config/redbear-kde.toml`, -> and a large `local/recipes/kde/` tree. -> -> **Canonical current-state docs:** use `README.md`, `AGENTS.md`, and -> `docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md` for repository-level current truth. For subsystem -> current-state detail, use the active documents under `local/docs/`. -> -> **WIP interpretation note:** upstream `recipes/wip/...` paths referenced below are part of the -> historical assessment context and should not be read as automatic current Red Bear shipping source -> of truth. Apply the overlay/WIP policy and the WIP migration ledger when interpreting them. 
- -## Current Snapshot - -| Area | Current repo state | -|---|---| -| ACPI / bare-metal | Complete in-tree | -| Driver infrastructure | Present and compiling in `local/recipes/drivers/` | -| DRM / display | Present and compiling in `local/recipes/gpu/`; hardware validation still pending | -| POSIX/input | Implemented in-tree with remaining validation work | -| Wayland | Partial runtime path | -| KDE | In progress with a mix of true builds and scaffolding | - -Read this file as historical context, not as the canonical current-state document. - ---- - -## Executive Summary - -Red Bear OS is based on Redox OS, a microkernel-based operating system written in Rust with comprehensive documentation on integrating Wayland, KDE Plasma, and Linux drivers. The project has: - -- **Active development**: 21+ Wayland recipes, 19+ KDE WIP recipes -- **Build system**: Fully functional, using Rust-based `repo` tool and Makefiles -- **Documentation**: Extensive, detailed implementation paths already documented -- **Blockers identified**: 7 POSIX gaps in relibc, no GPU acceleration, missing DRM/KMS scheme -- **Estimated timelines**: 6-10 months to KDE Plasma, 6-8 months to Linux drivers - ---- - -## 1. Compilation Status - -### Build System Analysis - -**Build System**: Rust-based `repo` tool with Makefile orchestration - -**Key Directories**: -- `config/` - Build configurations (minimal, desktop, wayland, x11) -- `recipes/` - Package recipes (9.6GB total, 60+ redox.patch files) -- `mk/` - Makefile infrastructure (config.mk, depends.mk, podman.mk, etc.) 
-- `src/` - Build system source (cookbook tool in Rust) -- `build/` - Output directory (build/{ARCH}/{CONFIG}/) - -**Available Configs**: -- `minimal` - Bare minimum bootable system -- `server` - Server-oriented (no GUI) -- `desktop-minimal` - Orbital + basic GUI -- `desktop` - COSMIC apps + installer -- `wayland` - Wayland compositor (experimental) -- `x11` - X.org + MATE desktop -- `demo` - Demo apps - -### Build Test Results - -**Prerequisites Status**: -- ✅ Rust toolchain installed (via rustup) -- ✅ Cargo available -- ✅ Make installed -- ✅ QEMU available -- ✅ Prebuilt toolchain exists: `prefix/x86_64-unknown-redox/` -- ✅ Build system binary compiled: `target/release/repo` - -**Build Attempt Results**: -``` -Kernel Source Fetch: ✅ SUCCESS -- Cloned 21452 objects from gitlab.redox-os.org -- Source located at: recipes/core/kernel/source/ - -Build Attempt: ⚠️ PARTIAL -- FUSE filesystem issue encountered (ioctl error 25) -- Kernel source successfully downloaded -- Build system infrastructure validated -``` - -**Issue Identified**: FUSE mount-related error during build, likely due to stale mounts or filesystem permissions. This is a build environment issue, not a project issue. The build system itself is functional. - ---- - -## 2. Wayland Integration: Concrete Path - -### Current State (Experimental/WIP) - -**Existing Components**: -- `config/wayland.toml` - Wayland configuration (21 packages) -- `recipes/wip/wayland/` - 21 Wayland packages: - - `libwayland` (1.24.0) - Patched with redox.patch - - `cosmic-comp` - Partial working, no keyboard input - - `smallvil` (Smithay) - Basic compositor running - - `wlroots` - Not compiled/tested - - `sway` - Not compiled/tested - - `hyprland` - Not compiled/tested - - `niri` - Needs Smithay port - - `xwayland` - Partially patched - - Wayland protocols, xkbcommon, etc. 
- -**Blockers Identified** (from docs/03-WAYLAND-ON-REDOX.md): - -### 2.1 POSIX Gaps in relibc (CRITICAL BLOCKER) - -**7 Missing APIs** (all stubbed in libwayland/redox.patch): - -| API | Used By | Effort | File Location | -|-----|----------|---------|--------------| -| `signalfd`/`signalfd4` | libwayland event loop | Medium | `relibc/src/header/signal/mod.rs` | -| `timerfd_create/settime/gettime` | libwayland timers | Medium | `relibc/src/header/sys_timerfd/` (NEW) | -| `eventfd`/`eventfd_read/write` | libwayland server | Low | `relibc/src/header/sys_eventfd/` (NEW) | -| `F_DUPFD_CLOEXEC` | libwayland fd management | Low | `relibc/src/header/fcntl/mod.rs` | -| `MSG_CMSG_CLOEXEC` | libwayland socket recv | Low | `relibc/src/header/sys_socket/mod.rs` | -| `MSG_NOSIGNAL` | libwayland connection | Low | `relibc/src/header/sys_socket/mod.rs` | -| `open_memstream` | libdrm, libwayland | Low | `relibc/src/header/stdio/src.rs` | - -**Total Estimated Effort**: ~870 lines of Rust code (1-2 weeks) - -### 2.2 Missing Input Stack - -**Components Needed**: -1. **evdev daemon** (`evdevd`) - Translate Redox input schemes to `/dev/input/eventX` - - Location: `recipes/core/evdevd/` (NEW) - - Implementation: ~500 lines of Rust - - Effort: 4-6 weeks - -2. **udev shim** - Device enumeration and hotplug - - Location: `recipes/wip/wayland/udev-shim/` (NEW) - - Implementation: ~500 lines of Rust - - Effort: 2-3 weeks - -3. **libinput port** - Input abstraction layer - - Location: `recipes/wip/wayland/libinput/` (NEW) - - Effort: 3-4 weeks - -**Total Input Stack Effort**: 9-13 weeks - -### 2.3 Missing DRM/KMS Scheme - -**Components Needed**: -1. 
**DRM daemon** (`drmd`) - Register `scheme:drm/card0` - - Location: `recipes/core/drmd/` (NEW) - - Structure: - ``` - src/ - ├── main.rs - daemon entry, scheme registration - ├── scheme.rs - "drm" scheme handler - ├── kms/ - KMS object management - │ ├── crtc.rs - │ ├── connector.rs - │ ├── encoder.rs - │ ├── plane.rs - │ └── framebuffer.rs - ├── gem.rs - GEM buffer management - ├── dmabuf.rs - DMA-BUF export/import - └── drivers/ - ├── mod.rs - driver trait - └── intel.rs - Intel GPU driver (modesetting) - ``` - - Effort: 8-12 weeks - -2. **Intel GPU driver** (native Rust modesetting) - - Location: `redox-drm/src/drivers/intel/` - - Documentation: Intel GPU PRM - - Effort: 6-8 weeks (part of drmd) - -3. **Mesa hardware backend** - - Location: Mesa winsys for Redox DRM (NEW) - - Effort: 4-6 weeks - -**Total DRM/KMS Effort**: 12-16 weeks - -### 2.4 Wayland Compositor Path - -**Recommended: Smithay/smallvil first, then KWin** - -**Why Smithay First**: -- Pure Rust - no C++ toolchain issues -- Already has Redox branch -- Pluggable input/DRM/EGL backends -- Gets working compositor months before KWin - -**Implementation Steps**: - -**Phase 1: Smithay Redox Backends** (4-6 weeks) - -```rust -// smithay/src/backend/input/redox.rs (NEW) -pub struct RedoxInputBackend { - devices: Vec, -} - -impl InputBackend for RedoxInputBackend { - fn dispatch(&mut self) -> Vec { - // Read from /dev/input/eventX via evdevd - // Translate to Smithay's InternalEvent - } -} -``` - -```rust -// smithay/src/backend/drm/redox.rs (NEW) -pub struct RedoxDrmBackend { - drm_fd: File, // opened from /scheme/drm/card0 -} - -impl DrmBackend for RedoxDrmBackend { - fn create_surface(&self, size: Size) -> Surface { - // Create framebuffer via DRM GEM - // Set KMS mode via scheme:drm - } - - fn page_flip(&self, surface: &Surface) -> Result { - // DRM page flip via scheme - } -} -``` - -```rust -// smithay/src/backend/egl/redox.rs (NEW) -pub struct RedoxEglDisplay { - // Mesa EGL display integration -} 
-``` - -**Phase 2: smallvil Recipe** (1-2 weeks) - -Modify `recipes/wip/wayland/smallvil/recipe.toml`: -```toml -[source] -git = "https://github.com/jackpot51/smithay" -branch = "redox" - -[build] -template = "cargo" -dependencies = [ - "libffi", - "libwayland", - "libxkbcommon", - "mesa", # for EGL - "libdrm", # for DRM backend - "evdevd", # for input - "seatd", # for session management -] -cargopackages = ["smallvil"] -``` - -**Phase 3: Verification** (1-2 weeks) - -1. `smallvil` launches with DRM backend - takes over display -2. Keyboard and mouse work via evdevd -3. `libcosmic-wayland_application` renders a window on compositor -4. Screenshot shows window - -**Phase 4: Enable Other Compositors** - -1. `cosmic-comp`: Uncomment libinput dependency, rebuild -2. `wlroots`: Build with libdrm + libinput + GBM -3. `sway`: Should work once wlroots builds -4. `KWin`: See Section 3 - -### 2.5 Wayland Implementation Timeline - -| Phase | Duration | Milestone | -|--------|----------|-----------| -| POSIX gaps (relibc) | 1-2 weeks | libwayland builds without patches | -| Input stack (evdevd + udev + libinput) | 4-6 weeks | libinput works | -| DRM/KMS (drmd + Intel driver) | 8-12 weeks | libdrm works, modesetting functional | -| Smithay backends + smallvil | 4-6 weeks | Working Wayland compositor | -| **Total to Wayland Compositor** | **~26 weeks (6 months)** | Functional Wayland on Red Bear OS | - -**Parallel Execution**: Input stack (4-6 weeks) can run in parallel with DRM/KMS (8-12 weeks), reducing total to **~20-24 weeks (5-6 months)** with 2 developers. - ---- - -## 3. 
KDE Plasma Integration: Concrete Path - -### Prerequisites (MUST be complete first) - -From docs/05-KDE-PLASMA-ON-REDOX.md: -- ✅ relibc POSIX gaps fixed (from Wayland Phase 1) -- ✅ evdevd + libinput working (from Wayland Phase 2) -- ✅ DRM/KMS scheme working (from Wayland Phase 3) -- ✅ Wayland compositor running (from Wayland Phase 4) -- ✅ Mesa EGL + software OpenGL (already ported) - -### Phase KDE-A: Qt Foundation (8-12 weeks) - -#### Step 1: Port `qtbase` (6-8 weeks) - -**Create recipe**: `recipes/wip/qt/qtbase/recipe.toml` - -```toml -[source] -tar = "https://download.qt.io/official_releases/qt/6.8/6.8.2/submodules/qtbase-everywhere-src-6.8.2.tar.xz" -patches = ["redox.patch"] - -[build] -template = "custom" -dependencies = [ - "libwayland", - "mesa", - "libdrm", - "libxkbcommon", - "zlib", - "openssl1", - "glib", - "pcre2", - "expat", - "fontconfig", - "freetype2", -] - -script = """ -DYNAMIC_INIT - -mkdir -p build && cd build - -cmake .. \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DCMAKE_BUILD_TYPE=Release \ - -DQT_BUILD_EXAMPLES=OFF \ - -DQT_BUILD_TESTS=OFF \ - -DFEATURE_wayland=ON \ - -DFEATURE_wayland_client=ON \ - -DFEATURE_xcb=OFF \ - -DFEATURE_xlib=OFF \ - -DFEATURE_opengl=ON \ - -DFEATURE_openssl=ON \ - -DFEATURE_dbus=ON \ - -DFEATURE_system_pcre2=ON \ - -DFEATURE_system_zlib=ON \ - -DINPUT_opengl=desktop \ - -DQT_QPA_PLATFORMS=wayland \ - -DQT_FEATURE_vulkan=OFF - -cmake --build . -j${COOKBOOK_MAKE_JOBS} -cmake --install . --prefix ${COOKBOOK_STAGE}/usr -""" -``` - -**What `redox.patch` for qtbase needs** (~500-800 lines): - -1. Platform detection: - ``` - qtbase/src/corelib/global/qsystemdetection.h — add Redox detection - qtbase/src/corelib/io/qfilesystemengine_unix.cpp — Redox path handling - ``` - -2. Shared memory: - ``` - qtbase/src/corelib/kernel/qsharedmemory.cpp — map to Redox shm scheme - ``` - -3. Process handling: - ``` - qtbase/src/corelib/io/qprocess_unix.cpp — already works (relibc POSIX) - ``` - -4. 
Network: - ``` - qtbase/src/network/ — should compile with relibc sockets - ``` - -#### Step 2: Port `qtwayland` (1-2 weeks) - -```toml -[source] -tar = "https://download.qt.io/official_releases/qt/6.8/6.8.2/submodules/qtwayland-everywhere-src-6.8.2.tar.xz" - -[build] -template = "custom" -dependencies = ["qtbase", "libwayland", "wayland-protocols"] - -script = """ -DYNAMIC_INIT -mkdir -p build && cd build -cmake .. \ - -DCMAKE_PREFIX_PATH=${COOKBOOK_SYSROOT}/usr \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DQT_BUILD_TESTS=OFF -cmake --build . -j${COOKBOOK_MAKE_JOBS} -cmake --install . --prefix ${COOKBOOK_STAGE}/usr -""" -``` - -#### Step 3: Port `qtdeclarative` (QML) (2-3 weeks) - -```toml -[source] -tar = "https://download.qt.io/official_releases/qt/6.8/6.8.2/submodules/qtdeclarative-everywhere-src-6.8.2.tar.xz" - -[build] -template = "custom" -dependencies = ["qtbase"] - -script = """ -# Same cmake pattern as qtwayland -""" -``` - -#### Step 4: Verification (1-2 weeks) - -Build and run a simple Qt Wayland app: -```cpp -#include -#include -int main(int argc, char *argv[]) { - QApplication app(argc, argv); - QLabel label("Hello from Qt on Redox!"); - label.show(); - return app.exec(); -} -``` - -**Milestone**: Window with "Hello from Qt on Redox!" appears on Wayland compositor. 
- -### Phase KDE-B: KDE Frameworks (8-12 weeks) - -#### KDE Frameworks Tier 1 (2-3 weeks) - -| Framework | Purpose | Estimated Patches | -|-----------|---------|------------------| -| `extra-cmake-modules` | CMake modules | None — pure CMake | -| `kcoreaddons` | Core utilities | ~50 lines (process detection) | -| `kconfig` | Configuration | ~30 lines (filesystem paths) | -| `kwidgetsaddons` | Extra Qt widgets | None — pure Qt | -| `kitemmodels` | Model/view classes | None — pure Qt | -| `kitemviews` | Item view classes | None — pure Qt | -| `kcodecs` | String encoding | None — pure Qt | -| `kguiaddons` | GUI utilities | None — pure Qt | - -**Recipe Pattern** (same for all Tier 1): -```toml -[source] -tar = "https://download.kde.org/stable/frameworks/6.10/kcoreaddons-6.10.0.tar.xz" -patches = ["redox.patch"] - -[build] -template = "custom" -dependencies = ["qtbase", "extra-cmake-modules"] - -script = """ -DYNAMIC_INIT -mkdir -p build && cd build -cmake .. \ - -DCMAKE_PREFIX_PATH=${COOKBOOK_SYSROOT}/usr \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DBUILD_TESTING=OFF \ - -DBUILD_QCH=OFF -cmake --build . -j${COOKBOOK_MAKE_JOBS} -cmake --install . --prefix ${COOKBOOK_STAGE}/usr -""" -``` - -#### KDE Frameworks Tier 2 (2-3 weeks) - -| Framework | Dependencies | Notes | -|-----------|--------------|-------| -| `ki18n` | `kcoreaddons`, gettext | Internationalization | -| `kauth` | `kcoreaddons` | PolicyKit stub needed | -| `kwindowsystem` | `qtbase` | Window management — needs Wayland backend | -| `kcrash` | `kcoreaddons` | Crash handler — may need signal adjustments | -| `karchive` | `qtbase`, zlib | Archive handling — should port cleanly | -| `kiconthemes` | `kwidgetsaddons`, `karchive` | Icon loading | - -#### KDE Frameworks Tier 3 (3-4 weeks) - Plasma essentials - -| Framework | Purpose | Key for Plasma? 
| -|-----------|---------|------------------| -| `kio` | File I/O abstraction | **Yes** — file dialogs, I/O slaves | -| `kservice` | Plugin/service management | **Yes** — app discovery | -| `kxmlgui` | GUI framework | **Yes** — menus, toolbars | -| `plasma-framework` | Plasma applets/containments | **Yes** — desktop shell | -| `knotifications` | Desktop notifications | **Yes** — notification system | -| `kpackage` | Package/asset management | **Yes** — Plasma packages | -| `kconfigwidgets` | Configuration widgets | **Yes** — settings UI | - -**Total frameworks needed for minimal Plasma**: ~25 - -**Estimated total patch effort for all frameworks**: ~1500-2000 lines - -### Phase KDE-C: Plasma Desktop (6-8 weeks) - -#### Step 1: Port KWin (4-6 weeks) - -```toml -[source] -tar = "https://download.kde.org/stable/plasma/6.3.4/kwin-6.3.4.tar.xz" -patches = ["redox.patch"] - -[build] -template = "custom" -dependencies = [ - "qtbase", "qtwayland", "qtdeclarative", - "kcoreaddons", "kconfig", "kwindowsystem", - "knotifications", "kxmlgui", "plasma-framework", - "libwayland", "wayland-protocols", - "mesa", "libdrm", "libinput", "seatd", - "libxkbcommon", -] - -script = """ -DYNAMIC_INIT -mkdir -p build && cd build -cmake .. \ - -DCMAKE_PREFIX_PATH=${COOKBOOK_SYSROOT}/usr \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DBUILD_TESTING=OFF \ - -DKWIN_BUILD_SCREENLOCKING=OFF \ - -DKWIN_BUILD_TABBOX=OFF \ - -DKWIN_BUILD_EFFECTS=ON -cmake --build . -j${COOKBOOK_MAKE_JOBS} -cmake --install . --prefix ${COOKBOOK_STAGE}/usr -""" -``` - -**What `redox.patch` for KWin needs** (~1000-1500 lines): - -1. DRM backend: - ``` - src/backends/drm/drm_backend.cpp — open DRM scheme instead of device node - src/backends/drm/drm_output.cpp — use scheme ioctl equivalents - ``` - -2. libinput backend: Should work via evdevd - ``` - src/backends/libinput/connection.cpp — may need path adjustments - ``` - -3. EGL/OpenGL: - ``` - src/libkwineglbackend.cpp — Mesa EGL should work (already ported) - ``` - -4. 
Session management: KWin expects logind, need stub: - ``` - src/session.h/cpp — stub LogindIntegration, use seatd instead - ``` - -5. udev: - ``` - src/udev.h/cpp — redirect to our udev-shim - ``` - -#### Step 2: Port `plasma-workspace` (2-3 weeks) - -```toml -[source] -tar = "https://download.kde.org/stable/plasma/6.3.4/plasma-workspace-6.3.4.tar.xz" - -[build] -template = "custom" -dependencies = [ - "kwin", "plasma-framework", "kio", "kservice", - "knotifications", "kpackage", "kconfigwidgets", - "qtbase", "qtwayland", "qtdeclarative", - "dbus", -] -``` - -**Key component**: `plasmashell` — desktop shell (panels, desktop containment, applet loader). Depends heavily on QML (qtdeclarative). - -#### Step 3: Create `config/kde.toml` - -```toml -include = ["desktop.toml"] - -[general] -filesystem_size = 4096 - -[packages] -# Qt -qtbase = {} -qtwayland = {} -qtdeclarative = {} -qtsvg = {} -# KDE Frameworks (minimal set) -extra-cmake-modules = {} -kcoreaddons = {} -kconfig = {} -kwidgetsaddons = {} -ki18n = {} -kwindowsystem = {} -kio = {} -kservice = {} -kxmlgui = {} -knotifications = {} -kpackage = {} -plasma-framework = {} -kconfigwidgets = {} -# KDE Plasma -kwin = {} -plasma-workspace = {} -plasma-desktop = {} -kde-cli-tools = {} -# Support -dbus = {} -mesa = {} -libdrm = {} -libinput = {} -seatd = {} -evdevd = {} -drmd = {} - -# Override init to launch KDE session -[[files]] -path = "/usr/lib/init.d/20_orbital" -data = """ -requires_weak 10_net -notify audiod -nowait VT=3 orbital orbital-kde -""" - -[[files]] -path = "/usr/bin/orbital-kde" -mode = 0o755 -data = """ -#!/usr/bin/env bash -set -ex - -export DISPLAY="" -export WAYLAND_DISPLAY=wayland-0 -export XDG_RUNTIME_DIR=/tmp/run/user/0 -export XDG_SESSION_TYPE=wayland -export KDE_FULL_SESSION=true -export XDG_CURRENT_DESKTOP=KDE - -mkdir -p /tmp/run/user/0 - -# Start D-Bus -dbus-daemon --system & - -# Start D-Bus session -eval $(dbus-launch --sh-syntax) - -# Start KWin (Wayland compositor + window manager) 
-kwin_wayland --replace & - -# Start Plasma Shell -sleep 2 -plasmashell & -""" -``` - -### System Integration Points - -#### D-Bus (Already Working) -D-Bus is ported and working in X11 config. KDE uses D-Bus extensively. - -#### Audio: PulseAudio/PipeWire Shim Needed -KDE expects PulseAudio or PipeWire. Redox has `scheme:audio`. - -**Options**: -- A: Port PipeWire (large effort) -- B: Write PulseAudio compatibility shim (medium effort) -- C: Use KDE without audio initially (skip for now) - -#### Service Management: D-Bus Service Files -KDE services register via D-Bus `.service` files. Need translation layer that: -1. Reads `/usr/share/dbus-1/services/*.service` files -2. Maps to Redox init scripts -3. Responds to D-Bus StartServiceByName calls - -#### Network: NetworkManager Integration -KDE uses NetworkManager. Redox has `smolnetd`. - -**Options**: -- A: Port NetworkManager (massive effort, needs systemd) -- B: Write NetworkManager D-Bus shim (medium effort) -- C: Skip network config UI initially - -### KDE Implementation Timeline - -| Phase | Duration | Milestone | -|--------|----------|-----------| -| Qt Foundation (qtbase, qtwayland, qtdeclarative) | 8-12 weeks | Qt app shows window | -| KDE Frameworks (25 frameworks) | 8-12 weeks | KDE app (Kate) runs | -| KWin + Plasma Shell | 6-8 weeks | KDE desktop visible | -| KDE Apps (Dolphin, Konsole, Kate) | 4-6 weeks | Full KDE ecosystem | -| **Total** | **~38 weeks (9-10 months)** | Full KDE Plasma session | - -**Critical Insight**: Qt Foundation is highest-risk phase. If Qt compilation hits unexpected relibc gaps, entire timeline shifts. - ---- - -## 4. Linux Driver Compatibility: Concrete Path - -### Why This Is Needed - -Writing native Rust GPU drivers for every vendor is years of work. Linux has mature, vendor-supported GPU drivers. A compatibility layer lets us port them with `#ifdef __redox__` patches instead of full rewrites. - -**Target Drivers** (priority order): -1. 
**i915** (Intel) - Best documented, most relevant for laptops -2. **amdgpu** (AMD) - Large market share, good open-source driver -3. **nouveau / nvk** (NVIDIA) - Community driver, limited performance -4. **Skip**: NVIDIA proprietary (binary-only, impossible without Linux kernel) - -### Architecture - -**Two-Mode Design**: - -**Mode A: C Driver Port** - Compile Linux C driver against our headers, run as userspace daemon -**Mode B: Rust Wrapper** - Rust crate provides idiomatic API, internally calls compat layer - -Both modes share: `redox-driver-sys` - -``` -┌────────────────────────────────────────────────────────────┐ -│ Mode A: C Driver Port │ -│ Linux C driver (i915.ko source) │ -│ compiled with -D__redox__ against linux-kpi headers │ -├────────────────────────────────────────────────────────────┤ -│ Mode B: Rust Wrapper │ -│ Rust crate (redox-intel-gpu) using compat APIs │ -├────────────────────────────────────────────────────────────┤ -│ linux-kpi (C header compatibility) │ -│ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ -│ │ linux/ │ │ linux/ │ │ linux/ │ │ -│ │ slab.h │ │ mutex.h │ │ pci.h │ │ -│ └──────────┘ └──────────┘ └──────────┘ │ -├────────────────────────────────────────────────────────────┤ -│ redox-driver-sys (Rust crate) │ -│ Provides: memory mapping, IRQ, DMA, PCI, DRM scheme │ -├────────────────────────────────────────────────────────────┤ -│ Red Bear OS │ -│ scheme:memory scheme:irq scheme:pci scheme:drm │ -└────────────────────────────────────────────────────────────┘ -``` - -### Crate 1: `redox-driver-sys` (2-3 weeks) - -**Repository**: New crate in Redox ecosystem -**Purpose**: Safe Rust wrappers around Redox's scheme-based hardware access - -``` -redox-driver-sys/ -├── Cargo.toml -├── src/ -│ ├── lib.rs — Re-exports -│ ├── memory.rs — Physical memory mapping (scheme:memory) -│ ├── irq.rs — Interrupt handling (scheme:irq) -│ ├── pci.rs — PCI device access (scheme:pci / pcid) -│ ├── io.rs — Port I/O (iopl syscall) -│ └── dma.rs — DMA buffer 
management -``` - -**Key Implementations**: - -```rust -// src/memory.rs -pub fn map_physical(phys: u64, size: usize, flags: MapFlags) -> Result<*mut u8> { - let fd = File::open("scheme:memory/physical")?; - let ptr = syscall::fmap(fd.as_raw_fd(), &Map { - offset: phys, - size, - flags: flags.to_syscall_flags(), - })?; - Ok(ptr as *mut u8) -} - -pub fn unmap_physical(ptr: *mut u8, size: usize) -> Result<()> { - syscall::funmap(ptr as usize, size)?; - Ok(()) -} -``` - -```rust -// src/irq.rs -pub struct IrqHandle { fd: File } - -impl IrqHandle { - pub fn request(irq_num: u32) -> Result { - let fd = File::open(&format!("scheme:irq/{}", irq_num))?; - Ok(Self { fd }) - } - - pub fn wait(&mut self) -> Result<()> { - let mut buf = [0u8; 8]; - self.fd.read(&mut buf)?; - Ok(()) - } -} -``` - -```rust -// src/pci.rs -pub struct PciDevice { - bus: u8, dev: u8, func: u8, - vendor_id: u16, device_id: u16, - bars: [u64; 6], - bar_sizes: [usize; 6], - irq: u32, -} - -pub fn enumerate() -> Result> { - // Read from pcid-spawner or scheme:pci - // Parse PCI configuration space - // Filter to GPU devices (class 0x030000-0x0302xx) -} -``` - -### Crate 2: `linux-kpi` (3-4 weeks) - -**Repository**: New crate. Installs C headers for use by Linux C drivers. 
-**Purpose**: Provides `linux/*.h` headers that translate Linux kernel APIs to `redox-driver-sys` - -``` -linux-kpi/ -├── Cargo.toml -├── src/ -│ ├── lib.rs — Rust API for Rust drivers -│ ├── c_headers/ — C headers for C driver ports -│ │ ├── linux/ -│ │ │ ├── slab.h → malloc/kfree (redox-driver-sys::memory) -│ │ │ ├── mutex.h → pthread mutex (redox-driver-sys::sync) -│ │ │ ├── spinlock.h → atomic lock -│ │ │ ├── pci.h → redox-driver-sys::pci -│ │ │ ├── io.h → port I/O (iopl) -│ │ │ ├── irq.h → redox-driver-sys::irq -│ │ │ ├── device.h → struct device wrapper -│ │ │ ├── kobject.h → reference-counted object -│ │ │ ├── workqueue.h → thread pool -│ │ │ ├── idr.h → ID allocation -│ │ │ └── dma-mapping.h → bus DMA (redox-driver-sys::dma) -│ │ ├── drm/ -│ │ │ ├── drm.h → DRM core types -│ │ │ ├── drm_crtc.h → KMS types -│ │ │ ├── drm_gem.h → GEM buffer objects -│ │ │ └── drm_ioctl.h → DRM ioctl definitions -│ │ └── asm/ -│ │ └── io.h → inl/outl port I/O -│ └── rust_impl/ — Rust implementations backing C headers -│ ├── memory.rs — kzalloc, kmalloc, kfree -│ ├── sync.rs — mutex, spinlock, completion -│ ├── workqueue.rs — work queue thread pool -│ ├── pci.rs — pci_register_driver, etc. 
-│ └── drm_shim.rs — DRM core shim (connects to scheme:drm) -``` - -**Example C Header**: - -```c -// c_headers/linux/slab.h -#ifndef _LINUX_SLAB_H -#define _LINUX_SLAB_H - -#include - -#define GFP_KERNEL 0 -#define GFP_ATOMIC 1 -#define GFP_DMA32 2 - -void *kmalloc(size_t size, unsigned int flags); -void *kzalloc(size_t size, unsigned int flags); -void kfree(const void *ptr); - -#endif -``` - -**Corresponding Rust Implementation**: - -```rust -// src/rust_impl/memory.rs -use std::alloc::{alloc, alloc_zeroed, dealloc, Layout}; - -#[no_mangle] -pub extern "C" fn kmalloc(size: usize, _flags: u32) -> *mut u8 { - unsafe { - let layout = Layout::from_size_align(size, 64).unwrap(); - alloc(layout) - } -} - -#[no_mangle] -pub extern "C" fn kzalloc(size: usize, _flags: u32) -> *mut u8 { - unsafe { - let layout = Layout::from_size_align(size, 64).unwrap(); - alloc_zeroed(layout) - } -} - -#[no_mangle] -pub extern "C" fn kfree(ptr: *const u8) { - if !ptr.is_null() { - unsafe { - // Linux kfree doesn't take size. Need size-tracking allocator. - // Use HashMap for tracking. 
- } - } -} -``` - -### Crate 3: `redox-drm` (12-16 weeks, overlaps with Wayland DRM) - -**Repository**: Part of Redox base repo or new crate -**Purpose**: The daemon that registers `scheme:drm` and talks to GPU hardware - -``` -redox-drm/ -├── Cargo.toml -├── src/ -│ ├── main.rs — Daemon entry, scheme registration -│ ├── scheme.rs — "drm" scheme handler (processes ioctls) -│ ├── kms/ -│ │ ├── mod.rs — KMS core -│ │ ├── crtc.rs — CRTC state machine -│ │ ├── connector.rs — Hotplug detection, EDID reading -│ │ ├── encoder.rs — Encoder management -│ │ ├── plane.rs — Primary/cursor planes -│ │ └── framebuffer.rs — Framebuffer allocation -│ ├── gem.rs — GEM buffer object management -│ ├── dmabuf.rs — DMA-BUF export/import via FD passing -│ └── drivers/ -│ ├── mod.rs — trait GpuDriver -│ └── intel/ -│ ├── mod.rs — Intel driver entry -│ ├── gtt.rs — Graphics Translation Table -│ ├── display.rs — Display pipe configuration -│ └── ring.rs — Command ring buffer (for acceleration later) -``` - -**Core DRM Scheme Protocol**: - -```rust -// src/scheme.rs -enum DrmRequest { - // Core - GetVersion, - GetCap { capability: u64 }, - - // KMS - ModeGetResources, - ModeGetConnector { connector_id: u32 }, - ModeGetEncoder { encoder_id: u32 }, - ModeGetCrtc { crtc_id: u32 }, - ModeSetCrtc { crtc_id: u32, fb_id: u32, x: u32, y: u32, connectors: Vec, mode: ModeModeInfo }, - ModePageFlip { crtc_id: u32, fb_id: u32, flags: u32, user_data: u64 }, - ModeAtomicCommit { flags: u32, props: Vec }, - - // GEM - GemCreate { size: u64 }, - GemClose { handle: u32 }, - GemMmap { handle: u32 }, - - // Prime/DMA-BUF - PrimeHandleToFd { handle: u32, flags: u32 }, - PrimeFdToHandle { fd: i32 }, -} -``` - -**Intel Driver** (native Rust modesetting): - -```rust -// src/drivers/intel.rs -pub struct IntelDriver { - mmio: *mut u8, // Memory-mapped I/O registers (via scheme:memory) - gtt_size: usize, // Graphics Translation Table size - framebuffer: PhysAddr, // Current scanout buffer -} - -impl IntelDriver { - 
pub fn new(pci_dev: &PciDev) -> Result { - // Map MMIO registers via scheme:memory/physical - let mmio = map_physical_memory(pci_dev.bar[0], pci_dev.bar_size[0])?; - - // Initialize GTT and display pipeline - Ok(Self { mmio, gtt_size, framebuffer }) - } - - pub fn modeset(&self, mode: &ModeInfo) -> Result<()> { - // 1. Allocate framebuffer in GTT - // 2. Configure pipe (timing, PLL) - // 3. Configure transcoder - // 4. Configure port (HDMI/DP) - // 5. Enable scanout from new framebuffer - Ok(()) - } - - pub fn page_flip(&self, crtc: u32, fb: PhysAddr) -> Result<()> { - // 1. Update GTT entry to point to new framebuffer - // 2. Trigger page flip on next VBlank - // 3. VBlank interrupt signals completion (via scheme:irq) - Ok(()) - } -} -``` - -### Concrete Porting Example: Intel i915 Driver (3-4 weeks) - -#### Step 1: Extract i915 from Linux kernel - -```bash -# Clone Linux kernel -git clone --depth 1 https://github.com/torvalds/linux.git -# Extract relevant directories -tar cf intel-driver.tar linux/drivers/gpu/drm/i915/ \ - linux/include/drm/ \ - linux/include/linux/ \ - linux/arch/x86/include/ -``` - -#### Step 2: Create recipe - -```toml -# recipes/wip/drivers/i915/recipe.toml -[source] -tar = "intel-driver.tar" - -[build] -template = "custom" -dependencies = [ - "redox-driver-sys", - "linux-kpi", - "redox-drm", -] - -script = """ -DYNAMIC_INIT - -# Build i915 driver as a shared library -# linked against linux-kpi and redox-driver-sys -export CFLAGS="-I${COOKBOOK_SYSROOT}/include/linux-kpi -D__redox__" -export LDFLAGS="-lredox_driver_sys -llinux_kpi -lredox_drm" - -# Compile driver source files -find drivers/gpu/drm/i915/ -name '*.c' | while read src; do - x86_64-unknown-redox-gcc -c $CFLAGS "$src" -o "${src%.c}.o" || true -done - -# Link into a single shared library -x86_64-unknown-redox-gcc -shared -o i915_redox.so \ - $(find drivers/gpu/drm/i915/ -name '*.o') \ - $LDFLAGS - -mkdir -p ${COOKBOOK_STAGE}/usr/lib/redox/drivers -cp i915_redox.so 
${COOKBOOK_STAGE}/usr/lib/redox/drivers/ -""" -``` - -#### Step 3: Minimal patches needed - -For i915 on Redox, typical `#ifdef __redox__` changes: - -```c -// 1. Replace Linux module init with daemon main() -#ifdef __redox__ -int main(int argc, char **argv) { - return i915_driver_init(); -} -#else -module_init(i915_init); -module_exit(i915_exit); -#endif - -// 2. Replace kernel memory allocation -#ifdef __redox__ -#include // Our compat header -#else -#include // Real Linux -#endif - -// 3. Replace PCI access -#ifdef __redox__ -struct pci_dev *pdev = redox_pci_find_device(PCI_VENDOR_ID_INTEL, device_id); -#else -pdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); -#endif - -// 4. Replace MMIO mapping -#ifdef __redox__ -void __iomem *regs = redox_ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); -#else -void __iomem *regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); -#endif -``` - -### Concrete Porting Example: AMD amdgpu Driver (6-8 weeks) - -AMD's driver is larger and more complex. Key challenges: - -#### 1. Firmware Loading - -Need to implement: -``` -scheme:firmware/amdgpu/ — firmware blob storage -request_firmware() — compat function that reads from scheme -``` - -#### 2. TTM Memory Manager - -Port TTM to use Redox's memory scheme: -```rust -// TTM → Redox mapping: -// ttm_tt → allocated pages via scheme:memory -// ttm_buffer_object → GemHandle in scheme:drm -// ttm_bo_move → page table updates via GPU MMIO -``` - -#### 3. Display Core (DC) - -AMD's display code is ~100K lines. Need to: -- Port DCN (Display Core Next) hardware programming -- Adapt to Redox's DRM scheme instead of Linux kernel DRM -- Keep most code unchanged, just redirect memory/register access - -#### 4. Power Management - -amdgpu uses Linux power management APIs. 
Need stubs: -```c -#ifdef __redox__ -// No power management on Redox yet — always-on -#define pm_runtime_get_sync(dev) 0 -#define pm_runtime_put_autosuspend(dev) 0 -#define pm_runtime_allow(dev) 0 -#endif -``` - -**Estimated patches for amdgpu**: ~2000-3000 lines of `#ifdef __redox__` - -### Linux Driver Implementation Timeline - -| Phase | Component | Effort | Delivers | -|-------|-----------|---------|----------| -| 1 | `redox-driver-sys` crate | 2-3 weeks | Memory, IRQ, PCI, I/O primitives | -| 2 | Intel native driver (in `redox-drm`) | 6-8 weeks | First working GPU driver, modesetting | -| 3 | `linux-kpi` C headers (core subset) | 3-4 weeks | Memory, sync, PCI, workqueue headers | -| 4 | `linux-kpi` DRM headers | 2-3 weeks | DRM/KMS/GEM C API headers | -| 5 | i915 C driver port | 3-4 weeks | Proves LinuxKPI approach works | -| 6 | `linux-kpi` extended (TTM, firmware) | 4-6 weeks | Enables AMD driver | -| 7 | amdgpu C driver port | 6-8 weeks | AMD GPU support | - -**Phase 1-2 is critical path** — a native Rust Intel driver proves architecture and provides immediate value. Phases 3-7 can happen in parallel or later. - -**With 2 developers**: -- **Month 1-2**: redox-driver-sys + Intel native driver → first display output -- **Month 3-4**: linux-kpi core + DRM headers → i915 C port proof of concept -- **Month 5-8**: linux-kpi TTM + amdgpu port → AMD support -- **Total: 6-8 months** to support both Intel and AMD GPUs - -**With 1 developer**: -- **Month 1-3**: redox-driver-sys + Intel native driver -- **Month 4-6**: linux-kpi core + i915 port -- **Month 7-10**: amdgpu port -- **Total: 8-10 months** - ---- - -## 5. 
Critical Paths & Dependencies - -### Dependency Chain: Hardware → KDE Desktop - -``` -┌─────────────────────────────────────────────────────────┐ -│ KDE Plasma Desktop │ -│ (KWin compositor, Plasma Shell, Qt, KDE Frameworks) │ -├─────────────────────────────────────────────────────────┤ -│ Wayland Protocol │ -│ (libwayland, wayland-protocols, compositor) │ -├─────────────────────────────────────────────────────────┤ -│ Graphics Stack │ -│ (Mesa3D OpenGL/Vulkan, GBM, libdrm, GPU driver) │ -├─────────────────────────────────────────────────────────┤ -│ Kernel Interfaces │ -│ (DRM/KMS, GEM/TTM, DMA-BUF, evdev, udev) │ -├─────────────────────────────────────────────────────────┤ -│ Hardware │ -│ (GPU: AMD/Intel/NVIDIA, Input: keyboard/mouse/touch) │ -└─────────────────────────────────────────────────────────┘ -``` - -### Critical Path to KDE Plasma - -``` -M1 (POSIX) ───────────────────────────────────────────┐ - │ -M3 (DRM/KMS) ───────────── M4 (Compositor) ── M5 (Qt) ── M6 (KDE) ── M7 (Plasma) - │ ↑ │ -M2 (Input) ──────────────┘ M8 (Linux drivers, parallel) -``` - -**Shortest path to a desktop**: M1 → M2 → M3 (parallel) → M4 → M5 → M6 → M7 -**Shortest path to GPU drivers**: M3 → M8 (can start as soon as `redox-driver-sys` exists) - -### Parallel Execution Opportunities - -``` -Week 1-4: M1 (relibc POSIX gaps) -Week 3-12: M2 (evdev input) ──── parallel ──── M3 (DRM/KMS) -Week 13-16: M4 (Wayland compositor = M2 + M3 + M1) -Week 13-24: M8 (Linux driver compat, parallel with M4-M6) -Week 17-24: M5 (Qt Foundation) -Week 25-32: M6 (KDE Frameworks) -Week 33-38: M7 (Plasma Desktop) -``` - -**Total to KDE Plasma**: ~38 weeks (~9 months) with 2 developers -**Total to Linux driver compat**: ~24 weeks (~6 months) in parallel - ---- - -## 6. Recommendations & Next Steps - -### Immediate Actions (Week 1-4) - -1. 
**Fix relibc POSIX gaps** (1-2 weeks) - - Implement `signalfd`, `timerfd`, `eventfd` in relibc - - Add `F_DUPFD_CLOEXEC`, `MSG_CMSG_CLOEXEC`, `MSG_NOSIGNAL` - - Implement `open_memstream` - - **Result**: libwayland builds natively (no patches) - -2. **Start evdev daemon** (2-4 weeks, parallel with POSIX) - - Create `recipes/core/evdevd/` - - Implement scheme protocol and ioctl handlers - - **Result**: Input stack foundation - -3. **Start redox-driver-sys crate** (2-3 weeks, parallel with POSIX) - - Implement memory, IRQ, PCI, I/O primitives - - **Result**: Hardware access foundation for LinuxKPI - -### Medium-Term Actions (Week 5-16) - -4. **Complete input stack** (2-3 weeks after evdevd) - - Build udev shim - - Port libinput - - **Result**: Full input stack for Wayland - -5. **Build DRM daemon with Intel driver** (8-12 weeks) - - Implement KMS core, GEM, DMA-BUF - - Implement Intel native modesetting - - **Result**: Hardware display control - -6. **Build linux-kpi headers** (3-4 weeks, parallel with DRM) - - Implement C headers for Linux kernel APIs - - Implement Rust backing implementations - - **Result**: Compatibility layer for C drivers - -### Long-Term Actions (Week 17-38+) - -7. **Port Wayland compositor** (4-6 weeks after M2+M3+M1) - - Add Redox backends to Smithay - - Build smallvil with Redox backends - - **Result**: First functional Wayland compositor - -8. **Port Qt Foundation** (8-12 weeks, parallel with compositor) - - Port qtbase, qtwayland, qtdeclarative - - Fix platform detection and shared memory - - **Result**: Qt applications can run - -9. **Port KDE Frameworks** (8-12 weeks) - - Port 25+ frameworks (Tier 1, 2, 3) - - **Result**: KDE applications can be built - -10. **Port KDE Plasma** (6-8 weeks) - - Port KWin, plasma-workspace, plasma-desktop - - Create config/kde.toml - - **Result**: Full KDE Plasma desktop - -11. 
**Port Linux GPU drivers** (3-4 weeks after linux-kpi, parallel) - - Port i915 as proof of concept - - Port amdgpu for AMD support - - **Result**: Broad GPU hardware support - -### Build System Improvements - -**Issue Found**: FUSE mount error (ioctl 25) during build -**Recommendation**: Add build environment cleanup script: -```bash -# scripts/clean-build-env.sh -#!/bin/bash -fusermount3 -u build/x86_64/desktop/filesystem 2>/dev/null || true -fusermount3 -u /tmp/redox_installer 2>/dev/null || true -rm -rf build/x86_64/desktop/filesystem 2>/dev/null || true -``` - -**Integration**: Add to Makefile: -```makefile -clean: FORCE - @./scripts/clean-build-env.sh - # ... rest of clean target -``` - -### Resource Requirements - -**Storage**: 20GB+ free space (full build with all recipes) -**RAM**: 4GB minimum, 8GB+ recommended -**Network**: Required for downloading sources and toolchain -**OS**: Linux (Arch/Manjaro, Debian/Ubuntu, Fedora, Gentoo) - ---- - -## 7. Risk Assessment & Mitigation - -### High-Risk Areas - -1. **Qt Foundation** (HIGH RISK) - - **Risk**: Unexpected relibc gaps blocking Qt compilation - - **Impact**: Entire KDE timeline shifts by months - - **Mitigation**: Start Qt porting early, test with software rendering - -2. **Linux Driver Porting** (MEDIUM RISK) - - **Risk**: Linux driver code complexity exceeds LinuxKPI capabilities - - **Impact**: AMD/NVIDIA drivers may not work - - **Mitigation**: Start with Intel (simplest), prove concept before AMD - -3. **Wayland Compositor** (LOW-MEDIUM RISK) - - **Risk**: Smithay Redox backends integration issues - - **Impact**: Wayland session delayed - - **Mitigation**: Use native Rust Intel driver first, no LinuxKPI dependency - -### Technical Risks - -1. **No GPU Acceleration** - - All rendering is software-only via LLVMpipe - - Performance will be poor for desktop workloads - - **Mitigation**: Prioritize hardware GPU driver work - -2. 
**Missing System Integration** - - No NetworkManager equivalent → no network UI - - No PipeWire → no audio in KDE - - **Mitigation**: Build minimal shims, skip features initially - -3. **Kernel ABI Unstable** - - Redox syscall ABI intentionally unstable - - Changes may break compatibility layers - - **Mitigation**: Work through libredox/relibc, not kernel syscalls directly - ---- - -## 8. Conclusion - -Red Bear OS has: -- ✅ Comprehensive documentation with concrete implementation paths -- ✅ Functional build system with Rust-based tools -- ✅ Active development with 60+ patches for Linux compatibility -- ✅ Clear roadmap to Wayland, KDE Plasma, and Linux drivers -- ⚠️ Identified blockers (7 POSIX gaps, no GPU acceleration, missing DRM/KMS) - -**Estimated Timelines**: -- **Wayland compositor**: 5-6 months (M1 + M2 + M3 + M4) -- **KDE Plasma desktop**: 9-10 months (M1 → M7) -- **Linux driver compatibility**: 6-8 months (M3 + M8) - -**Key Insights**: -1. POSIX gaps in relibc are the foundational blocker - 1-2 weeks to fix -2. Input stack and DRM/KMS can be built in parallel (4-12 weeks each) -3. Qt Foundation is the highest-risk phase - should start early -4. Native Rust Intel driver is a faster path than full LinuxKPI for initial GPU support -5. LinuxKPI approach is essential for AMD/NVIDIA long-term support - -**Recommendation**: Start with Milestone M1 (POSIX gaps) immediately, as it unblocks everything else. With 2 developers working in parallel on M2 (input) and M3 (DRM), a functional Wayland compositor is achievable in ~6 months, with KDE Plasma following in ~9 months. 
- ---- - -**Appendix A: Existing WIP Recipes Inventory** - -**Wayland Recipes** (21 packages): -- libwayland, wayland-protocols, wayland-utils -- libxkbcommon, xkeyboard-config -- mesa, libdrm -- cosmic-comp, cosmic-panel, libcosmic-wayland -- smallvil (Smithay) -- wlroots, sway, hyprland, niri, pinnacle, fht-compositor -- xwayland, anvil -- iced-wayland, winit-wayland, softbuffer-wayland, wayland-rs - -**KDE Recipes** (19 packages): -- ark, discover, gcompris, heaptrack, k3b, kamoso, kcachegrind -- kde-dolphin, kdenlive, kdevelop, kpatience, krita, ktorrent -- kwave, labplot, marble, massif-visualizer, okteta, skanpage - -**Patches Inventory**: 60+ `redox.patch` files across recipes - ---- - -**END OF REPORT** diff --git a/README.md b/README.md index 5e86a013..6205ee1c 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,7 @@ The current public roadmap and execution model live in the For readers landing on GitHub, the most useful entry points are: - [Documentation Index](./docs/README.md) — canonical map of current vs historical docs +- [Console to KDE Desktop Plan](./local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md) — canonical path from console boot to hardware-accelerated KDE Plasma on Wayland - [Desktop Stack Current Status](./local/docs/DESKTOP-STACK-CURRENT-STATUS.md) — current build/runtime truth for Qt, Wayland, and KDE surfaces - [WIP Migration Ledger](./local/docs/WIP-MIGRATION-LEDGER.md) — how Red Bear currently treats upstream WIP versus local overlays - [Script Behavior Matrix](./local/docs/SCRIPT-BEHAVIOR-MATRIX.md) — what the main sync/fetch/apply/build scripts do and do not guarantee diff --git a/docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md b/docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md index c42e1084..6d51c193 100644 --- a/docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md +++ b/docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md @@ -387,9 +387,11 @@ Current state: Canonical references: +- `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` — canonical desktop path from console to 
hardware-accelerated KDE Plasma on Wayland - `local/docs/QT6-PORT-STATUS.md` -- `docs/03-WAYLAND-ON-REDOX.md` -- `docs/05-KDE-PLASMA-ON-REDOX.md` +- `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` +- `docs/03-WAYLAND-ON-REDOX.md` — historical Wayland implementation rationale +- `docs/05-KDE-PLASMA-ON-REDOX.md` — historical KDE implementation rationale Acceptance: @@ -418,6 +420,7 @@ Acceptance: The current subsystem plans to treat as first-class are: +- `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` — canonical desktop path plan - `local/docs/IRQ-AND-LOWLEVEL-CONTROLLERS-ENHANCEMENT-PLAN.md` - `local/docs/USB-IMPLEMENTATION-PLAN.md` - `local/docs/WIFI-IMPLEMENTATION-PLAN.md` diff --git a/docs/README.md b/docs/README.md index e65c9b63..f5751a22 100644 --- a/docs/README.md +++ b/docs/README.md @@ -5,6 +5,12 @@ Technical documentation for Red Bear OS as an overlay distribution on top of Red This index is the entry point for the documentation set. Its main job is to make the current/canonical versus historical/reference split obvious. +> **Status note (2026-04-16):** The canonical desktop path document is now +> `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md`. It consolidates the Wayland, KDE, and GPU roadmap +> into one honest implementation plan. The historical docs below (01–05) remain useful for +> architecture reference and implementation rationale, but they should be read together with the +> new plan and the current local subsystem docs. + > **Status note (2026-04-14):** several documents below are historical implementation plans whose > original "missing / not started" language is now stale. The repo already contains substantial > Red Bear OS work under `local/`; use each document's top-level status notes together with @@ -31,7 +37,7 @@ current/canonical versus historical/reference split obvious. 
| Document set | Role | |---|---| | `README.md`, `AGENTS.md`, `docs/README.md`, `docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md` | canonical repository-level policy and current execution model | -| `local/docs/*IMPLEMENTATION-PLAN*.md`, `local/docs/*STATUS*.md` | canonical current Red Bear subsystem plans and status | +| `local/docs/*IMPLEMENTATION-PLAN*.md`, `local/docs/*STATUS*.md`, `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` | canonical current Red Bear subsystem plans and status | | `docs/01-REDOX-ARCHITECTURE.md` | architecture reference | | `docs/02-GAP-ANALYSIS.md`, `docs/03-WAYLAND-ON-REDOX.md`, `docs/04-LINUX-DRIVER-COMPAT.md`, `docs/05-KDE-PLASMA-ON-REDOX.md` | valuable but partly historical roadmap/design material | @@ -81,6 +87,7 @@ at a higher level. This summary is only a quick orientation layer. For canonical current-state detail, prefer: - `docs/07-RED-BEAR-OS-IMPLEMENTATION-PLAN.md` for repository-wide execution order, +- `local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md` for the canonical desktop path from console to KDE Plasma on Wayland, - `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` for desktop build/runtime truth, - `local/docs/PROFILE-MATRIX.md` for support-language by tracked profile, - and the active subsystem plans under `local/docs/` for detailed current workstreams. diff --git a/local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md b/local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md new file mode 100644 index 00000000..6bbe7e3a --- /dev/null +++ b/local/docs/CONSOLE-TO-KDE-DESKTOP-PLAN.md @@ -0,0 +1,895 @@ +# Red Bear OS: Console to Hardware-Accelerated KDE Desktop on Wayland + +## Purpose + +This document is the single authoritative implementation plan for the Red Bear OS path from +console boot to a hardware-accelerated KDE Plasma desktop on Wayland. 
+ +It consolidates and replaces the roadmap role previously spread across: + +- `docs/03-WAYLAND-ON-REDOX.md` +- `docs/05-KDE-PLASMA-ON-REDOX.md` +- `docs/02-GAP-ANALYSIS.md` +- `local/docs/AMD-FIRST-INTEGRATION.md` +- `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` +- `local/docs/QT6-PORT-STATUS.md` + +Those documents still matter for subsystem detail, package status, and implementation history. +This document is the place to answer the higher-level question: what still has to happen, in the +right order, before Red Bear OS can honestly claim a usable KDE Plasma desktop on Wayland, first +on the software path and then on real hardware acceleration. + +This plan is grounded in the current repo state, not in older greenfield assumptions. The project +already has substantial build-side progress across relibc, driver infrastructure, Wayland, Mesa, +Qt6, KF6, D-Bus, and desktop-facing profiles. The remaining problem is mostly not package absence. +The remaining problem is the gap between what builds and what is runtime-trusted. + +Scope here covers console boot to first working Wayland compositor proof, software-rendered Qt6 on +Wayland, hardware GPU validation for AMD and Intel, KWin session bring-up, and KDE Plasma session +bring-up. It does not cover USB, Wi-Fi, Bluetooth, tutorial-style examples, or repo structure and +build-command reference material already documented elsewhere. + +This document uses the current Red Bear hardware policy. AMD and Intel GPUs are equal-priority +desktop targets. + +## Current State Baseline + +### Evidence model + +This plan uses six evidence classes. They are intentionally strict and are not treated as equal. 
+ +| Evidence class | Meaning | Safe wording | Not safe wording | +|---|---|---|---| +| **builds** | package compiles and stages | builds | works | +| **boots** | image reaches prompt or known runtime surface | boots | desktop works | +| **enumerates** | scheme, device node, or service surface appears and answers basic queries | enumerates | usable end to end | +| **usable** | a bounded runtime path performs its intended task | usable for this path | broadly stable | +| **validated** | repeated proof on the intended target class with explicit checks | validated | complete everywhere | +| **experimental** | partial, scaffolded, or unproven despite visible progress | experimental | done | + +Interpretation rules used throughout this document: + +- if something only compiles, it is called **builds** +- if something boots but does not complete a session, it is called **boots** +- if a daemon registers a scheme or device node, it is called **enumerates** +- if the only proof is a bounded QEMU path, the claim stays bounded to that path +- if dependencies are still shimmed or stubbed, the layer remains **experimental** +- nothing is called **validated** without repeated runtime proof on the intended target class + +### Honest capability matrix + +| Area | Current state | Evidence | Notes | +|---|---|---|---| +| AMD bare-metal boot | present | validated for bounded current claim | ACPI, SMP, x2APIC all work | +| relibc Wayland and Qt unblockers | present | builds | signalfd, timerfd, eventfd, open_memstream, F_DUPFD_CLOEXEC, MSG_NOSIGNAL, bounded waitid, bounded RLIMIT, bounded eth0 networking, shm_open, bounded sem_open, bounded sys/ipc.h, bounded sys/shm.h | +| redox-driver-sys | present | builds | driver substrate | +| linux-kpi | present | builds | compatibility layer for Linux-style drivers | +| firmware-loader | present | builds, boots | scheme registers at boot | +| redox-drm with AMD and Intel | present | builds | runtime hardware validation still open | +| amdgpu 
C port | present | builds | AMD DC + TTM + linux-kpi compat compiles | +| evdevd | present | builds, boots | scheme registers at boot | +| udev-shim | present | builds, boots | scheme registers at boot | +| libwayland 1.24.0 | present | builds | no full compositor proof yet | +| wayland-protocols | present | builds | build-side blocker removed | +| Mesa EGL + GBM + GLES2 | present | builds | proven runtime path is still software via LLVMpipe | +| libdrm + libdrm_amdgpu | present | builds | package-level success only | +| Qt6 qtbase 6.11.0 | present | builds | Core, Gui, Widgets, DBus, Wayland, OpenGL, EGL | +| qtdeclarative | present | builds | QML JIT disabled | +| qtsvg | present | builds | build-visible | +| qtwayland | present | builds | build-visible | +| D-Bus 1.16.2 | present | builds, bounded runtime wiring | system bus wired in `redbear-full` | +| libinput 1.30.2 | present | builds | runtime integration still open | +| libevdev 1.13.2 | present | builds | runtime integration still open | +| linux-input-headers | present | builds | support package | +| seatd | present | builds | session-management runtime proof still open | +| All 32 KF6 frameworks | present | builds | major build milestone complete | +| kdecoration | present | builds | build-visible | +| plasma-wayland-protocols | present | builds | build-visible | +| kf6-kwayland | present | builds | build-visible | +| kf6-kcmutils | present | builds, reduced | widget-only build | +| `redbear-wayland` | present | builds, boots | bounded Wayland runtime profile | +| `redbear-full` | present | builds, boots | broader desktop plumbing profile | +| `redbear-kde` | present | builds | KDE session-surface profile | +| smallvil path | partial | boots, experimental | reaches xkbcommon init and EGL platform selection in QEMU | +| QEMU graphics truth | present | usable for bounded path | current renderer is llvmpipe, not hardware acceleration | +| D-Bus system bus in `redbear-full` | present | usable for bounded 
path | not equal to full session integration completeness | +| VirtIO networking in QEMU | present | usable | useful for bounded test environment | +| firmware-loader, evdevd, udev-shim scheme registration | present | enumerates | register during boot | +| KWin | blocked | experimental | recipe exists, blocked by remaining shimmed and stubbed dependencies | +| plasma-workspace | partial | experimental | recipe exists, still experimental | +| plasma-desktop | partial | experimental | recipe exists, still experimental | +| QtNetwork | blocked | intentionally disabled | relibc networking completeness still too narrow | +| hardware GPU acceleration | blocked | not runtime-proven | kernel DMA-BUF fd passing required | +| working Wayland compositor session | blocked | runtime not proven | smallvil does not complete a usable session | +| KWin compositor runtime | blocked | runtime not proven | no working KWin session | +| KDE Plasma session | blocked | runtime not proven | no full Plasma session | + +### What is DONE, build-side + +The repo has already crossed several major build-side gates. + +#### relibc surface that now builds downstream consumers + +The current build-visible relibc surface includes signalfd, timerfd, eventfd, open_memstream, +F_DUPFD_CLOEXEC, MSG_NOSIGNAL, bounded waitid, bounded RLIMIT behavior, bounded eth0 networking, +shm_open, bounded sem_open, bounded `sys/ipc.h`, and bounded `sys/shm.h`. + +#### driver and runtime-service substrate + +redox-driver-sys, linux-kpi, firmware-loader, redox-drm with AMD and Intel paths, the amdgpu C +port, evdevd, and udev-shim all build successfully. + +#### Wayland and graphics packages + +libwayland 1.24.0, wayland-protocols, Mesa EGL + GBM + GLES2 with `libEGL.so`, `libgbm.so`, +`libGLESv2.so`, `swrast_dri.so`, plus libdrm and libdrm_amdgpu all build. + +#### Qt6 and D-Bus + +D-Bus 1.16.2 builds. qtbase 6.11.0 builds with Core, Gui, Widgets, DBus, Wayland, OpenGL, and +EGL. 
qtdeclarative, qtsvg, and qtwayland also build. + +#### KF6 and KDE-facing build surfaces + +All 32 KF6 frameworks build. The completed set spans ecm, core and widget foundations, config, +internationalization, codecs, GUI add-ons, color and notification layers, job and archive support, +item models and views, Solid, D-Bus and service layers, package and crash handling, text and icon +layers, global shortcuts, KDE declarative support, XML GUI, bookmarks, idle time, KIO, and +KCMUtils. Additional KDE-facing packages that already build include kdecoration, +plasma-wayland-protocols, kf6-kwayland, and kf6-kcmutils. + +#### tracked desktop profiles + +The tracked desktop-facing profiles are `redbear-wayland`, `redbear-full`, and `redbear-kde`. + +These are real achievements and should be presented as such. They are not yet desktop-runtime proof. + +### What is runtime-proven, limited scope + +The current desktop-related runtime proof is bounded, but real. + +#### Boot and machine substrate + +Red Bear boots on AMD bare metal, and ACPI, SMP, and x2APIC work for the current bounded claim. + +#### bounded Wayland bring-up path + +`redbear-wayland` boots in QEMU, and smallvil reaches xkbcommon initialization plus EGL platform +selection on Redox. + +#### bounded graphics truth + +Current QEMU graphics are software-rendered, the renderer evidence is llvmpipe, QEMU is useful for +compositor and Qt bring-up, and QEMU is not proof of the final hardware-accelerated desktop path. + +#### bounded runtime services + +D-Bus system bus is wired in `redbear-full`, VirtIO networking works in QEMU, and firmware-loader, +evdevd, and udev-shim register schemes at boot. + +### What is NOT DONE + +This list must stay explicit. 
+ +#### runtime not proven + +No GPU hardware-accelerated rendering is proven, no kernel DMA-BUF support exists for the required +desktop path, no working Wayland compositor session is proven, no KWin compositor runtime is +proven, no KDE Plasma session is proven, and Qt6 OpenGL and EGL still have only software-path +runtime proof. + +#### builds still blocked or scaffolded + +KWin does not build end to end with fully real dependencies. Kirigami is still stub-only, KIO is +still a heavy shim build, libepoxy, libudev, lcms2, and libdisplay-info remain real blockers, +plasma-workspace and plasma-desktop remain experimental, and QtNetwork remains disabled due to +incomplete relibc networking semantics. + +### Baseline conclusion + +The repo is no longer stuck at package availability. It is now limited by runtime trust, hardware +validation, and KWin or Plasma session assembly. That is the real starting point for the plan below. + +## Dependency Stack + +### ASCII layer diagram + +```text ++--------------------------------------------------------------------------------+ +| KDE Plasma Session | +| plasma-workspace, plasma-desktop, shell, panels, launcher, apps | ++---------------------------------------^----------------------------------------+ + | ++---------------------------------------|----------------------------------------+ +| KWin desktop-session layer | +| KWin, kdecoration, seat and session wiring | ++---------------------------------------^----------------------------------------+ + | ++---------------------------------------|----------------------------------------+ +| Qt6 and KDE frameworks | +| Qt6 Widgets, QtWayland, QtDBus, QML, KF6, KDE support libs | ++---------------------------------------^----------------------------------------+ + | ++---------------------------------------|----------------------------------------+ +| Wayland compositor and protocols | +| smallvil first, then KWin, plus libwayland | 
++---------------------------------------^----------------------------------------+ + | ++---------------------------------------|----------------------------------------+ +| Mesa, GBM, EGL, GLES2, libdrm | +| software path first, hardware path after DMA-BUF | ++---------------------------------------^----------------------------------------+ + | ++---------------------------------------|----------------------------------------+ +| DRM, KMS, firmware, input, device enumeration | +| redox-drm, amdgpu, Intel path, evdevd, udev-shim | ++---------------------------------------^----------------------------------------+ + | ++---------------------------------------|----------------------------------------+ +| Kernel and libc substrate for desktop bring-up | +| relibc, fd passing, DMA-BUF, IRQ, PCI, schemes | ++---------------------------------------^----------------------------------------+ + | ++---------------------------------------|----------------------------------------+ +| Hardware and boot substrate | +| AMD64 boot, ACPI, SMP, x2APIC, AMD and Intel GPUs | ++--------------------------------------------------------------------------------+ +``` + +### Reading the dependency stack correctly + +This stack has two kinds of blockers. + +#### runtime substrate blockers + +These sit low in the stack and poison all higher work if they are not validated: + +- relibc runtime correctness +- input event correctness +- udev-like device enumeration correctness +- firmware loading correctness +- basic DRM and KMS correctness +- kernel DMA-BUF support for the accelerated path + +#### session-assembly blockers + +These sit higher and matter after the lower layers are trusted: + +- smallvil completion +- Qt6 client display on Wayland +- KWin dependency cleanup +- KWin runtime session wiring +- Plasma shell and workspace integration + +The plan must handle these in order. Otherwise failures in KWin or Plasma will really be lower-layer +failures in disguise. 
+ +### Layer-by-layer status + +#### Layer 0, hardware and boot + +Status: **partly runtime-proven** + +What is true now: + +- AMD bare-metal boot works for the current bounded claim +- ACPI, SMP, and x2APIC work +- AMD and Intel are equal-priority GPU targets + +What still needs proof: + +- real desktop-path validation on AMD GPUs +- real desktop-path validation on Intel GPUs + +#### Layer 1, kernel and libc substrate + +Status: **strong build-side, runtime incomplete** + +What is true now: + +- relibc exposes the build-visible Wayland and Qt unblockers +- redox-driver-sys and linux-kpi exist as the current driver substrate + +What still needs proof: + +- relibc behavior under real Wayland and Qt event-loop pressure +- kernel DMA-BUF fd passing for the hardware path + +#### Layer 2, DRM, firmware, input, enumeration + +Status: **build-visible and boot-visible, not runtime-trusted** + +What is true now: + +- redox-drm builds with AMD and Intel drivers +- amdgpu builds +- firmware-loader, evdevd, and udev-shim register at boot + +What still needs proof: + +- actual firmware loading by a real consumer +- actual input flow from Redox input sources into compositor-visible event devices +- actual scheme:drm registration and basic KMS query behavior in runtime +- actual AMD and Intel hardware-driver behavior on target machines + +#### Layer 3, graphics userland interface + +Status: **software path builds, hardware path blocked** + +What is true now: + +- Mesa EGL, GBM, and GLES2 build +- libdrm and libdrm_amdgpu build +- Qt6 OpenGL and EGL build +- QEMU proof still uses llvmpipe + +What still needs proof: + +- hardware renderer path through real DRM and real GPU drivers +- GBM allocation on the hardware path +- EGL and GLES stability on the hardware path + +#### Layer 4, Wayland protocol and compositor + +Status: **partial runtime proof, not complete** + +What is true now: + +- libwayland and wayland-protocols build +- smallvil is the bounded first runtime target +- 
smallvil reaches early initialization in QEMU + +What still needs proof: + +- a complete compositor session +- input routed into the compositor +- Qt6 client display in that compositor + +#### Layer 5, Qt6 and KF6 + +Status: **major build milestone complete, runtime still thin** + +What is true now: + +- Qt6 builds across core, widgets, DBus, Wayland, OpenGL, and EGL +- qtdeclarative and qtwayland build +- all 32 KF6 frameworks build + +What still needs proof: + +- real Qt6 Wayland client behavior on Redox +- behavior of QML-heavy pieces under the no-JIT path +- broader networking semantics needed before QtNetwork can be enabled + +#### Layer 6, KWin session shell + +Status: **experimental and blocked** + +What is true now: + +- recipes exist +- `redbear-kde` exists +- several KWin-adjacent packages already build + +What still needs proof: + +- replacement of shimmed and stubbed blockers with real enough dependencies +- KWin compile success against honest dependencies +- KWin runtime as the compositor + +#### Layer 7, KDE Plasma session + +Status: **not yet proven** + +What is true now: + +- Plasma recipe surfaces exist +- the stack is far enough along that this is now a session-assembly problem, not a package-startup problem + +What still needs proof: + +- plasma-workspace integration +- plasma-desktop integration +- panel, launcher, file manager, settings, and session service behavior + +### Dependency-stack conclusion + +The shortest honest path is not "port more packages". The shortest honest path is "validate the +substrate, finish one software compositor path, finish one KWin session path, finish one Plasma +session path, then land the real hardware renderer path in parallel". + +## Phased Work Plan + +This plan uses fresh Phase 1 through Phase 5 numbering and does not reuse the old P0 through P6 +scheme. 
+ +### Phase 1: Runtime Substrate Validation + +**Duration:** 4 to 6 weeks + +**Goal:** turn the lowest desktop-facing layers from build-visible into runtime-trusted. + +This phase matters more than any other because it removes ambiguity from the entire stack. Without +it, later compositor or KDE failures will be impossible to classify correctly. + +#### Core work + +1. Validate relibc POSIX APIs against real consumers, especially libwayland and Qt6 runtime paths. +2. Validate the evdevd path from Redox input schemes through to `/dev/input/eventX` behavior. +3. Validate udev-shim device enumeration semantics for the current compositor and input stack. +4. Validate firmware-loader and `scheme:firmware` with real firmware blobs and a real consumer path. +5. Validate `scheme:drm/card0` registration and bounded KMS queries in QEMU. +6. Produce a repeatable runtime-service health check for the `redbear-wayland` slice. + +#### Why this phase exists + +The repo already compiles the lower desktop stack. What it lacks is evidence that the lower stack +behaves correctly under real use. Phase 1 is where builds become runtime-trusted enough to support +the first serious compositor pass. + +#### Deliverables + +##### relibc runtime validation set + +Validate the relibc surfaces already present in-tree: + +- signalfd +- timerfd +- eventfd +- open_memstream +- F_DUPFD_CLOEXEC +- MSG_NOSIGNAL +- bounded waitid +- bounded shared-memory and semaphore paths used by Qt6 + +The standard here is not just "the symbol exists". The standard is that real consumers can use the +API without hidden workarounds, hangs, or broken semantics. 
+ +##### evdev input validation set + +Validate the current input chain end to end: + +- input source emits events +- evdevd exposes expected event devices +- keyboard events arrive with correct semantics +- mouse events arrive with correct semantics + +##### udev-shim validation set + +Validate that current consumers can discover and classify the devices they need. Full Linux parity +is not required. Sufficient enumeration for the current desktop path is required. + +##### firmware-loader validation set + +Validate firmware loading with real blobs and a real consumer path. Scheme registration alone is not +enough. The blob must be requestable, discoverable, loadable, and consumable at runtime. + +##### redox-drm runtime-surface validation set + +Validate bounded runtime behavior first in QEMU: + +- scheme registration for `scheme:drm/card0` +- basic KMS queries +- no startup-class failures in the redox-drm path + +#### Acceptance criteria + +Phase 1 is complete when all of the following are true: + +- `redbear-wayland` boots in the bounded validation environment +- Phase 1 runtime services register without startup errors +- relibc runtime checks pass for the selected desktop-facing consumers +- the input path reaches evdevd and yields expected event nodes and bounded test events +- udev-shim exposes the expected bounded device view +- firmware-loader successfully serves at least one real consumer path with real blobs +- `scheme:drm/card0` registers and answers bounded basic queries + +#### Exit statement + +At the end of Phase 1, the repo should be able to say: the desktop substrate is no longer only a +build artifact. It is runtime-trusted enough to support a compositor completion pass. + +### Phase 2: Wayland Compositor Runtime Proof + +**Duration:** 4 to 6 weeks + +**Goal:** produce the first working Wayland compositor session using software rendering. + +This phase stays intentionally narrow. 
The first complete compositor proof should happen in the +smallest runtime target available, which is still smallvil. + +#### Core work + +1. Complete the current smallvil runtime path. +2. Wire evdevd input into the compositor. +3. Wire Mesa software rendering through GBM and EGL. +4. Get a Qt6 widget application to display through the compositor. + +#### Why smallvil remains the right target + +Jumping straight to KWin would combine too many unknowns: compositor runtime, input, QML, session +services, dependency scaffolding, and desktop-shell behavior. smallvil is smaller, easier to debug, +and already present. It is the right place to finish the first software compositor proof. + +#### Deliverables + +##### complete smallvil runtime path + +The current proof stops during early initialization. This phase completes the path into a usable +session. + +##### input wired into compositor + +Keyboard and mouse must work through the current Redox input stack, not through an artificial bypass. + +##### software rendering path confirmed + +The proven renderer for this phase is LLVMpipe through Mesa, GBM, and EGL. That is acceptable. The +goal is correctness of compositor and client behavior, not hardware acceleration yet. + +##### Qt6 smoke client on Wayland + +The first meaningful desktop-facing end-to-end proof is a real Qt6 Wayland client window appearing +inside the compositor. + +#### Acceptance criteria + +Phase 2 is complete when all of the following are true: + +- smallvil launches into a working session in QEMU +- keyboard and mouse work through the current input stack +- Mesa software rendering works through GBM and EGL +- `qt6-wayland-smoke` shows a window inside the compositor in QEMU + +#### Exit statement + +At the end of Phase 2, the repo should be able to say: Red Bear OS has a working software-rendered +Wayland compositor path with a visible Qt6 client. 
+ +### Phase 3: Hardware GPU Enablement + +**Duration:** 12 to 20 weeks + +**Goal:** replace the software-only graphics proof with real hardware-accelerated display output and +rendering. + +This is the highest-uncertainty phase. It includes new kernel work, real hardware-driver proof, and +the first true Mesa hardware path on Redox. + +#### Core work + +1. Add kernel DMA-BUF fd passing. +2. Validate redox-drm AMD and Intel drivers on real hardware. +3. Validate Mesa hardware rendering path, including real renderer identity. +4. Validate GBM buffer allocation through the hardware path. + +#### Why this phase is separate + +The software compositor path is the fastest honest route to proving compositor and client behavior. +The hardware path is a different class of systems work. It should run in parallel with later KWin +and Plasma assembly instead of blocking everything else. + +#### Deliverables + +##### kernel DMA-BUF fd passing + +This is the gating feature for the accelerated desktop path. Without it, hardware-accelerated KDE +is not a credible target. + +##### real AMD hardware validation + +Validate on representative AMD hardware: + +- device detection +- MMIO mapping +- firmware loading +- connector detection +- mode enumeration +- bounded modeset proof + +##### real Intel hardware validation + +Validate on representative Intel hardware: + +- device detection +- MMIO mapping +- connector detection +- mode enumeration +- bounded modeset proof + +##### Mesa hardware rendering proof + +Validate the actual hardware renderer path rather than llvmpipe fallback. For AMD the target is the +radeonsi path. For Intel the target is the intended real Intel hardware path available in the stack. 
+ +#### Acceptance criteria + +Phase 3 is complete when all of the following are true: + +- kernel DMA-BUF fd passing exists and has focused proof coverage +- `modetest -M amd` shows display modes on real AMD hardware +- the equivalent Intel DRM query path shows display modes on real Intel hardware +- the compositor runs through the hardware path rather than llvmpipe on at least one real AMD class + and one real Intel class +- runtime evidence shows a hardware-backed renderer rather than software fallback + +#### Exit statement + +At the end of Phase 3, the repo should be able to say: Red Bear OS can drive real display hardware +and run the compositor on a hardware-accelerated path. + +### Phase 4: Desktop Session Assembly + +**Duration:** 6 to 10 weeks + +**Goal:** turn the compositor proof into a real desktop-session substrate centered on KWin. + +This phase starts after Phase 2. It does not need to wait for the full hardware path. KWin can come +up first on the software renderer and later inherit the accelerated renderer once Phase 3 lands. + +#### Core work + +1. Resolve KWin shimmed and stubbed blockers. +2. Get KWin to compile with real enough dependencies. +3. Launch KWin as the Wayland compositor. +4. Validate libinput backend behavior. +5. Validate D-Bus session behavior. +6. Validate seatd for the bounded KWin session model. + +#### blocked dependency set that must be closed + +- kirigami stub-only state +- heavy kio shim state where it blocks honest session claims +- libepoxy +- libudev +- lcms2 +- libdisplay-info + +#### Deliverables + +##### honest KWin build + +The milestone is not just that a recipe exists. The milestone is that KWin builds without fake +dependency satisfaction for core runtime behavior. + +##### KWin runtime as compositor + +KWin must launch as the compositor and own the display path. 
+ +##### session services for bounded desktop use + +D-Bus session behavior and seatd behavior must be good enough for the bounded KWin target this plan +claims. Linux parity is not required. Correct bounded behavior is required. + +#### Acceptance criteria + +Phase 4 is complete when all of the following are true: + +- KWin builds against real enough dependencies to support honest runtime claims +- KWin launches as the compositor +- KWin takes over display output in the bounded session path +- keyboard and mouse work through the KWin session path +- required D-Bus session behavior for the bounded KWin path works +- seatd behavior is validated for the bounded KWin session model + +#### Exit statement + +At the end of Phase 4, the repo should be able to say: Red Bear OS has a working Wayland desktop +session substrate centered on KWin. + +### Phase 5: KDE Plasma Session + +**Duration:** 8 to 12 weeks + +**Goal:** boot into a KDE Plasma session with the essential desktop shell and session services +working. + +This is the final desktop product phase. By this point the remaining work should mostly be session +assembly, application integration, and shell behavior. + +#### Core work + +1. Complete plasma-workspace compilation and integration. +2. Complete plasma-desktop compilation and integration. +3. Get the shell, panel, and launcher visible and usable. +4. Get settings and file-manager paths working. +5. Provide bounded network and audio integration suitable for the session claim. + +#### Deliverables + +##### Plasma shell + +The minimum target is not a screenshot. The session must show the shell, panel, and launcher and be +stable through basic interaction. + +##### application and settings path + +At least one real file-manager path and one settings path must work. Otherwise the session is still +too incomplete to count as a desktop. 
+ +##### bounded desktop-service integration + +For this phase the question is narrow: can the Plasma session boot into a usable desktop with bounded +network and audio integration. The long-term subsystem plans remain separate. + +#### Acceptance criteria + +Phase 5 is complete when all of the following are true: + +- `redbear-kde` boots into a KDE Plasma session +- KWin is the active compositor +- the Plasma shell, panel, and launcher appear +- an application can be launched from the session +- a file-manager path works through the current kio integration +- a settings path works +- bounded network and audio integration exist for the claimed session profile + +#### Exit statement + +At the end of Phase 5, the repo should be able to say one of two things: + +- if Phase 3 is still incomplete: Red Bear OS has a software-rendered KDE Plasma session on Wayland +- if Phase 3 is complete: Red Bear OS has a hardware-accelerated KDE Plasma session on Wayland + +## Critical Path + +### primary path to a software-rendered KDE session + +```text +Phase 1, runtime substrate validation + -> Phase 2, software Wayland compositor proof + -> Phase 4, KWin desktop-session assembly + -> Phase 5, KDE Plasma session +``` + +This is the shortest honest path to a KDE desktop claim. + +### parallel hardware path + +```text +Phase 1, runtime substrate validation + -> Phase 3, hardware GPU enablement + +Phase 3 proceeds in parallel with Phase 4 where possible + +Phase 3 + Phase 4 + Phase 5 + -> hardware-accelerated KDE Plasma desktop +``` + +### why Phase 1 is the real gate + +Phase 1 is the true gateway because it converts lower-layer package progress into runtime trust. +Without it, Phase 2, Phase 4, and Phase 5 failures will be misdiagnosed. + +### why Phase 2 comes before KWin + +The first complete compositor proof should happen in the smallest environment. smallvil is smaller, +already present, and easier to debug than KWin. 
It isolates compositor, input, and Qt client issues +before session-shell complexity is added. + +### why Phase 3 should not block Phase 4 + +Hardware acceleration is critical, but KWin and Plasma also have their own blockers: dependency +cleanup, session services, and compositor integration. Those can be solved on the software renderer +while the hardware path matures. + +### critical-path summary + +The execution order this repo should present is: + +1. validate the runtime substrate +2. prove one software compositor path +3. assemble one KWin session path +4. assemble one Plasma session path +5. land hardware acceleration in parallel + +## Risk Register + +| ID | Risk | Likelihood | Impact | Why it matters | Mitigation | +|---|---|---|---|---|---| +| R1 | relibc runtime gaps are worse than build evidence suggests | Medium | High | Qt6 and Wayland may still fail at runtime even though they build | validate with real consumers in Phase 1 | +| R2 | kernel DMA-BUF fd passing is a new feature with uncertain scope | High | High | hardware acceleration depends on it | isolate design and proof early in Phase 3 | +| R3 | AMD or Intel real-hardware validation reveals fundamental driver issues | High | High | compile success may not survive real modesetting or rendering | validate AMD and Intel separately on representative hardware | +| R4 | KWin porting needs significantly more patches than estimated | Medium | High | KWin sits on the desktop critical path | finish smallvil proof first, then attack KWin with cleaner lower-layer evidence | +| R5 | Kirigami and other QML-heavy pieces do not behave acceptably with QML JIT disabled | Medium | Medium to High | Plasma shell may build but behave badly | keep QML-heavy runtime proof explicit in Phase 4 and Phase 5 | +| R6 | Mesa hardware rendering needs Redox-specific winsys work beyond current estimates | Medium | High | hardware acceleration may stall after modesetting starts working | separate display proof from renderer proof 
| +| R7 | linux-kpi compatibility gaps only appear during real-hardware execution | High | Medium to High | compile-only success can hide runtime failures | budget for hardware-driven compatibility fixes in Phase 3 | + +## Timeline + +### planning assumptions + +These estimates assume 2 developers, usable access to representative AMD and Intel hardware, and no +major regression from unrelated upstream refresh during the desktop push. They do not assume perfect +first-pass success on real hardware. + +### phase estimates with 2 developers + +| Phase | Estimate | Notes | +|---|---|---| +| Phase 1, Runtime Substrate Validation | 4 to 6 weeks | must finish honestly before claiming runtime trust | +| Phase 2, Wayland Compositor Runtime Proof | 4 to 6 weeks | can overlap with late Phase 1 cleanup | +| Phase 3, Hardware GPU Enablement | 12 to 20 weeks | parallel track after Phase 1 | +| Phase 4, Desktop Session Assembly | 6 to 10 weeks | starts after Phase 2 | +| Phase 5, KDE Plasma Session | 8 to 12 weeks | starts after Phase 4 | + +### total duration with 2 developers + +#### to software-rendered KDE Plasma on Wayland + +- **22 to 34 weeks** +- roughly **6 to 8 months** + +This path is Phase 1 + Phase 2 + Phase 4 + Phase 5. + +#### to hardware-accelerated KDE Plasma on Wayland + +- **34 to 54 weeks** +- roughly **8 to 13 months** + +This path is Phase 1 + Phase 2 + Phase 3 in parallel + Phase 4 + Phase 5. + +### rough overlap model + +```text +Weeks 1 to 6 + Phase 1, runtime substrate validation + +Weeks 4 to 12 + Phase 2, software compositor proof + +Weeks 7 to 26 + Phase 3, hardware GPU enablement + +Weeks 13 to 22 + Phase 4, KWin session assembly + +Weeks 23 to 34 + Phase 5, KDE Plasma session +``` + +This is an intended overlap shape, not a guaranteed calendar. + +### one-developer estimate + +With 1 developer, the overall timeline is roughly 1.5x to 2x the two-developer estimates. 
+ +Practical meaning: + +- software-rendered KDE path: about 9 to 16 months +- hardware-accelerated KDE path: about 12 to 27 months + +The wider range reflects the loss of useful parallelism between hardware work and session work. + +### timeline conclusion + +The software-rendered KDE target is no longer a greenfield multi-year fantasy. The +hardware-accelerated KDE target is still a serious systems milestone because Phase 3 carries the +widest uncertainty band. + +## Relationship to Other Plans + +This is the canonical document for the desktop path from console boot to KDE Plasma on Wayland. It +does not replace every subsystem-specific plan. It sets the ordering, scope, and acceptance language +for the desktop path while deeper subsystem documents retain their detailed ownership. + +### primary supporting plans + +- `local/docs/RELIBC-COMPLETENESS-AND-ENHANCEMENT-PLAN.md` for relibc completeness detail, + ownership of patch-carried behavior, and deeper evidence tracking +- `local/docs/AMD-FIRST-INTEGRATION.md` for deeper GPU-driver and firmware detail, with the caveat + that this desktop plan uses equal-priority AMD and Intel targeting +- `local/docs/QT6-PORT-STATUS.md` for Qt6, KF6, KWin blocker, shim, and stub status +- `local/docs/DESKTOP-STACK-CURRENT-STATUS.md` for the short current-state desktop truth summary +- `local/docs/IRQ-AND-LOWLEVEL-CONTROLLERS-ENHANCEMENT-PLAN.md` for controller, IRQ, MSI, MSI-X, + and IOMMU quality work that supports later hardware desktop validation +- `local/docs/INPUT-SCHEME-ENHANCEMENT.md` for deeper input-path design if the current chain needs + structural cleanup beyond Phase 1 validation +- `local/docs/P2-AMD-GPU-DISPLAY.md` for the code-complete AMD display status and concrete AMD + validation targets such as `modetest -M amd` + +### how to use this plan with the supporting plans + +Read this document first for execution order, current claim language, completion criteria, and the +critical path.
Read the subsystem plans for the exact relibc, driver, package, or input details +behind those higher-level phases. diff --git a/local/docs/WIFI-IMPLEMENTATION-PLAN.md b/local/docs/WIFI-IMPLEMENTATION-PLAN.md index 539d00c4..826004d7 100644 --- a/local/docs/WIFI-IMPLEMENTATION-PLAN.md +++ b/local/docs/WIFI-IMPLEMENTATION-PLAN.md @@ -46,7 +46,7 @@ What the repo *does* have is a meaningful set of prerequisites: | Area | State | Notes | |---|---|---| | Wi-Fi controller support | **experimental bounded slice exists** | `redbear-iwlwifi` provides an Intel-only bounded driver-side package, not validated Wi-Fi connectivity | -| Linux wireless stack compatibility | **early compatibility scaffolding exists** | `linux-kpi` now carries initial `cfg80211` / `wiphy` / `mac80211` registration and station-mode compatibility scaffolding, but not a complete Linux wireless stack | +| Linux wireless stack compatibility | **early compatibility scaffolding exists** | `linux-kpi` now carries `cfg80211` / `wiphy` / `mac80211` registration, station-mode scaffolding, channel/band/rate/BSS definitions, and RX/TX data-path structures (24 tests pass), but not a complete Linux wireless stack | | Firmware loading | **partial prerequisite exists** | `firmware-loader` can serve firmware blobs generically | | Wireless control plane | **experimental bounded slice exists** | `redbear-wifictl` and `redbear-netctl` expose bounded prepare/init/activate/scan orchestration, not real association support | | Post-association IP path | **present** | Native `smolnetd` / `netcfg` / `dhcpd` / `redbear-netctl` path exists | @@ -338,7 +338,14 @@ The current tree now has the first explicit step in that direction as well: registration/setup, keeps carrier down until connect success, and routes `ieee80211_queue_work()` through the bounded LinuxKPI workqueue instead of silently dropping deferred work -- this new scaffolding is compile- and host-test-validated inside the `linux-kpi` crate +- the wireless scaffolding 
now also includes channel/band/rate definitions + (`Ieee80211Channel`, `Ieee80211Rate`, `Ieee80211SupportedBand` with NL80211 band constants + and IEEE80211 channel/rate flags), BSS information reporting (`Cfg80211Bss`, + `cfg80211_inform_bss`/`get_bss`/`put_bss`), RX/TX data-path structures (`Ieee80211RxStatus`, + `Ieee80211TxInfo` with RX/TX flag constants, `ieee80211_rx_irqsafe`/`tx_status`), + channel definition creation (`ieee80211_chandef_create`), and STA state-transition constants + (`IEEE80211_STA_NOTEXIST` through `IEEE80211_STA_AUTHORIZED`) +- all scaffolding is compile- and host-test-validated inside the `linux-kpi` crate (24 tests pass) - this is still **not** a claim that Red Bear now has a working Linux wireless stack ### Boundary where a full Linux port becomes too expensive @@ -391,11 +398,13 @@ mac80211/nl80211 remain out of scope for the current milestone. The current validation story for this slice is intentionally narrow and should be described that way: -- the `linux-kpi` host-side test suite now runs cleanly in this repo, including the Wi‑Fi-facing - helper changes in this slice: `request_firmware_direct`, `request_firmware_nowait`, - `mutex_trylock`, IRQ-depth tracking, variable private-allocation lifetime tracking, station-mode - scan/connect/disconnect lifecycle assertions, workqueue-backed `ieee80211_queue_work()`, the new - `sk_buff` headroom/tailroom helpers, and the existing memory tests +- the `linux-kpi` host-side test suite now runs cleanly in this repo (24 tests pass), including + the Wi‑Fi-facing helper changes in this slice: `request_firmware_direct`, + `request_firmware_nowait`, `mutex_trylock`, IRQ-depth tracking, variable private-allocation + lifetime tracking, station-mode scan/connect/disconnect lifecycle assertions, + workqueue-backed `ieee80211_queue_work()`, `sk_buff` headroom/tailroom helpers, channel/band + creation and flag tests, RX status default and flag combination tests, `ieee80211_get_tid` null + safety, and the 
existing memory tests - `redbear-iwlwifi` host-side tests now smoke-test the bounded firmware/transport/activation/scan/ retry actions used by the current Intel path - `redbear-iwlwifi` also now has a binary-level host-side CLI smoke test for the current bounded diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/atomic.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/atomic.h index 44f394a1..95950f0a 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/atomic.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/atomic.h @@ -42,6 +42,26 @@ static inline void atomic_sub(int i, atomic_t *v) __sync_fetch_and_sub(&v->counter, i); } +static inline int atomic_inc_and_test(atomic_t *v) +{ + return __sync_add_and_fetch(&v->counter, 1) == 0; +} + +static inline int atomic_dec_and_test(atomic_t *v) +{ + return __sync_sub_and_fetch(&v->counter, 1) == 0; +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + return __sync_add_and_fetch(&v->counter, i); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + return __sync_sub_and_fetch(&v->counter, i); +} + static inline int atomic_inc_return(atomic_t *v) { return __sync_add_and_fetch(&v->counter, 1); @@ -72,11 +92,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -static inline int atomic_dec_and_test(atomic_t *v) -{ - return __sync_sub_and_fetch(&v->counter, 1) == 0; -} - #define smp_mb() __sync_synchronize() #define smp_rmb() __sync_synchronize() #define smp_wmb() __sync_synchronize() diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/dma-mapping.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/dma-mapping.h index 0c0b0348..9c09dff5 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/dma-mapping.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/dma-mapping.h @@ -1,7 +1,8 @@ #ifndef 
_LINUX_DMA_MAPPING_H #define _LINUX_DMA_MAPPING_H -#include +#include "types.h" +#include enum dma_data_direction { DMA_BIDIRECTIONAL = 0, @@ -10,6 +11,8 @@ enum dma_data_direction { DMA_NONE = 3, }; +struct dma_pool; + #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)) extern void *dma_alloc_coherent(void *dev, size_t size, @@ -21,6 +24,14 @@ extern dma_addr_t dma_map_single(void *dev, void *ptr, size_t size, enum dma_data_direction dir); extern void dma_unmap_single(void *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); +extern struct dma_pool *dma_pool_create(const char *name, void *dev, size_t size, size_t align, size_t boundary); +extern void dma_pool_destroy(struct dma_pool *pool); +extern void *dma_pool_alloc(struct dma_pool *pool, gfp_t flags, dma_addr_t *handle); +extern void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); +extern void dma_sync_single_for_cpu(void *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); +extern void dma_sync_single_for_device(void *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); +extern dma_addr_t dma_map_page(void *dev, void *page, size_t offset, size_t size, enum dma_data_direction dir); +extern void dma_unmap_page(void *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); static inline int dma_mapping_error(void *dev, dma_addr_t addr) { diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/firmware.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/firmware.h index 7aba3945..0463e1ff 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/firmware.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/firmware.h @@ -2,7 +2,7 @@ #define _LINUX_FIRMWARE_H #include -#include +#include "types.h" struct firmware { size_t size; diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/interrupt.h 
b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/interrupt.h index 7016ae09..9cedba0e 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/interrupt.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/interrupt.h @@ -1,9 +1,9 @@ #ifndef _LINUX_INTERRUPT_H #define _LINUX_INTERRUPT_H -#include -#include -#include +#include "types.h" +#include "irq.h" +#include "spinlock.h" extern void local_irq_save(unsigned long *flags); extern void local_irq_restore(unsigned long flags); diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/io.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/io.h index cd17ba7b..4a922636 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/io.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/io.h @@ -1,7 +1,7 @@ #ifndef _LINUX_IO_H #define _LINUX_IO_H -#include +#include "types.h" #include extern void *ioremap(phys_addr_t phys_addr, size_t size); @@ -31,6 +31,21 @@ static inline void memset_io(void *dst, int c, size_t count) __builtin_memset(dst, c, count); } +static inline void mb(void) +{ + __sync_synchronize(); +} + +static inline void rmb(void) +{ + __sync_synchronize(); +} + +static inline void wmb(void) +{ + __sync_synchronize(); +} + #define ioread8(addr) readb(addr) #define ioread16(addr) readw(addr) #define ioread32(addr) readl(addr) diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/irq.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/irq.h index 3c5b0f62..7bc29d45 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/irq.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/irq.h @@ -1,7 +1,7 @@ #ifndef _LINUX_IRQ_H #define _LINUX_IRQ_H -#include +#include "types.h" typedef unsigned int irqreturn_t; diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/jiffies.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/jiffies.h index 
4c5f8aaf..f1f16d9a 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/jiffies.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/jiffies.h @@ -1,7 +1,7 @@ #ifndef _LINUX_JIFFIES_H #define _LINUX_JIFFIES_H -#include +#include "types.h" #include static inline u64 redox_get_jiffies(void) diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/mutex.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/mutex.h index 3c1596bf..3e167909 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/mutex.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/mutex.h @@ -1,7 +1,7 @@ #ifndef _LINUX_MUTEX_H #define _LINUX_MUTEX_H -#include +#include "types.h" struct mutex { unsigned char __opaque[64]; diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/pci.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/pci.h index d348435e..24926510 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/pci.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/pci.h @@ -1,9 +1,9 @@ #ifndef _LINUX_PCI_H #define _LINUX_PCI_H -#include -#include -#include +#include "types.h" +#include "device.h" +#include "io.h" #include #define PCI_VENDOR_ID_AMD 0x1002U @@ -12,6 +12,12 @@ #define PCI_ANY_ID (~0U) +/* MSI/MSI-X support */ +#define PCI_IRQ_MSI 1U +#define PCI_IRQ_MSIX 2U +#define PCI_IRQ_LEGACY 4U +#define PCI_IRQ_NOLEGACY 8U + struct pci_device_id { u32 vendor; u32 device; @@ -49,6 +55,11 @@ struct pci_driver { extern int pci_enable_device(struct pci_dev *dev); extern void pci_disable_device(struct pci_dev *dev); extern void pci_set_master(struct pci_dev *dev); +extern int pci_alloc_irq_vectors(struct pci_dev *dev, int min_vecs, int max_vecs, unsigned int flags); +extern void pci_free_irq_vectors(struct pci_dev *dev); +extern int pci_irq_vector(struct pci_dev *dev, unsigned int nr); +extern int pci_enable_msi(struct pci_dev *dev); +extern void 
pci_disable_msi(struct pci_dev *dev); extern void *pci_iomap(struct pci_dev *dev, unsigned int bar, size_t max_len); extern void pci_iounmap(struct pci_dev *dev, void *addr, size_t size); diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/skb.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/skb.h new file mode 100644 index 00000000..bd6ccfd1 --- /dev/null +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/skb.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SKB_H +#define _LINUX_SKB_H + +#include "skbuff.h" + +#endif diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/skbuff.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/skbuff.h index f0c906aa..5fbbf754 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/skbuff.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/skbuff.h @@ -3,6 +3,8 @@ #include "types.h" +struct net_device; + struct sk_buff { void *head; void *data; @@ -11,6 +13,12 @@ struct sk_buff { unsigned int end; }; +struct sk_buff_head { + struct sk_buff *next; + struct sk_buff *prev; + u32 qlen; +}; + extern struct sk_buff *alloc_skb(unsigned int size, gfp_t gfp_mask); extern void kfree_skb(struct sk_buff *skb); extern void skb_reserve(struct sk_buff *skb, unsigned int len); @@ -20,5 +28,14 @@ extern void *skb_pull(struct sk_buff *skb, unsigned int len); extern unsigned int skb_headroom(const struct sk_buff *skb); extern unsigned int skb_tailroom(const struct sk_buff *skb); extern void skb_trim(struct sk_buff *skb, unsigned int len); +extern void skb_queue_head_init(struct sk_buff_head *list); +extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); +extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); +extern void skb_queue_purge(struct sk_buff_head *list); +extern u32 skb_queue_len(const struct sk_buff_head *list); +extern int skb_queue_empty(const struct sk_buff_head *list); +extern struct sk_buff 
*__netdev_alloc_skb(struct net_device *dev, u32 length, gfp_t gfp_mask); +extern struct sk_buff *skb_copy(const struct sk_buff *src, gfp_t gfp); +extern struct sk_buff *skb_clone(const struct sk_buff *skb, gfp_t gfp); #endif diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/spinlock.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/spinlock.h index 200289aa..bb904f4f 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/spinlock.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/spinlock.h @@ -1,7 +1,7 @@ #ifndef _LINUX_SPINLOCK_H #define _LINUX_SPINLOCK_H -#include +#include "types.h" typedef struct spinlock { volatile unsigned char __locked; diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/timer.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/timer.h index 74991de1..3aefde16 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/timer.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/timer.h @@ -1,8 +1,8 @@ #ifndef _LINUX_TIMER_H #define _LINUX_TIMER_H -#include -#include +#include "types.h" +#include "compiler.h" struct timer_list { void (*function)(unsigned long data); diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/wait.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/wait.h index 6f21d00b..108a61c1 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/wait.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/linux/wait.h @@ -1,13 +1,15 @@ #ifndef _LINUX_WAIT_H #define _LINUX_WAIT_H -#include -#include +#include "types.h" +#include "compiler.h" struct wait_queue_head { unsigned char __opaque[128]; }; +typedef struct wait_queue_head wait_queue_head_t; + static inline void init_waitqueue_head(struct wait_queue_head *wq) { (void)wq; diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/net/cfg80211.h 
b/local/recipes/drivers/linux-kpi/source/src/c_headers/net/cfg80211.h index 7ce50c44..e9e8e950 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/net/cfg80211.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/net/cfg80211.h @@ -98,6 +98,11 @@ extern void cfg80211_connect_bss(struct net_device *dev, size_t resp_ie_len, u16 status, gfp_t gfp); +extern void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, + struct station_parameters *params, gfp_t gfp); +extern void cfg80211_rx_mgmt(struct wireless_dev *wdev, u32 freq, int sig_dbm, + const u8 *buf, size_t len, gfp_t gfp); +extern void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid); extern void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, diff --git a/local/recipes/drivers/linux-kpi/source/src/c_headers/net/mac80211.h b/local/recipes/drivers/linux-kpi/source/src/c_headers/net/mac80211.h index 9456aeb0..7b56ff32 100644 --- a/local/recipes/drivers/linux-kpi/source/src/c_headers/net/mac80211.h +++ b/local/recipes/drivers/linux-kpi/source/src/c_headers/net/mac80211.h @@ -34,6 +34,48 @@ struct ieee80211_bss_conf { u16 beacon_int; }; +enum ieee80211_sta_state { + IEEE80211_STA_NOTEXIST, + IEEE80211_STA_NONE, + IEEE80211_STA_AUTH, + IEEE80211_STA_ASSOC, + IEEE80211_STA_AUTHORIZED, +}; + +enum set_key_cmd { + SET_KEY, + DISABLE_KEY, +}; + +struct ieee80211_ops { + void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); + int (*start)(struct ieee80211_hw *hw); + void (*stop)(struct ieee80211_hw *hw); + int (*add_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + void (*remove_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + int (*config)(struct ieee80211_hw *hw, u32 changed); + void (*bss_info_changed)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed); + int (*sta_state)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta 
*sta, enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state); + int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct key_params *key); + void (*sw_scan_start)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr); + void (*sw_scan_complete)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + int (*sched_scan_start)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *req); + void (*sched_scan_stop)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +}; + +#define BSS_CHANGED_ASSOC (1U << 0) +#define BSS_CHANGED_BSSID (1U << 1) +#define BSS_CHANGED_ERP_CTS_PROT (1U << 2) +#define BSS_CHANGED_HT (1U << 3) +#define BSS_CHANGED_BASIC_RATES (1U << 4) +#define BSS_CHANGED_BEACON_INT (1U << 5) +#define BSS_CHANGED_BANDWIDTH (1U << 6) + extern struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, const void *ops, const char *requested_name); @@ -43,5 +85,8 @@ extern void ieee80211_unregister_hw(struct ieee80211_hw *hw); extern void ieee80211_queue_work(struct ieee80211_hw *hw, void *work); extern void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted); extern void ieee80211_connection_loss(struct ieee80211_vif *vif); +extern int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid, u16 timeout); +extern int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid); +extern void ieee80211_beacon_loss(struct ieee80211_vif *vif); #endif diff --git a/local/recipes/drivers/linux-kpi/source/src/lib.rs b/local/recipes/drivers/linux-kpi/source/src/lib.rs index 94000312..46acd981 100644 --- a/local/recipes/drivers/linux-kpi/source/src/lib.rs +++ b/local/recipes/drivers/linux-kpi/source/src/lib.rs @@ -11,6 +11,7 @@ pub use rust_impl::drm_shim; pub use rust_impl::firmware; pub use rust_impl::io; pub use rust_impl::irq; +pub use rust_impl::list; pub use rust_impl::mac80211; pub use rust_impl::memory; pub use rust_impl::net; 
diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/dma.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/dma.rs index b4080dff..55123c3f 100644 --- a/local/recipes/drivers/linux-kpi/source/src/rust_impl/dma.rs +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/dma.rs @@ -1,27 +1,104 @@ use std::alloc::{alloc_zeroed, dealloc, Layout}; +use std::ffi::{c_char, c_void, CStr}; use std::ptr; - -use syscall::CallFlags; +use std::sync::atomic::{fence, Ordering}; +use std::sync::Mutex; lazy_static::lazy_static! { static ref TRANSLATION_FD: Option = { - libredox::call::open("/scheme/memory/translation", - syscall::flag::O_CLOEXEC as i32, 0) + libredox::call::open("/scheme/memory/translation", syscall::flag::O_CLOEXEC as i32, 0) .ok() .map(|fd| fd) }; } +#[cfg(target_os = "redox")] fn virt_to_phys(virt: usize) -> usize { let raw = match *TRANSLATION_FD { Some(fd) => fd, None => return 0, }; let mut buf = virt.to_ne_bytes(); - let _ = libredox::call::call_ro(raw, &mut buf, CallFlags::empty(), &[]); + let _ = libredox::call::call_ro(raw, &mut buf, syscall::CallFlags::empty(), &[]); usize::from_ne_bytes(buf) } +#[cfg(not(target_os = "redox"))] +fn virt_to_phys(virt: usize) -> usize { + let _ = *TRANSLATION_FD; + virt +} + +fn sanitize_align(align: usize) -> Option { + let align = align.max(1); + if align.is_power_of_two() { + Some(align) + } else { + align.checked_next_power_of_two() + } +} + +fn crosses_boundary(addr: u64, size: usize, boundary: usize) -> bool { + if boundary == 0 || size == 0 { + return false; + } + + let end = match addr.checked_add(size.saturating_sub(1) as u64) { + Some(end) => end, + None => return true, + }; + let mask = !(boundary as u64 - 1); + (addr & mask) != (end & mask) +} + +#[derive(Clone, Copy)] +struct PoolAllocation { + vaddr: usize, + dma: u64, + size: usize, + align: usize, +} + +type AllocationList = Mutex>; + +#[repr(C)] +pub struct DmaPool { + pub name: *mut u8, + pub size: usize, + pub align: usize, + pub 
boundary: usize, + pub allocations: *mut c_void, + name_len: usize, +} + +fn copy_pool_name(name: *const u8) -> (*mut u8, usize) { + if name.is_null() { + return (ptr::null_mut(), 0); + } + + let c_name = unsafe { CStr::from_ptr(name.cast::()) }; + let bytes = c_name.to_bytes(); + let mut owned = Vec::with_capacity(bytes.len() + 1); + owned.extend_from_slice(bytes); + owned.push(0); + let len = owned.len(); + let ptr = owned.as_mut_ptr(); + std::mem::forget(owned); + (ptr, len) +} + +fn pool_allocations(pool: *mut DmaPool) -> Option<&'static AllocationList> { + if pool.is_null() { + return None; + } + let allocations = unsafe { (*pool).allocations.cast::() }; + if allocations.is_null() { + None + } else { + Some(unsafe { &*allocations }) + } +} + #[no_mangle] pub extern "C" fn dma_alloc_coherent( _dev: *mut u8, @@ -91,3 +168,237 @@ pub extern "C" fn dma_set_mask(_dev: *mut u8, _mask: u64) -> i32 { pub extern "C" fn dma_set_coherent_mask(_dev: *mut u8, _mask: u64) -> i32 { 0 } + +#[no_mangle] +pub extern "C" fn dma_pool_create( + name: *const u8, + _dev: *mut u8, + size: usize, + align: usize, + boundary: usize, +) -> *mut DmaPool { + if size == 0 { + return ptr::null_mut(); + } + + let Some(align) = sanitize_align(align) else { + return ptr::null_mut(); + }; + + if boundary != 0 && size > boundary { + return ptr::null_mut(); + } + + let allocations = Box::new(Mutex::new(Vec::::new())); + let (name_ptr, name_len) = copy_pool_name(name); + Box::into_raw(Box::new(DmaPool { + name: name_ptr, + size, + align, + boundary, + allocations: Box::into_raw(allocations).cast::(), + name_len, + })) +} + +#[no_mangle] +pub extern "C" fn dma_pool_destroy(pool: *mut DmaPool) { + if pool.is_null() { + return; + } + + let allocations_ptr = unsafe { (*pool).allocations.cast::() }; + if !allocations_ptr.is_null() { + let allocations = unsafe { Box::from_raw(allocations_ptr) }; + let entries = allocations + .lock() + .map(|entries| entries.clone()) + .unwrap_or_default(); + for entry in 
entries { + if let Ok(layout) = Layout::from_size_align(entry.size.max(1), entry.align.max(1)) { + unsafe { dealloc(entry.vaddr as *mut u8, layout) }; + } + } + } + + let pool = unsafe { Box::from_raw(pool) }; + if !pool.name.is_null() && pool.name_len != 0 { + unsafe { + drop(Vec::from_raw_parts(pool.name, pool.name_len, pool.name_len)); + } + } +} + +#[no_mangle] +pub extern "C" fn dma_pool_alloc(pool: *mut DmaPool, _flags: u32, handle: *mut u64) -> *mut u8 { + if pool.is_null() || handle.is_null() { + return ptr::null_mut(); + } + + let pool_ref = unsafe { &*pool }; + if pool_ref.size == 0 { + return ptr::null_mut(); + } + + let layout = match Layout::from_size_align(pool_ref.size, pool_ref.align.max(1)) { + Ok(layout) => layout, + Err(_) => return ptr::null_mut(), + }; + + let vaddr = unsafe { alloc_zeroed(layout) }; + if vaddr.is_null() { + return ptr::null_mut(); + } + + let dma = virt_to_phys(vaddr as usize) as u64; + if dma == 0 || crosses_boundary(dma, pool_ref.size, pool_ref.boundary) { + unsafe { dealloc(vaddr, layout) }; + return ptr::null_mut(); + } + + let Some(allocations) = pool_allocations(pool) else { + unsafe { dealloc(vaddr, layout) }; + return ptr::null_mut(); + }; + + let Ok(mut entries) = allocations.lock() else { + unsafe { dealloc(vaddr, layout) }; + return ptr::null_mut(); + }; + + entries.push(PoolAllocation { + vaddr: vaddr as usize, + dma, + size: pool_ref.size, + align: pool_ref.align.max(1), + }); + unsafe { *handle = dma }; + vaddr +} + +#[no_mangle] +pub extern "C" fn dma_pool_free(pool: *mut DmaPool, vaddr: *mut u8, addr: u64) { + if pool.is_null() || vaddr.is_null() { + return; + } + + let Some(allocations) = pool_allocations(pool) else { + return; + }; + + let Ok(mut entries) = allocations.lock() else { + return; + }; + + let Some(index) = entries + .iter() + .position(|entry| entry.vaddr == vaddr as usize || (addr != 0 && entry.dma == addr)) + else { + return; + }; + + let entry = entries.swap_remove(index); + if let Ok(layout) 
= Layout::from_size_align(entry.size.max(1), entry.align.max(1)) { + unsafe { dealloc(entry.vaddr as *mut u8, layout) }; + } +} + +#[no_mangle] +pub extern "C" fn dma_sync_single_for_cpu(_dev: *mut u8, addr: u64, size: usize, _dir: u32) { + if addr == 0 || size == 0 { + return; + } + fence(Ordering::Acquire); +} + +#[no_mangle] +pub extern "C" fn dma_sync_single_for_device(_dev: *mut u8, addr: u64, size: usize, _dir: u32) { + if addr == 0 || size == 0 { + return; + } + fence(Ordering::Release); +} + +#[no_mangle] +pub extern "C" fn dma_map_page( + _dev: *mut u8, + page: *mut u8, + offset: usize, + size: usize, + _dir: u32, +) -> u64 { + if page.is_null() || size == 0 { + return 0; + } + + let Some(vaddr) = (page as usize).checked_add(offset) else { + return 0; + }; + virt_to_phys(vaddr) as u64 +} + +#[no_mangle] +pub extern "C" fn dma_unmap_page(_dev: *mut u8, _addr: u64, _size: usize, _dir: u32) {} + +#[no_mangle] +pub extern "C" fn dma_mapping_error(_dev: *mut u8, addr: u64) -> i32 { + if addr == 0 { + 1 + } else { + 0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ffi::CString; + + #[test] + fn dma_alloc_and_map_work_on_host() { + let mut handle = 0u64; + let vaddr = dma_alloc_coherent(ptr::null_mut(), 128, &mut handle, 0); + assert!(!vaddr.is_null()); + assert_ne!(handle, 0); + assert_eq!(dma_mapping_error(ptr::null_mut(), handle), 0); + assert_eq!(dma_map_single(ptr::null_mut(), vaddr, 128, 0), handle); + dma_sync_single_for_cpu(ptr::null_mut(), handle, 128, 0); + dma_sync_single_for_device(ptr::null_mut(), handle, 128, 0); + dma_free_coherent(ptr::null_mut(), 128, vaddr, handle); + } + + #[test] + fn dma_pool_lifecycle_tracks_allocations() { + let name = CString::new("iwlwifi-rx").expect("valid test CString"); + let pool = dma_pool_create(name.as_ptr().cast::(), ptr::null_mut(), 256, 64, 0); + assert!(!pool.is_null()); + + let mut handle = 0u64; + let vaddr = dma_pool_alloc(pool, 0, &mut handle); + assert!(!vaddr.is_null()); + 
assert_ne!(handle, 0); + + let allocations = unsafe { &*((*pool).allocations.cast::()) }; + assert_eq!(allocations.lock().expect("lock allocations").len(), 1); + + dma_pool_free(pool, vaddr, handle); + assert!(allocations.lock().expect("lock allocations").is_empty()); + dma_pool_destroy(pool); + } + + #[test] + fn dma_pool_rejects_impossible_boundary() { + let pool = dma_pool_create(ptr::null(), ptr::null_mut(), 1024, 16, 128); + assert!(pool.is_null()); + } + + #[test] + fn dma_map_page_and_error_checks_work() { + let mut page = [0u8; 64]; + let dma = dma_map_page(ptr::null_mut(), page.as_mut_ptr(), 8, 16, 0); + assert_ne!(dma, 0); + assert_eq!(dma_mapping_error(ptr::null_mut(), dma), 0); + assert_eq!(dma_mapping_error(ptr::null_mut(), 0), 1); + dma_unmap_page(ptr::null_mut(), dma, 16, 0); + } +} diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/io.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/io.rs index 606005c3..9dc58cd7 100644 --- a/local/recipes/drivers/linux-kpi/source/src/rust_impl/io.rs +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/io.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::ptr; +use std::sync::atomic::{fence, Ordering}; use std::sync::Mutex; type PhysAddr = u64; @@ -24,28 +25,26 @@ pub extern "C" fn ioremap(phys: PhysAddr, size: usize) -> *mut u8 { size ); - let ptr = match redox_driver_sys::memory::MmioRegion::map( + match redox_driver_sys::memory::MmioRegion::map( phys, size, redox_driver_sys::memory::CacheType::DeviceMemory, redox_driver_sys::memory::MmioProt::READ_WRITE, ) { Ok(region) => { - let p = region.as_ptr() as *mut u8; - let s = region.size(); + let ptr = region.as_ptr() as *mut u8; + let size = region.size(); if let Ok(mut tracker) = MMIO_MAP_TRACKER.lock() { - tracker.insert(p as usize, MappedRegion { size: s }); + tracker.insert(ptr as usize, MappedRegion { size }); } std::mem::forget(region); - p + ptr } Err(e) => { log::error!("ioremap: failed to map {:#x}+{:#x}: {:?}", phys, 
size, e); ptr::null_mut() } - }; - - ptr + } } #[no_mangle] @@ -124,3 +123,69 @@ pub extern "C" fn writew(val: u16, addr: *mut u8) { } unsafe { ptr::write_volatile(addr as *mut u16, val) }; } + +#[no_mangle] +pub extern "C" fn memcpy_toio(dst: *mut u8, src: *const u8, count: usize) { + if dst.is_null() || src.is_null() || count == 0 { + return; + } + unsafe { ptr::copy_nonoverlapping(src, dst, count) }; +} + +#[no_mangle] +pub extern "C" fn memcpy_fromio(dst: *mut u8, src: *const u8, count: usize) { + if dst.is_null() || src.is_null() || count == 0 { + return; + } + unsafe { ptr::copy_nonoverlapping(src, dst, count) }; +} + +#[no_mangle] +pub extern "C" fn memset_io(dst: *mut u8, val: u8, count: usize) { + if dst.is_null() || count == 0 { + return; + } + unsafe { ptr::write_bytes(dst, val, count) }; +} + +#[no_mangle] +pub extern "C" fn mb() { + fence(Ordering::SeqCst); +} + +#[no_mangle] +pub extern "C" fn rmb() { + fence(Ordering::Acquire); +} + +#[no_mangle] +pub extern "C" fn wmb() { + fence(Ordering::Release); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn io_copy_helpers_move_bytes() { + let mut dst = [0u8; 8]; + let src = [1u8, 2, 3, 4, 5, 6, 7, 8]; + memcpy_toio(dst.as_mut_ptr(), src.as_ptr(), src.len()); + assert_eq!(dst, src); + + let mut second = [0u8; 8]; + memcpy_fromio(second.as_mut_ptr(), dst.as_ptr(), dst.len()); + assert_eq!(second, src); + + memset_io(second.as_mut_ptr(), 0xaa, second.len()); + assert_eq!(second, [0xaa; 8]); + } + + #[test] + fn io_barriers_are_callable() { + mb(); + rmb(); + wmb(); + } +} diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/list.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/list.rs new file mode 100644 index 00000000..aacff589 --- /dev/null +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/list.rs @@ -0,0 +1,197 @@ +use std::ptr; + +#[repr(C)] +pub struct ListHead { + pub next: *mut ListHead, + pub prev: *mut ListHead, +} + +#[no_mangle] +pub extern "C" fn 
init_list_head(head: *mut ListHead) { + if head.is_null() { + return; + } + unsafe { + (*head).next = head; + (*head).prev = head; + } +} + +#[no_mangle] +pub extern "C" fn list_add(new: *mut ListHead, head: *mut ListHead) { + if new.is_null() || head.is_null() { + return; + } + + unsafe { + let next = (*head).next; + (*new).next = next; + (*new).prev = head; + (*head).next = new; + if !next.is_null() { + (*next).prev = new; + } + } +} + +#[no_mangle] +pub extern "C" fn list_add_tail(new: *mut ListHead, head: *mut ListHead) { + if new.is_null() || head.is_null() { + return; + } + + unsafe { + let prev = (*head).prev; + (*new).next = head; + (*new).prev = prev; + (*head).prev = new; + if !prev.is_null() { + (*prev).next = new; + } + } +} + +#[no_mangle] +pub extern "C" fn list_del(entry: *mut ListHead) { + if entry.is_null() { + return; + } + + unsafe { + let prev = (*entry).prev; + let next = (*entry).next; + if !prev.is_null() { + (*prev).next = next; + } + if !next.is_null() { + (*next).prev = prev; + } + (*entry).next = ptr::null_mut(); + (*entry).prev = ptr::null_mut(); + } +} + +#[no_mangle] +pub extern "C" fn list_empty(head: *const ListHead) -> i32 { + if head.is_null() { + return 1; + } + if ptr::eq(unsafe { (*head).next } as *const ListHead, head) { + 1 + } else { + 0 + } +} + +#[no_mangle] +pub extern "C" fn list_splice(list: *mut ListHead, head: *mut ListHead) { + if list.is_null() || head.is_null() || list_empty(list) != 0 { + return; + } + + unsafe { + let first = (*list).next; + let last = (*list).prev; + let at = (*head).next; + + (*first).prev = head; + (*head).next = first; + + (*last).next = at; + if !at.is_null() { + (*at).prev = last; + } + } +} + +#[no_mangle] +pub extern "C" fn list_first_entry(head: *const ListHead, offset: usize) -> *mut u8 { + if head.is_null() || list_empty(head) != 0 { + return ptr::null_mut(); + } + + let first = unsafe { (*head).next }; + if first.is_null() { + return ptr::null_mut(); + } + + (first as usize) + 
.checked_sub(offset) + .map_or(ptr::null_mut(), |entry| entry as *mut u8) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::mem::offset_of; + + #[repr(C)] + struct Node { + value: u32, + link: ListHead, + } + + #[test] + fn list_add_delete_and_first_entry_work() { + let mut head = ListHead { + next: ptr::null_mut(), + prev: ptr::null_mut(), + }; + init_list_head(&mut head); + assert_eq!(list_empty(&head), 1); + + let mut node = Node { + value: 7, + link: ListHead { + next: ptr::null_mut(), + prev: ptr::null_mut(), + }, + }; + list_add(&mut node.link, &mut head); + assert_eq!(list_empty(&head), 0); + + let first = list_first_entry(&head, offset_of!(Node, link)).cast::(); + assert_eq!(unsafe { (*first).value }, 7); + + list_del(&mut node.link); + init_list_head(&mut head); + assert_eq!(list_empty(&head), 1); + } + + #[test] + fn list_add_tail_and_splice_work() { + let mut dst = ListHead { + next: ptr::null_mut(), + prev: ptr::null_mut(), + }; + let mut src = ListHead { + next: ptr::null_mut(), + prev: ptr::null_mut(), + }; + init_list_head(&mut dst); + init_list_head(&mut src); + + let mut node1 = Node { + value: 1, + link: ListHead { + next: ptr::null_mut(), + prev: ptr::null_mut(), + }, + }; + let mut node2 = Node { + value: 2, + link: ListHead { + next: ptr::null_mut(), + prev: ptr::null_mut(), + }, + }; + + list_add_tail(&mut node1.link, &mut src); + list_add_tail(&mut node2.link, &mut src); + list_splice(&mut src, &mut dst); + + let first = list_first_entry(&dst, offset_of!(Node, link)).cast::(); + assert_eq!(unsafe { (*first).value }, 1); + assert!(std::ptr::eq(node1.link.next, &mut node2.link)); + } +} diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/mac80211.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/mac80211.rs index bd636497..efa28041 100644 --- a/local/recipes/drivers/linux-kpi/source/src/rust_impl/mac80211.rs +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/mac80211.rs @@ -1,14 +1,68 @@ use 
std::alloc::{alloc_zeroed, dealloc, Layout}; +use std::collections::HashMap; use std::ffi::c_void; use std::ptr; use std::sync::atomic::{AtomicI32, Ordering}; +use std::sync::Mutex; -use super::wireless::{wiphy_free, wiphy_new_nm, wiphy_register, wiphy_unregister, Wiphy}; +use super::net::SkBuff; +use super::wireless::{ + wiphy_free, wiphy_new_nm, wiphy_register, wiphy_unregister, KeyParams, Wiphy, +}; use super::workqueue::{schedule_work, WorkStruct}; +const EINVAL: i32 = 22; +const EBUSY: i32 = 16; + +lazy_static::lazy_static! { + static ref STA_REGISTRY: Mutex> = Mutex::new(HashMap::new()); + static ref BA_SESSIONS: Mutex>> = Mutex::new(HashMap::new()); +} + +#[derive(Clone, Copy)] +struct StaRegistryEntry { + hw: usize, + _vif: usize, + state: u32, +} + +#[repr(C)] +pub struct Ieee80211Ops { + pub tx: Option, + pub start: Option i32>, + pub stop: Option, + pub add_interface: Option i32>, + pub remove_interface: Option, + pub config: Option i32>, + pub bss_info_changed: + Option, + pub sta_state: + Option i32>, + pub set_key: Option< + extern "C" fn( + *mut Ieee80211Hw, + *mut Ieee80211Vif, + i32, + *mut Ieee80211Sta, + *mut KeyParams, + ) -> i32, + >, + pub ampdu_action: Option< + extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif, *mut Ieee80211Sta, u16, u16, u16) -> i32, + >, + pub sw_scan_start: Option, + pub sw_scan_complete: Option, + pub prepare_multicast: Option u64>, + pub configure_filter: Option, + pub sched_scan_start: + Option i32>, + pub sched_scan_stop: Option, +} + #[repr(C)] pub struct Ieee80211Hw { pub wiphy: *mut Wiphy, + pub ops: *const Ieee80211Ops, pub priv_data: *mut c_void, pub registered: AtomicI32, pub extra_tx_headroom: u32, @@ -26,6 +80,7 @@ pub struct Ieee80211Vif { } #[repr(C)] +#[derive(Debug)] pub struct Ieee80211Sta { pub addr: [u8; 6], pub drv_priv: *mut c_void, @@ -39,6 +94,41 @@ pub struct Ieee80211BssConf { pub beacon_int: u16, } +pub const BSS_CHANGED_ASSOC: u32 = 1; +pub const BSS_CHANGED_BSSID: u32 = 2; +pub const 
BSS_CHANGED_ERP_CTS_PROT: u32 = 4; +pub const BSS_CHANGED_HT: u32 = 8; +pub const BSS_CHANGED_BASIC_RATES: u32 = 16; +pub const BSS_CHANGED_BEACON_INT: u32 = 32; +pub const BSS_CHANGED_BANDWIDTH: u32 = 64; + +fn update_sta_registry( + hw: *mut Ieee80211Hw, + vif: *mut Ieee80211Vif, + sta: *mut Ieee80211Sta, + new_state: u32, +) { + if let Ok(mut registry) = STA_REGISTRY.lock() { + if new_state <= IEEE80211_STA_NONE { + registry.remove(&(sta as usize)); + } else { + registry.insert( + sta as usize, + StaRegistryEntry { + hw: hw as usize, + _vif: vif as usize, + state: new_state, + }, + ); + } + } + if new_state <= IEEE80211_STA_NONE { + if let Ok(mut sessions) = BA_SESSIONS.lock() { + sessions.remove(&(sta as usize)); + } + } +} + #[no_mangle] pub extern "C" fn ieee80211_alloc_hw_nm( priv_data_len: usize, @@ -52,6 +142,7 @@ pub extern "C" fn ieee80211_alloc_hw_nm( let mut hw = Box::new(Ieee80211Hw { wiphy, + ops: ops.cast::(), priv_data: ptr::null_mut(), registered: AtomicI32::new(0), extra_tx_headroom: 0, @@ -86,6 +177,9 @@ pub extern "C" fn ieee80211_free_hw(hw: *mut Ieee80211Hw) { if hw.is_null() { return; } + if let Ok(mut registry) = STA_REGISTRY.lock() { + registry.retain(|_, entry| entry.hw != hw as usize); + } unsafe { let hw_box = Box::from_raw(hw); if !hw_box.priv_data.is_null() { @@ -103,10 +197,10 @@ pub extern "C" fn ieee80211_free_hw(hw: *mut Ieee80211Hw) { #[no_mangle] pub extern "C" fn ieee80211_register_hw(hw: *mut Ieee80211Hw) -> i32 { if hw.is_null() { - return -22; + return -EINVAL; } if unsafe { &*hw }.registered.load(Ordering::Acquire) != 0 { - return -16; + return -EBUSY; } let rc = wiphy_register(unsafe { (*hw).wiphy }); if rc != 0 { @@ -147,16 +241,207 @@ pub extern "C" fn ieee80211_connection_loss(vif: *mut Ieee80211Vif) { unsafe { (*vif).cfg_assoc = false }; } +#[repr(C)] +pub struct Ieee80211RxStatus { + pub freq: u16, + pub band: u32, + pub signal: i8, + pub noise: i8, + pub rate_idx: u8, + pub flag: u32, + pub antenna: u8, + pub 
rx_flags: u32, +} + +impl Default for Ieee80211RxStatus { + fn default() -> Self { + Self { + freq: 0, + band: 0, + signal: 0, + noise: 0, + rate_idx: 0, + flag: 0, + antenna: 0, + rx_flags: 0, + } + } +} + +pub const RX_FLAG_MMIC_ERROR: u32 = 1 << 0; +pub const RX_FLAG_DECRYPTED: u32 = 1 << 1; +pub const RX_FLAG_MMIC_STRIPPED: u32 = 1 << 2; +pub const RX_FLAG_IV_STRIPPED: u32 = 1 << 3; + +#[repr(C)] +pub struct Ieee80211TxInfo { + pub flags: u32, + pub band: u32, + pub hw_queue: u8, + pub rate_driver_data: [u8; 16], +} + +pub const IEEE80211_TX_CTL_REQ_TX_STATUS: u32 = 1 << 0; +pub const IEEE80211_TX_CTL_NO_ACK: u32 = 1 << 1; +pub const IEEE80211_TX_CTL_CLEAR_PS_FILT: u32 = 1 << 2; +pub const IEEE80211_TX_CTL_FIRST_FRAGMENT: u32 = 1 << 3; + +#[no_mangle] +pub extern "C" fn ieee80211_rx_irqsafe(hw: *mut Ieee80211Hw, skb: *mut SkBuff) { + if hw.is_null() || skb.is_null() { + return; + } +} + +#[no_mangle] +pub extern "C" fn ieee80211_tx_status(hw: *mut Ieee80211Hw, skb: *mut SkBuff) { + if hw.is_null() || skb.is_null() { + return; + } +} + +#[no_mangle] +pub extern "C" fn ieee80211_get_tid(skb: *const SkBuff) -> u8 { + if skb.is_null() { + return 0; + } + + 0 +} + +#[no_mangle] +pub extern "C" fn ieee80211_chandef_create( + chandef: *mut c_void, + channel: *const super::wireless::Ieee80211Channel, + _chan_type: u32, +) { + if chandef.is_null() || channel.is_null() { + return; + } +} + +pub const IEEE80211_STA_NOTEXIST: u32 = 0; +pub const IEEE80211_STA_NONE: u32 = 1; +pub const IEEE80211_STA_AUTH: u32 = 2; +pub const IEEE80211_STA_ASSOC: u32 = 3; +pub const IEEE80211_STA_AUTHORIZED: u32 = 4; + +#[no_mangle] +pub extern "C" fn ieee80211_start_tx_ba_session( + pub_sta: *mut Ieee80211Sta, + tid: u16, + _timeout: u16, +) -> i32 { + if pub_sta.is_null() || tid >= 16 { + return -EINVAL; + } + + let Ok(mut sessions) = BA_SESSIONS.lock() else { + return -EINVAL; + }; + let entry = sessions.entry(pub_sta as usize).or_default(); + if entry.contains(&tid) { + return -EBUSY; + 
} + entry.push(tid); + 0 +} + +#[no_mangle] +pub extern "C" fn ieee80211_stop_tx_ba_session(pub_sta: *mut Ieee80211Sta, tid: u16) -> i32 { + if pub_sta.is_null() || tid >= 16 { + return -EINVAL; + } + + if let Ok(mut sessions) = BA_SESSIONS.lock() { + if let Some(entry) = sessions.get_mut(&(pub_sta as usize)) { + entry.retain(|existing| *existing != tid); + if entry.is_empty() { + sessions.remove(&(pub_sta as usize)); + } + } + } + 0 +} + +#[no_mangle] +pub extern "C" fn ieee80211_sta_state( + hw: *mut Ieee80211Hw, + vif: *mut Ieee80211Vif, + sta: *mut Ieee80211Sta, + _old_state: u32, + new_state: u32, +) -> i32 { + if hw.is_null() || vif.is_null() || sta.is_null() { + return -EINVAL; + } + + update_sta_registry(hw, vif, sta, new_state); + let ops = unsafe { (*hw).ops }; + if ops.is_null() { + return 0; + } + + match unsafe { (*ops).sta_state } { + Some(callback) => callback(hw, vif, sta, new_state), + None => 0, + } +} + +#[no_mangle] +pub extern "C" fn ieee80211_find_sta(hw: *mut Ieee80211Hw, addr: *const u8) -> *mut Ieee80211Sta { + if hw.is_null() || addr.is_null() { + return ptr::null_mut(); + } + + let Ok(registry) = STA_REGISTRY.lock() else { + return ptr::null_mut(); + }; + let wanted = unsafe { ptr::read(addr.cast::<[u8; 6]>()) }; + for (sta_ptr, entry) in registry.iter() { + if entry.hw != hw as usize || entry.state <= IEEE80211_STA_NONE { + continue; + } + let sta = *sta_ptr as *mut Ieee80211Sta; + if sta.is_null() { + continue; + } + if wanted == unsafe { (*sta).addr } { + return sta; + } + } + ptr::null_mut() +} + +#[no_mangle] +pub extern "C" fn ieee80211_beacon_loss(vif: *mut Ieee80211Vif) { + if vif.is_null() { + return; + } + unsafe { (*vif).cfg_assoc = false }; +} + #[cfg(test)] mod tests { use super::*; use crate::rust_impl::workqueue::{flush_scheduled_work, WorkStruct}; - use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; + use std::sync::atomic::AtomicBool; static WORK_RAN: AtomicBool = AtomicBool::new(false); + static 
STA_CALLBACKS: AtomicI32 = AtomicI32::new(0); extern "C" fn test_work(_work: *mut WorkStruct) { - WORK_RAN.store(true, AtomicOrdering::Release); + WORK_RAN.store(true, Ordering::Release); + } + + extern "C" fn test_sta_state( + _hw: *mut Ieee80211Hw, + _vif: *mut Ieee80211Vif, + _sta: *mut Ieee80211Sta, + state: u32, + ) -> i32 { + STA_CALLBACKS.store(state as i32, Ordering::Release); + 0 } #[test] @@ -179,15 +464,15 @@ mod tests { func: Some(test_work), __opaque: [0; 64], }; - WORK_RAN.store(false, AtomicOrdering::Release); + WORK_RAN.store(false, Ordering::Release); ieee80211_queue_work(hw, (&mut work as *mut WorkStruct).cast::()); flush_scheduled_work(); - assert!(WORK_RAN.load(AtomicOrdering::Acquire)); + assert!(WORK_RAN.load(Ordering::Acquire)); ieee80211_free_hw(hw); } #[test] - fn connection_loss_clears_assoc_state() { + fn connection_loss_and_beacon_loss_clear_assoc_state() { let mut vif = Ieee80211Vif { addr: [0; 6], drv_priv: ptr::null_mut(), @@ -196,5 +481,110 @@ mod tests { }; ieee80211_connection_loss(&mut vif); assert!(!vif.cfg_assoc); + vif.cfg_assoc = true; + ieee80211_beacon_loss(&mut vif); + assert!(!vif.cfg_assoc); + } + + #[test] + fn ieee80211_rx_status_default_and_flags_work() { + let status = Ieee80211RxStatus::default(); + assert_eq!(status.freq, 0); + assert_eq!(status.band, 0); + assert_eq!(status.signal, 0); + assert_eq!(status.noise, 0); + assert_eq!(status.rate_idx, 0); + assert_eq!(status.flag, 0); + assert_eq!(status.antenna, 0); + assert_eq!(status.rx_flags, 0); + + let combined = RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; + assert_ne!(combined & RX_FLAG_DECRYPTED, 0); + assert_ne!(combined & RX_FLAG_IV_STRIPPED, 0); + assert_ne!(combined & RX_FLAG_MMIC_STRIPPED, 0); + assert_eq!(combined & RX_FLAG_MMIC_ERROR, 0); + } + + #[test] + fn ieee80211_sta_registry_and_ba_sessions_work() { + let ops = Ieee80211Ops { + tx: None, + start: None, + stop: None, + add_interface: None, + remove_interface: None, + config: 
None, + bss_info_changed: None, + sta_state: Some(test_sta_state), + set_key: None, + ampdu_action: None, + sw_scan_start: None, + sw_scan_complete: None, + prepare_multicast: None, + configure_filter: None, + sched_scan_start: None, + sched_scan_stop: None, + }; + let hw = ieee80211_alloc_hw_nm( + 0, + (&ops as *const Ieee80211Ops).cast::(), + ptr::null(), + ); + assert!(!hw.is_null()); + assert_eq!(unsafe { (*hw).ops }, &ops as *const Ieee80211Ops); + + let mut vif = Ieee80211Vif { + addr: [0; 6], + drv_priv: ptr::null_mut(), + type_: 0, + cfg_assoc: false, + }; + let mut sta = Ieee80211Sta { + addr: [1, 2, 3, 4, 5, 6], + drv_priv: ptr::null_mut(), + aid: 1, + }; + + STA_CALLBACKS.store(0, Ordering::Release); + assert_eq!( + ieee80211_sta_state( + hw, + &mut vif, + &mut sta, + IEEE80211_STA_NONE, + IEEE80211_STA_ASSOC + ), + 0 + ); + assert_eq!( + STA_CALLBACKS.load(Ordering::Acquire), + IEEE80211_STA_ASSOC as i32 + ); + assert!(std::ptr::eq( + ieee80211_find_sta(hw, sta.addr.as_ptr()), + &mut sta, + )); + + assert_eq!(ieee80211_start_tx_ba_session(&mut sta, 3, 100), 0); + assert_eq!(ieee80211_start_tx_ba_session(&mut sta, 3, 100), -16); + assert_eq!(ieee80211_stop_tx_ba_session(&mut sta, 3), 0); + + assert_eq!( + ieee80211_sta_state( + hw, + &mut vif, + &mut sta, + IEEE80211_STA_ASSOC, + IEEE80211_STA_NONE + ), + 0 + ); + assert!(ieee80211_find_sta(hw, sta.addr.as_ptr()).is_null()); + ieee80211_free_hw(hw); + } + + #[test] + fn ieee80211_get_tid_returns_zero_for_null() { + assert_eq!(ieee80211_get_tid(ptr::null()), 0); } } diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/mod.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/mod.rs index eabfe939..cdb3a93c 100644 --- a/local/recipes/drivers/linux-kpi/source/src/rust_impl/mod.rs +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/mod.rs @@ -2,12 +2,13 @@ pub mod device; pub mod dma; pub mod drm_shim; pub mod firmware; -pub mod mac80211; -pub mod net; pub mod idr; pub mod io; pub mod 
irq; +pub mod list; +pub mod mac80211; pub mod memory; +pub mod net; pub mod pci; pub mod sync; pub mod timer; diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/net.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/net.rs index 5779c053..5d8d82b4 100644 --- a/local/recipes/drivers/linux-kpi/source/src/rust_impl/net.rs +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/net.rs @@ -1,7 +1,17 @@ use std::alloc::{alloc_zeroed, dealloc, Layout}; use std::ffi::c_void; use std::ptr; -use std::sync::atomic::{AtomicI32, Ordering}; +use std::sync::atomic::{AtomicI32, AtomicU32, AtomicUsize, Ordering}; + +const NAPI_STATE_IDLE: i32 = 0; +const NAPI_STATE_SCHEDULED: i32 = 1; + +#[repr(C)] +struct SkbSharedInfo { + refcount: AtomicUsize, + capacity: usize, + align: usize, +} #[repr(C)] pub struct SkBuff { @@ -10,6 +20,19 @@ pub struct SkBuff { pub len: u32, pub tail: u32, pub end: u32, + pub next: *mut SkBuff, + pub prev: *mut SkBuff, + pub network_header: i32, + pub mac_header: i32, + shared: *mut SkbSharedInfo, +} + +#[repr(C)] +pub struct SkBuffHead { + pub next: *mut SkBuff, + pub prev: *mut SkBuff, + pub qlen: u32, + pub lock: u8, } #[repr(C)] @@ -24,21 +47,45 @@ pub struct NetDevice { pub ieee80211_ptr: *mut c_void, pub priv_data: *mut c_void, pub registered: AtomicI32, + pub tx_queue_state: AtomicU32, + pub device_attached: AtomicI32, priv_alloc_size: usize, priv_alloc_align: usize, } -unsafe fn free_skb_buffer(skb: *mut SkBuff) { +#[repr(C)] +pub struct NapiStruct { + pub poll: Option i32>, + pub dev: *mut NetDevice, + pub state: AtomicI32, + pub weight: i32, +} + +unsafe fn release_skb_buffer(skb: *mut SkBuff) { if skb.is_null() { return; } - let head = (*skb).head; - let end = (*skb).end as usize; - if !head.is_null() && end != 0 { - if let Ok(layout) = Layout::from_size_align(end, 16) { - dealloc(head, layout); - } + + let shared = (*skb).shared; + if shared.is_null() { + return; } + + if (*shared).refcount.fetch_sub(1, 
Ordering::AcqRel) == 1 { + let capacity = (*shared).capacity.max(1); + let align = (*shared).align.max(1); + if !(*skb).head.is_null() { + if let Ok(layout) = Layout::from_size_align(capacity, align) { + dealloc((*skb).head, layout); + } + } + drop(Box::from_raw(shared)); + } +} + +fn skb_headroom_inner(skb: &SkBuff) -> u32 { + let headroom = unsafe { skb.data.offset_from(skb.head) }; + u32::try_from(headroom).unwrap_or_default() } #[no_mangle] @@ -53,12 +100,23 @@ pub extern "C" fn alloc_skb(size: u32, _gfp_mask: u32) -> *mut SkBuff { return ptr::null_mut(); } + let shared = Box::into_raw(Box::new(SkbSharedInfo { + refcount: AtomicUsize::new(1), + capacity: capacity.max(1), + align: 16, + })); + Box::into_raw(Box::new(SkBuff { head, data: head, len: 0, tail: 0, end: capacity as u32, + next: ptr::null_mut(), + prev: ptr::null_mut(), + network_header: 0, + mac_header: 0, + shared, })) } @@ -68,7 +126,7 @@ pub extern "C" fn kfree_skb(skb: *mut SkBuff) { return; } unsafe { - free_skb_buffer(skb); + release_skb_buffer(skb); drop(Box::from_raw(skb)); } } @@ -80,16 +138,14 @@ pub extern "C" fn skb_reserve(skb: *mut SkBuff, len: u32) { } let skb_ref = unsafe { &mut *skb }; - let headroom = unsafe { skb_ref.data.offset_from(skb_ref.head) }; - let Ok(headroom) = u32::try_from(headroom) else { - return; - }; + let headroom = skb_headroom_inner(skb_ref); let new_headroom = headroom.saturating_add(len); if new_headroom > skb_ref.end || skb_ref.tail != 0 || skb_ref.len != 0 { return; } skb_ref.data = unsafe { skb_ref.head.add(new_headroom as usize) }; + skb_ref.mac_header = new_headroom as i32; } #[no_mangle] @@ -140,8 +196,8 @@ pub extern "C" fn skb_pull(skb: *mut SkBuff, len: u32) -> *mut u8 { } skb_ref.data = unsafe { skb_ref.data.add(len as usize) }; - skb_ref.tail -= len; - skb_ref.len -= len; + skb_ref.tail = skb_ref.tail.saturating_sub(len); + skb_ref.len = skb_ref.len.saturating_sub(len); skb_ref.data } @@ -151,9 +207,7 @@ pub extern "C" fn skb_headroom(skb: *const 
SkBuff) -> u32 { return 0; } - let skb_ref = unsafe { &*skb }; - let headroom = unsafe { skb_ref.data.offset_from(skb_ref.head) }; - u32::try_from(headroom).unwrap_or_default() + skb_headroom_inner(unsafe { &*skb }) } #[no_mangle] @@ -180,6 +234,194 @@ pub extern "C" fn skb_trim(skb: *mut SkBuff, len: u32) { skb_ref.tail = new_len; } +#[no_mangle] +pub extern "C" fn skb_queue_head_init(list: *mut SkBuffHead) { + if list.is_null() { + return; + } + + unsafe { + (*list).next = ptr::null_mut(); + (*list).prev = ptr::null_mut(); + (*list).qlen = 0; + (*list).lock = 0; + } +} + +#[no_mangle] +pub extern "C" fn skb_queue_tail(list: *mut SkBuffHead, newsk: *mut SkBuff) { + if list.is_null() || newsk.is_null() { + return; + } + + unsafe { + (*newsk).next = ptr::null_mut(); + (*newsk).prev = (*list).prev; + if (*list).prev.is_null() { + (*list).next = newsk; + } else { + (*(*list).prev).next = newsk; + } + (*list).prev = newsk; + if (*list).next.is_null() { + (*list).next = newsk; + } + (*list).qlen = (*list).qlen.saturating_add(1); + } +} + +#[no_mangle] +pub extern "C" fn skb_dequeue(list: *mut SkBuffHead) -> *mut SkBuff { + if list.is_null() || unsafe { (*list).qlen } == 0 { + return ptr::null_mut(); + } + + unsafe { + let skb = (*list).next; + if skb.is_null() { + return ptr::null_mut(); + } + + (*list).next = (*skb).next; + if (*list).next.is_null() { + (*list).prev = ptr::null_mut(); + } else { + (*(*list).next).prev = ptr::null_mut(); + } + (*skb).next = ptr::null_mut(); + (*skb).prev = ptr::null_mut(); + (*list).qlen = (*list).qlen.saturating_sub(1); + skb + } +} + +#[no_mangle] +pub extern "C" fn skb_queue_purge(list: *mut SkBuffHead) { + if list.is_null() { + return; + } + + loop { + let skb = skb_dequeue(list); + if skb.is_null() { + break; + } + kfree_skb(skb); + } +} + +#[no_mangle] +pub extern "C" fn skb_peek(list: *const SkBuffHead) -> *mut SkBuff { + if list.is_null() || unsafe { (*list).qlen } == 0 { + ptr::null_mut() + } else { + unsafe { (*list).next } + 
} +} + +#[no_mangle] +pub extern "C" fn skb_queue_len(list: *const SkBuffHead) -> u32 { + if list.is_null() { + 0 + } else { + unsafe { (*list).qlen } + } +} + +#[no_mangle] +pub extern "C" fn skb_queue_empty(list: *const SkBuffHead) -> i32 { + if skb_queue_len(list) == 0 { + 1 + } else { + 0 + } +} + +#[no_mangle] +pub extern "C" fn __netdev_alloc_skb( + _dev: *mut NetDevice, + length: u32, + gfp_mask: u32, +) -> *mut SkBuff { + alloc_skb(length, gfp_mask) +} + +#[no_mangle] +pub extern "C" fn skb_copy(src: *const SkBuff, gfp: u32) -> *mut SkBuff { + if src.is_null() { + return ptr::null_mut(); + } + + let src_ref = unsafe { &*src }; + let dst = alloc_skb(src_ref.end, gfp); + if dst.is_null() { + return ptr::null_mut(); + } + + let headroom = skb_headroom(src); + skb_reserve(dst, headroom); + let dst_data = skb_put(dst, src_ref.len); + if dst_data.is_null() { + kfree_skb(dst); + return ptr::null_mut(); + } + + if src_ref.len != 0 { + unsafe { ptr::copy_nonoverlapping(src_ref.data, dst_data, src_ref.len as usize) }; + } + unsafe { + (*dst).network_header = src_ref.network_header; + (*dst).mac_header = src_ref.mac_header; + } + dst +} + +#[no_mangle] +pub extern "C" fn skb_clone(skb: *const SkBuff, _gfp: u32) -> *mut SkBuff { + if skb.is_null() { + return ptr::null_mut(); + } + + let skb_ref = unsafe { &*skb }; + if skb_ref.shared.is_null() { + return ptr::null_mut(); + } + + unsafe { &*skb_ref.shared } + .refcount + .fetch_add(1, Ordering::AcqRel); + Box::into_raw(Box::new(SkBuff { + head: skb_ref.head, + data: skb_ref.data, + len: skb_ref.len, + tail: skb_ref.tail, + end: skb_ref.end, + next: ptr::null_mut(), + prev: ptr::null_mut(), + network_header: skb_ref.network_header, + mac_header: skb_ref.mac_header, + shared: skb_ref.shared, + })) +} + +#[no_mangle] +pub extern "C" fn skb_set_network_header(skb: *mut SkBuff, offset: i32) { + if skb.is_null() { + return; + } + unsafe { (*skb).network_header = offset }; +} + +#[no_mangle] +pub extern "C" fn 
skb_reset_mac_header(skb: *mut SkBuff) { + if skb.is_null() { + return; + } + unsafe { + (*skb).mac_header = skb_headroom_inner(&*skb) as i32; + } +} + #[no_mangle] pub extern "C" fn alloc_netdev_mqs( sizeof_priv: usize, @@ -200,6 +442,8 @@ pub extern "C" fn alloc_netdev_mqs( ieee80211_ptr: ptr::null_mut(), priv_data: ptr::null_mut(), registered: AtomicI32::new(0), + tx_queue_state: AtomicU32::new(0), + device_attached: AtomicI32::new(1), priv_alloc_size: 0, priv_alloc_align: 0, }); @@ -304,6 +548,102 @@ pub extern "C" fn netif_carrier_ok(dev: *const NetDevice) -> i32 { } } +#[no_mangle] +pub extern "C" fn netif_napi_add( + dev: *mut NetDevice, + napi: *mut NapiStruct, + poll: Option i32>, + weight: i32, +) { + if napi.is_null() { + return; + } + + unsafe { + (*napi).dev = dev; + (*napi).poll = poll; + (*napi).weight = weight; + (*napi).state.store(NAPI_STATE_IDLE, Ordering::Release); + } +} + +#[no_mangle] +pub extern "C" fn napi_schedule(napi: *mut NapiStruct) { + if napi.is_null() { + return; + } + + let napi_ref = unsafe { &*napi }; + if napi_ref + .state + .compare_exchange( + NAPI_STATE_IDLE, + NAPI_STATE_SCHEDULED, + Ordering::AcqRel, + Ordering::Acquire, + ) + .is_ok() + { + if let Some(poll) = napi_ref.poll { + let _ = poll(napi, napi_ref.weight); + } + } +} + +#[no_mangle] +pub extern "C" fn napi_complete_done(napi: *mut NapiStruct, work_done: i32) -> i32 { + if napi.is_null() || work_done < 0 { + return 0; + } + + unsafe { &*napi } + .state + .store(NAPI_STATE_IDLE, Ordering::Release); + if work_done < unsafe { (*napi).weight } { + 1 + } else { + 0 + } +} + +#[no_mangle] +pub extern "C" fn netif_tx_wake_queue(dev: *mut NetDevice, queue_idx: u16) { + if dev.is_null() || queue_idx >= 32 { + return; + } + let mask = !(1u32 << queue_idx); + let _ = unsafe { &*dev } + .tx_queue_state + .fetch_and(mask, Ordering::AcqRel); +} + +#[no_mangle] +pub extern "C" fn netif_tx_stop_queue(dev: *mut NetDevice, queue_idx: u16) { + if dev.is_null() || queue_idx >= 32 { + 
return; + } + let mask = 1u32 << queue_idx; + let _ = unsafe { &*dev } + .tx_queue_state + .fetch_or(mask, Ordering::AcqRel); +} + +#[no_mangle] +pub extern "C" fn netif_device_attach(dev: *mut NetDevice) { + if dev.is_null() { + return; + } + unsafe { &*dev }.device_attached.store(1, Ordering::Release); +} + +#[no_mangle] +pub extern "C" fn netif_device_detach(dev: *mut NetDevice) { + if dev.is_null() { + return; + } + unsafe { &*dev }.device_attached.store(0, Ordering::Release); +} + #[cfg(test)] mod tests { use super::*; @@ -311,11 +651,17 @@ mod tests { use std::sync::atomic::AtomicUsize; static SETUP_CALLS: AtomicUsize = AtomicUsize::new(0); + static NAPI_POLLS: AtomicUsize = AtomicUsize::new(0); extern "C" fn test_setup(_dev: *mut NetDevice) { SETUP_CALLS.fetch_add(1, Ordering::AcqRel); } + extern "C" fn test_napi_poll(_napi: *mut NapiStruct, budget: i32) -> i32 { + NAPI_POLLS.fetch_add(1, Ordering::AcqRel); + budget - 1 + } + #[test] fn skb_allocation_and_growth_work() { let skb = alloc_skb(64, 0); @@ -343,9 +689,49 @@ mod tests { kfree_skb(skb); } + #[test] + fn skb_queue_copy_and_clone_work() { + let skb = alloc_skb(32, 0); + assert!(!skb.is_null()); + skb_reserve(skb, 4); + let data = skb_put(skb, 6); + assert!(!data.is_null()); + unsafe { ptr::copy_nonoverlapping([1u8, 2, 3, 4, 5, 6].as_ptr(), data, 6) }; + skb_set_network_header(skb, 2); + skb_reset_mac_header(skb); + + let copy = skb_copy(skb, 0); + assert!(!copy.is_null()); + assert_eq!(unsafe { (*copy).len }, 6); + assert_eq!(unsafe { (*copy).network_header }, 2); + + let clone = skb_clone(skb, 0); + assert!(!clone.is_null()); + assert_eq!(unsafe { (*clone).data }, unsafe { (*skb).data }); + + let mut queue = SkBuffHead { + next: ptr::null_mut(), + prev: ptr::null_mut(), + qlen: 123, + lock: 1, + }; + skb_queue_head_init(&mut queue); + skb_queue_tail(&mut queue, skb); + skb_queue_tail(&mut queue, copy); + assert_eq!(skb_queue_len(&queue), 2); + assert_eq!(skb_queue_empty(&queue), 0); + 
assert_eq!(skb_peek(&queue), skb); + assert_eq!(skb_dequeue(&mut queue), skb); + assert_eq!(skb_queue_len(&queue), 1); + kfree_skb(skb); + skb_queue_purge(&mut queue); + assert_eq!(skb_queue_empty(&queue), 1); + kfree_skb(clone); + } + #[test] fn net_device_carrier_tracking_works() { - let name = CString::new("wlan%d").unwrap(); + let name = CString::new("wlan%d").expect("valid test CString"); let dev = alloc_netdev_mqs( 0usize, name.as_ptr().cast::(), @@ -364,9 +750,9 @@ mod tests { } #[test] - fn net_device_setup_and_registration_work() { + fn net_device_setup_registration_and_queue_state_work() { SETUP_CALLS.store(0, Ordering::Release); - let name = CString::new("wlan%d").unwrap(); + let name = CString::new("wlan%d").expect("valid test CString"); let dev = alloc_netdev_mqs( 32usize, name.as_ptr().cast::(), @@ -380,8 +766,44 @@ mod tests { assert_eq!(register_netdev(dev), 0); assert_eq!(unsafe { (*dev).registered.load(Ordering::Acquire) }, 1); assert_eq!(register_netdev(dev), -16); + + netif_tx_stop_queue(dev, 2); + assert_ne!( + unsafe { (*dev).tx_queue_state.load(Ordering::Acquire) } & (1 << 2), + 0 + ); + netif_tx_wake_queue(dev, 2); + assert_eq!( + unsafe { (*dev).tx_queue_state.load(Ordering::Acquire) } & (1 << 2), + 0 + ); + + netif_device_detach(dev); + assert_eq!(unsafe { (*dev).device_attached.load(Ordering::Acquire) }, 0); + netif_device_attach(dev); + assert_eq!(unsafe { (*dev).device_attached.load(Ordering::Acquire) }, 1); + unregister_netdev(dev); assert_eq!(unsafe { (*dev).registered.load(Ordering::Acquire) }, 0); free_netdev(dev); } + + #[test] + fn napi_schedule_and_complete_work() { + let mut napi = NapiStruct { + poll: None, + dev: ptr::null_mut(), + state: AtomicI32::new(99), + weight: 0, + }; + + NAPI_POLLS.store(0, Ordering::Release); + netif_napi_add(ptr::null_mut(), &mut napi, Some(test_napi_poll), 8); + assert_eq!(napi.weight, 8); + napi_schedule(&mut napi); + assert_eq!(NAPI_POLLS.load(Ordering::Acquire), 1); + 
assert_eq!(napi.state.load(Ordering::Acquire), NAPI_STATE_SCHEDULED); + assert_eq!(napi_complete_done(&mut napi, 4), 1); + assert_eq!(napi.state.load(Ordering::Acquire), NAPI_STATE_IDLE); + } } diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/pci.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/pci.rs index 8b42ecfa..2194fb6c 100644 --- a/local/recipes/drivers/linux-kpi/source/src/rust_impl/pci.rs +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/pci.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::os::raw::c_ulong; use std::ptr; use std::sync::Mutex; @@ -7,8 +8,14 @@ use redox_driver_sys::pci::{enumerate_pci_all, PciDevice, PciDeviceInfo, PciLoca const EINVAL: i32 = 22; const ENODEV: i32 = 19; const EIO: i32 = 5; +const EBUSY: i32 = 16; const PCI_ANY_ID: u32 = !0; +pub const PCI_IRQ_MSI: u32 = 1; +pub const PCI_IRQ_MSIX: u32 = 2; +pub const PCI_IRQ_LEGACY: u32 = 4; +pub const PCI_IRQ_NOLEGACY: u32 = 8; + #[repr(C)] #[derive(Default)] pub struct Device { @@ -46,6 +53,14 @@ pub struct PciDeviceId { driver_data: c_ulong, } +#[repr(C)] +#[derive(Clone, Copy, Default)] +pub struct MsixEntry { + pub vector: u32, + pub entry: u16, + pub _pad: u16, +} + impl Default for PciDev { fn default() -> Self { PciDev { @@ -71,9 +86,16 @@ struct CurrentDevice { ptr: usize, } +#[derive(Clone)] +struct AllocatedVectors { + _flags: u32, + vectors: Vec, +} + lazy_static::lazy_static! 
{ static ref CURRENT_DEVICE: Mutex> = Mutex::new(None); static ref REGISTERED_PROBE: Mutex> = Mutex::new(None); + static ref IRQ_VECTORS: Mutex> = Mutex::new(HashMap::new()); } pub const PCI_VENDOR_ID_AMD: u16 = 0x1002; @@ -106,6 +128,12 @@ fn open_current_device(dev: *mut PciDev) -> Result { }) } +fn clear_irq_vectors_for_ptr(dev_ptr: usize) { + if let Ok(mut vectors) = IRQ_VECTORS.lock() { + vectors.remove(&dev_ptr); + } +} + fn matches_id(info: &PciDeviceInfo, id: &PciDeviceId) -> bool { let class = ((info.class_code as u32) << 16) | ((info.subclass as u32) << 8) | info.prog_if as u32; @@ -180,6 +208,7 @@ fn replace_current_device(location: PciLocation, dev_ptr: *mut PciDev) { location, ptr: dev_ptr as usize, }) { + clear_irq_vectors_for_ptr(previous.ptr); unsafe { drop(Box::from_raw(previous.ptr as *mut PciDev)) }; } } @@ -188,11 +217,53 @@ fn replace_current_device(location: PciLocation, dev_ptr: *mut PciDev) { fn clear_current_device() { if let Ok(mut state) = CURRENT_DEVICE.lock() { if let Some(previous) = state.take() { + clear_irq_vectors_for_ptr(previous.ptr); unsafe { drop(Box::from_raw(previous.ptr as *mut PciDev)) }; } } } +fn allocate_vectors(dev: *mut PciDev, min_vecs: i32, max_vecs: i32, flags: u32) -> i32 { + if dev.is_null() || min_vecs <= 0 || max_vecs <= 0 || min_vecs > max_vecs { + return -EINVAL; + } + if flags & (PCI_IRQ_MSI | PCI_IRQ_MSIX | PCI_IRQ_LEGACY) == 0 { + return -EINVAL; + } + + let base_irq = unsafe { (*dev).irq as i32 }; + if base_irq <= 0 { + return -ENODEV; + } + + let dev_key = dev as usize; + let Ok(mut vectors) = IRQ_VECTORS.lock() else { + return -EINVAL; + }; + if vectors.contains_key(&dev_key) { + return -EBUSY; + } + + let count = if flags & PCI_IRQ_MSIX != 0 { + max_vecs + } else { + 1 + }; + if count < min_vecs { + return -EINVAL; + } + + let allocated = (0..count).map(|index| base_irq + index).collect::>(); + vectors.insert( + dev_key, + AllocatedVectors { + _flags: flags, + vectors: allocated, + }, + ); + count +} + 
#[no_mangle] pub extern "C" fn pci_enable_device(dev: *mut PciDev) -> i32 { if dev.is_null() { @@ -337,6 +408,82 @@ pub struct PciDriver { remove: Option, } +#[no_mangle] +pub extern "C" fn pci_alloc_irq_vectors( + dev: *mut PciDev, + min_vecs: i32, + max_vecs: i32, + flags: u32, +) -> i32 { + allocate_vectors(dev, min_vecs, max_vecs, flags) +} + +#[no_mangle] +pub extern "C" fn pci_free_irq_vectors(dev: *mut PciDev) { + if dev.is_null() { + return; + } + clear_irq_vectors_for_ptr(dev as usize); +} + +#[no_mangle] +pub extern "C" fn pci_irq_vector(dev: *mut PciDev, vector_idx: i32) -> i32 { + if dev.is_null() || vector_idx < 0 { + return -EINVAL; + } + + let Ok(vectors) = IRQ_VECTORS.lock() else { + return -EINVAL; + }; + let Some(allocated) = vectors.get(&(dev as usize)) else { + return -EINVAL; + }; + allocated + .vectors + .get(vector_idx as usize) + .copied() + .unwrap_or(-EINVAL) +} + +#[no_mangle] +pub extern "C" fn pci_enable_msi(dev: *mut PciDev) -> i32 { + pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI) +} + +#[no_mangle] +pub extern "C" fn pci_disable_msi(dev: *mut PciDev) { + pci_free_irq_vectors(dev); +} + +#[no_mangle] +pub extern "C" fn pci_enable_msix_range( + dev: *mut PciDev, + entries: *mut MsixEntry, + minvec: i32, + maxvec: i32, +) -> i32 { + if entries.is_null() { + return -EINVAL; + } + + let count = pci_alloc_irq_vectors(dev, minvec, maxvec, PCI_IRQ_MSIX); + if count < 0 { + return count; + } + + for index in 0..count { + unsafe { + (*entries.add(index as usize)).vector = pci_irq_vector(dev, index) as u32; + } + } + count +} + +#[no_mangle] +pub extern "C" fn pci_disable_msix(dev: *mut PciDev) { + pci_free_irq_vectors(dev); +} + #[no_mangle] pub extern "C" fn pci_register_driver(drv: *mut PciDriver) -> i32 { if drv.is_null() { @@ -439,3 +586,49 @@ pub extern "C" fn pci_unregister_driver(drv: *mut PciDriver) { } log::info!("pci_unregister_driver: cleared registered PCI driver state"); } + +#[cfg(test)] +mod tests { + use super::*; + + fn 
test_dev(irq: u32) -> PciDev { + PciDev { + irq, + ..PciDev::default() + } + } + + #[test] + fn pci_irq_vector_lifecycle_works() { + let mut dev = test_dev(32); + assert_eq!(pci_alloc_irq_vectors(&mut dev, 1, 1, PCI_IRQ_MSI), 1); + assert_eq!(pci_irq_vector(&mut dev, 0), 32); + assert_eq!(pci_alloc_irq_vectors(&mut dev, 1, 1, PCI_IRQ_MSI), -16); + pci_free_irq_vectors(&mut dev); + assert_eq!(pci_irq_vector(&mut dev, 0), -22); + } + + #[test] + fn pci_msix_range_populates_entries() { + let mut dev = test_dev(40); + let mut entries = [MsixEntry::default(); 3]; + assert_eq!( + pci_enable_msix_range(&mut dev, entries.as_mut_ptr(), 2, 3), + 3 + ); + assert_eq!(entries[0].vector, 40); + assert_eq!(entries[1].vector, 41); + assert_eq!(entries[2].vector, 42); + pci_disable_msix(&mut dev); + } + + #[test] + fn pci_rejects_invalid_irq_vector_requests() { + let mut dev = test_dev(0); + assert_eq!(pci_enable_msi(&mut dev), -19); + assert_eq!( + pci_alloc_irq_vectors(ptr::null_mut(), 1, 1, PCI_IRQ_MSI), + -22 + ); + } +} diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/sync.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/sync.rs index 9a571cde..ed7f6b5f 100644 --- a/local/recipes/drivers/linux-kpi/source/src/rust_impl/sync.rs +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/sync.rs @@ -1,4 +1,6 @@ -use std::sync::atomic::{AtomicU8, Ordering}; +use std::ptr; +use std::sync::atomic::{AtomicI32, AtomicU8, Ordering}; +use std::time::{Duration, Instant}; const UNLOCKED: u8 = 0; const LOCKED: u8 = 1; @@ -154,14 +156,17 @@ pub extern "C" fn irqs_disabled() -> bool { IRQ_DEPTH.load(Ordering::Acquire) > 0 } -use std::ptr; - #[repr(C)] pub struct Completion { done: AtomicU8, _padding: [u8; 63], } +#[repr(C)] +pub struct AtomicT { + value: AtomicI32, +} + #[no_mangle] pub extern "C" fn init_completion(c: *mut Completion) { if c.is_null() { @@ -186,6 +191,14 @@ pub extern "C" fn complete(c: *mut Completion) { unsafe { &*c }.done.store(1, 
Ordering::Release); } +#[no_mangle] +pub extern "C" fn complete_all(c: *mut Completion) { + if c.is_null() { + return; + } + unsafe { &*c }.done.store(u8::MAX, Ordering::Release); +} + #[no_mangle] pub extern "C" fn wait_for_completion(c: *mut Completion) { if c.is_null() { @@ -196,6 +209,31 @@ pub extern "C" fn wait_for_completion(c: *mut Completion) { } } +#[no_mangle] +pub extern "C" fn wait_for_completion_timeout(c: *mut Completion, timeout_ms: u64) -> i32 { + if c.is_null() { + return 0; + } + + if unsafe { &*c }.done.load(Ordering::Acquire) != 0 { + return 1; + } + + let deadline = Instant::now() + .checked_add(Duration::from_millis(timeout_ms)) + .unwrap_or_else(Instant::now); + + loop { + if unsafe { &*c }.done.load(Ordering::Acquire) != 0 { + return 1; + } + if Instant::now() >= deadline { + return 0; + } + std::thread::yield_now(); + } +} + #[no_mangle] pub extern "C" fn reinit_completion(c: *mut Completion) { if c.is_null() { @@ -204,6 +242,109 @@ pub extern "C" fn reinit_completion(c: *mut Completion) { unsafe { &*c }.done.store(0, Ordering::Release); } +#[no_mangle] +pub extern "C" fn atomic_set(v: *mut AtomicT, i: i32) { + if v.is_null() { + return; + } + unsafe { &*v }.value.store(i, Ordering::SeqCst); +} + +#[no_mangle] +pub extern "C" fn atomic_read(v: *const AtomicT) -> i32 { + if v.is_null() { + return 0; + } + unsafe { &*v }.value.load(Ordering::SeqCst) +} + +#[no_mangle] +pub extern "C" fn atomic_add(i: i32, v: *mut AtomicT) { + if v.is_null() { + return; + } + unsafe { &*v }.value.fetch_add(i, Ordering::SeqCst); +} + +#[no_mangle] +pub extern "C" fn atomic_sub(i: i32, v: *mut AtomicT) { + if v.is_null() { + return; + } + unsafe { &*v }.value.fetch_sub(i, Ordering::SeqCst); +} + +#[no_mangle] +pub extern "C" fn atomic_inc(v: *mut AtomicT) { + atomic_add(1, v); +} + +#[no_mangle] +pub extern "C" fn atomic_dec(v: *mut AtomicT) { + atomic_sub(1, v); +} + +#[no_mangle] +pub extern "C" fn atomic_inc_and_test(v: *mut AtomicT) -> i32 { + if 
v.is_null() { + return 0; + } + if unsafe { &*v }.value.fetch_add(1, Ordering::SeqCst) + 1 == 0 { + 1 + } else { + 0 + } +} + +#[no_mangle] +pub extern "C" fn atomic_dec_and_test(v: *mut AtomicT) -> i32 { + if v.is_null() { + return 0; + } + if unsafe { &*v }.value.fetch_sub(1, Ordering::SeqCst) - 1 == 0 { + 1 + } else { + 0 + } +} + +#[no_mangle] +pub extern "C" fn atomic_add_return(i: i32, v: *mut AtomicT) -> i32 { + if v.is_null() { + return 0; + } + unsafe { &*v }.value.fetch_add(i, Ordering::SeqCst) + i +} + +#[no_mangle] +pub extern "C" fn atomic_sub_return(i: i32, v: *mut AtomicT) -> i32 { + if v.is_null() { + return 0; + } + unsafe { &*v }.value.fetch_sub(i, Ordering::SeqCst) - i +} + +#[no_mangle] +pub extern "C" fn atomic_xchg(v: *mut AtomicT, new: i32) -> i32 { + if v.is_null() { + return 0; + } + unsafe { &*v }.value.swap(new, Ordering::SeqCst) +} + +#[no_mangle] +pub extern "C" fn atomic_cmpxchg(v: *mut AtomicT, old: i32, new: i32) -> i32 { + if v.is_null() { + return 0; + } + match unsafe { &*v } + .value + .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst) + { + Ok(previous) | Err(previous) => previous, + } +} + #[cfg(test)] mod tests { use super::*; @@ -228,4 +369,48 @@ mod tests { local_irq_enable(); assert!(!irqs_disabled()); } + + #[test] + fn atomic_operations_cover_all_paths() { + let mut value = AtomicT { + value: AtomicI32::new(0), + }; + + atomic_set(&mut value, 3); + assert_eq!(atomic_read(&value), 3); + atomic_add(4, &mut value); + assert_eq!(atomic_read(&value), 7); + atomic_sub(2, &mut value); + assert_eq!(atomic_read(&value), 5); + atomic_inc(&mut value); + atomic_dec(&mut value); + assert_eq!(atomic_add_return(5, &mut value), 10); + assert_eq!(atomic_sub_return(3, &mut value), 7); + assert_eq!(atomic_xchg(&mut value, 11), 7); + assert_eq!(atomic_cmpxchg(&mut value, 10, 12), 11); + assert_eq!(atomic_cmpxchg(&mut value, 11, 13), 11); + assert_eq!(atomic_read(&value), 13); + + atomic_set(&mut value, -1); + 
assert_eq!(atomic_inc_and_test(&mut value), 1); + atomic_set(&mut value, 1); + assert_eq!(atomic_dec_and_test(&mut value), 1); + } + + #[test] + fn completion_timeout_and_complete_all_work() { + let mut completion = Completion { + done: AtomicU8::new(0), + _padding: [0; 63], + }; + + assert_eq!(wait_for_completion_timeout(&mut completion, 1), 0); + complete_all(&mut completion); + assert_eq!(wait_for_completion_timeout(&mut completion, 1), 1); + reinit_completion(&mut completion); + assert_eq!(wait_for_completion_timeout(&mut completion, 1), 0); + complete(&mut completion); + wait_for_completion(&mut completion); + assert_eq!(wait_for_completion_timeout(&mut completion, 1), 1); + } } diff --git a/local/recipes/drivers/linux-kpi/source/src/rust_impl/wireless.rs b/local/recipes/drivers/linux-kpi/source/src/rust_impl/wireless.rs index 7099a8f3..cd96aa4e 100644 --- a/local/recipes/drivers/linux-kpi/source/src/rust_impl/wireless.rs +++ b/local/recipes/drivers/linux-kpi/source/src/rust_impl/wireless.rs @@ -1,10 +1,28 @@ use std::alloc::{alloc_zeroed, dealloc, Layout}; +use std::collections::HashMap; use std::ffi::c_void; use std::ptr; use std::sync::atomic::{AtomicI32, Ordering}; +use std::sync::Mutex; use super::net::{netif_carrier_off, netif_carrier_on, NetDevice}; +#[derive(Clone, Default)] +struct WirelessEventState { + new_sta: Option<[u8; 6]>, + mgmt_rx_freq: u32, + mgmt_rx_signal: i32, + mgmt_rx_len: usize, + mgmt_tx_cookie: u64, + mgmt_tx_len: usize, + mgmt_tx_ack: bool, + sched_scan_reqid: u64, +} + +lazy_static::lazy_static! 
{ + static ref WIRELESS_EVENTS: Mutex> = Mutex::new(HashMap::new()); +} + #[repr(C)] pub struct Wiphy { pub priv_data: *mut c_void, @@ -76,6 +94,15 @@ pub struct StationParameters { pub sta_flags_set: u32, } +fn update_event_state(key: usize, update: F) +where + F: FnOnce(&mut WirelessEventState), +{ + if let Ok(mut events) = WIRELESS_EVENTS.lock() { + update(events.entry(key).or_default()); + } +} + #[no_mangle] pub extern "C" fn wiphy_new_nm( _ops: *const c_void, @@ -114,6 +141,9 @@ pub extern "C" fn wiphy_free(wiphy: *mut Wiphy) { if wiphy.is_null() { return; } + if let Ok(mut events) = WIRELESS_EVENTS.lock() { + events.remove(&(wiphy as usize)); + } unsafe { let wiphy_box = Box::from_raw(wiphy); if !wiphy_box.priv_data.is_null() { @@ -283,6 +313,254 @@ pub extern "C" fn cfg80211_ready_on_channel( ) { } +#[repr(C)] +pub struct Ieee80211Channel { + pub band: u32, + pub center_freq: u16, + pub hw_value: u16, + pub flags: u32, + pub max_power: i8, + pub max_reg_power: i8, + pub max_antenna_gain: i8, + pub beacon_found: bool, +} + +pub const NL80211_BAND_2GHZ: u32 = 0; +pub const NL80211_BAND_5GHZ: u32 = 1; +pub const NL80211_BAND_6GHZ: u32 = 2; + +pub const IEEE80211_CHAN_DISABLED: u32 = 1 << 0; +pub const IEEE80211_CHAN_NO_IR: u32 = 1 << 1; +pub const IEEE80211_CHAN_RADAR: u32 = 1 << 2; +pub const IEEE80211_CHAN_NO_HT40PLUS: u32 = 1 << 3; +pub const IEEE80211_CHAN_NO_HT40MINUS: u32 = 1 << 4; +pub const IEEE80211_CHAN_NO_OFDM: u32 = 1 << 5; +pub const IEEE80211_CHAN_NO_80MHZ: u32 = 1 << 6; +pub const IEEE80211_CHAN_NO_160MHZ: u32 = 1 << 7; + +#[repr(C)] +pub struct Ieee80211Rate { + pub flags: u32, + pub bitrate: u16, + pub hw_value: u16, + pub hw_value_short: u16, +} + +pub const IEEE80211_RATE_SHORT_PREAMBLE: u32 = 1 << 0; +pub const IEEE80211_RATE_MANDATORY: u32 = 1 << 1; +pub const IEEE80211_RATE_ERP_G: u32 = 1 << 2; + +#[repr(C)] +pub struct Ieee80211SupportedBand { + pub channels: *mut Ieee80211Channel, + pub n_channels: usize, + pub bitrates: *mut 
Ieee80211Rate, + pub n_bitrates: usize, + pub ht_cap: *mut c_void, + pub vht_cap: *mut c_void, +} + +#[no_mangle] +pub extern "C" fn wiphy_bands_append( + wiphy: *mut Wiphy, + band_idx: u32, + band: *mut Ieee80211SupportedBand, +) -> i32 { + if wiphy.is_null() || band.is_null() { + return -22; + } + + if band_idx > NL80211_BAND_6GHZ { + return -22; + } + + let band_ref = unsafe { &*band }; + if band_ref.n_channels == 0 || band_ref.channels.is_null() { + return -22; + } + + 0 +} + +#[repr(C)] +pub struct Cfg80211Bss { + pub bssid: [u8; 6], + pub channel: *mut Ieee80211Channel, + pub signal: i16, + pub capability: u16, + pub beacon_interval: u16, + pub ies: *const u8, + pub ies_len: usize, +} + +#[no_mangle] +pub extern "C" fn cfg80211_inform_bss( + wiphy: *mut Wiphy, + wdev: *mut WirelessDev, + _freq: u32, + bssid: *const u8, + _tsf: u64, + capability: u16, + beacon_interval: u16, + ies: *const u8, + ies_len: usize, + signal: i32, + _gfp: u32, +) -> *mut Cfg80211Bss { + if wiphy.is_null() || wdev.is_null() || bssid.is_null() { + return ptr::null_mut(); + } + + let mut bssid_bytes = [0; 6]; + unsafe { + ptr::copy_nonoverlapping(bssid, bssid_bytes.as_mut_ptr(), bssid_bytes.len()); + } + + let bss = Box::new(Cfg80211Bss { + bssid: bssid_bytes, + channel: ptr::null_mut(), + signal: signal.clamp(i16::MIN as i32, i16::MAX as i32) as i16, + capability, + beacon_interval, + ies, + ies_len, + }); + Box::into_raw(bss) +} + +#[no_mangle] +pub extern "C" fn cfg80211_put_bss(bss: *mut Cfg80211Bss) { + if bss.is_null() { + return; + } + + unsafe { + drop(Box::from_raw(bss)); + } +} + +#[no_mangle] +pub extern "C" fn cfg80211_get_bss( + wiphy: *mut Wiphy, + band: u32, + _bssid: *const u8, + _ssid: *const u8, + _ssid_len: usize, + _bss_type: u32, + _privacy: u32, +) -> *mut Cfg80211Bss { + if wiphy.is_null() || band > NL80211_BAND_6GHZ { + return ptr::null_mut(); + } + + ptr::null_mut() +} + +#[no_mangle] +pub extern "C" fn cfg80211_new_sta( + dev: *mut c_void, + mac_addr: *const 
u8, + _params: *const StationParameters, + _gfp: u32, +) { + if dev.is_null() || mac_addr.is_null() { + return; + } + + let wdev = netdev_to_wireless_dev(dev); + if wdev.is_null() || unsafe { (*wdev).wiphy }.is_null() { + return; + } + + let mut addr = [0u8; 6]; + unsafe { ptr::copy_nonoverlapping(mac_addr, addr.as_mut_ptr(), addr.len()) }; + update_event_state(unsafe { (*wdev).wiphy as usize }, |state| { + state.new_sta = Some(addr) + }); +} + +#[no_mangle] +pub extern "C" fn cfg80211_rx_mgmt( + wdev: *mut WirelessDev, + freq: u32, + sig_dbm: i32, + buf: *const u8, + len: usize, + _gfp: u32, +) { + if wdev.is_null() || (buf.is_null() && len != 0) { + return; + } + + update_event_state(wdev as usize, |state| { + state.mgmt_rx_freq = freq; + state.mgmt_rx_signal = sig_dbm; + state.mgmt_rx_len = len; + }); +} + +#[no_mangle] +pub extern "C" fn cfg80211_mgmt_tx_status( + wdev: *mut WirelessDev, + cookie: u64, + buf: *const u8, + len: usize, + ack: bool, + _gfp: u32, +) { + if wdev.is_null() || (buf.is_null() && len != 0) { + return; + } + + update_event_state(wdev as usize, |state| { + state.mgmt_tx_cookie = cookie; + state.mgmt_tx_len = len; + state.mgmt_tx_ack = ack; + }); +} + +#[no_mangle] +pub extern "C" fn cfg80211_sched_scan_results(wiphy: *mut Wiphy, reqid: u64) { + if wiphy.is_null() { + return; + } + update_event_state(wiphy as usize, |state| state.sched_scan_reqid = reqid); +} + +#[no_mangle] +pub extern "C" fn ieee80211_channel_to_frequency(chan: u32, band: u32) -> u32 { + match band { + NL80211_BAND_2GHZ => match chan { + 14 => 2484, + 1..=13 => 2407 + chan * 5, + _ => 0, + }, + NL80211_BAND_5GHZ => 5000 + chan * 5, + NL80211_BAND_6GHZ => { + if chan == 2 { + 5935 + } else if chan >= 1 { + 5950 + chan * 5 + } else { + 0 + } + } + _ => 0, + } +} + +#[no_mangle] +pub extern "C" fn ieee80211_frequency_to_channel(freq: u32) -> u32 { + match freq { + 2484 => 14, + 2412..=2472 => (freq - 2407) / 5, + 5000..=5895 => (freq - 5000) / 5, + 5935 => 2, + 5955..=7115 
=> (freq - 5950) / 5, + _ => 0, + } +} + #[cfg(test)] mod tests { use super::*; @@ -303,7 +581,7 @@ mod tests { #[test] fn scan_and_connect_lifecycle_updates_wireless_state() { - let name = CString::new("wlan%d").unwrap(); + let name = CString::new("wlan%d").expect("valid test CString"); let dev = alloc_netdev_mqs(0, name.as_ptr().cast::(), 0, None, 1, 1); assert!(!dev.is_null()); @@ -366,4 +644,78 @@ mod tests { wiphy_free(wiphy); free_netdev(dev); } + + #[test] + fn ieee80211_channel_creation_and_flags_work() { + let channel = Ieee80211Channel { + band: NL80211_BAND_5GHZ, + center_freq: 5180, + hw_value: 36, + flags: IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_80MHZ, + max_power: 20, + max_reg_power: 23, + max_antenna_gain: 6, + beacon_found: true, + }; + + assert_eq!(channel.band, NL80211_BAND_5GHZ); + assert_eq!(channel.center_freq, 5180); + assert_eq!(channel.hw_value, 36); + assert_ne!(channel.flags & IEEE80211_CHAN_NO_IR, 0); + assert_ne!(channel.flags & IEEE80211_CHAN_RADAR, 0); + assert_ne!(channel.flags & IEEE80211_CHAN_NO_80MHZ, 0); + assert_eq!(channel.flags & IEEE80211_CHAN_DISABLED, 0); + assert!(channel.beacon_found); + } + + #[test] + fn cfg80211_events_and_channel_frequency_conversions_work() { + let name = CString::new("wlan%d").expect("valid test CString"); + let dev = alloc_netdev_mqs(0, name.as_ptr().cast::(), 0, None, 1, 1); + assert!(!dev.is_null()); + let wiphy = wiphy_new_nm(ptr::null(), 0, ptr::null()); + assert!(!wiphy.is_null()); + let mut wdev = WirelessDev { + wiphy, + netdev: dev.cast::(), + iftype: 0, + scan_in_flight: false, + scan_aborted: false, + connecting: false, + connected: false, + locally_generated: false, + last_status: 0, + last_reason: 0, + has_bssid: false, + last_bssid: [0; 6], + }; + unsafe { (*dev).ieee80211_ptr = (&mut wdev as *mut WirelessDev).cast::() }; + + let sta = [6u8, 5, 4, 3, 2, 1]; + cfg80211_new_sta(dev.cast::(), sta.as_ptr(), ptr::null(), 0); + cfg80211_rx_mgmt(&mut wdev, 2412, -42, 
sta.as_ptr(), sta.len(), 0); + cfg80211_mgmt_tx_status(&mut wdev, 99, sta.as_ptr(), sta.len(), true, 0); + cfg80211_sched_scan_results(wiphy, 1234); + + let events = WIRELESS_EVENTS.lock().expect("wireless events lock"); + let wiphy_state = events.get(&(wiphy as usize)).expect("wiphy event state"); + assert_eq!(wiphy_state.new_sta, Some(sta)); + assert_eq!(wiphy_state.sched_scan_reqid, 1234); + let wdev_state = events + .get(&((&mut wdev as *mut WirelessDev) as usize)) + .expect("wdev event state"); + assert_eq!(wdev_state.mgmt_rx_freq, 2412); + assert_eq!(wdev_state.mgmt_rx_signal, -42); + assert_eq!(wdev_state.mgmt_tx_cookie, 99); + assert!(wdev_state.mgmt_tx_ack); + drop(events); + + assert_eq!(ieee80211_channel_to_frequency(1, NL80211_BAND_2GHZ), 2412); + assert_eq!(ieee80211_channel_to_frequency(36, NL80211_BAND_5GHZ), 5180); + assert_eq!(ieee80211_frequency_to_channel(2484), 14); + assert_eq!(ieee80211_frequency_to_channel(5955), 1); + + wiphy_free(wiphy); + free_netdev(dev); + } } diff --git a/local/recipes/drivers/redbear-iwlwifi/source/src/linux_port.c b/local/recipes/drivers/redbear-iwlwifi/source/src/linux_port.c index 2c483a26..b281c823 100644 --- a/local/recipes/drivers/redbear-iwlwifi/source/src/linux_port.c +++ b/local/recipes/drivers/redbear-iwlwifi/source/src/linux_port.c @@ -1,387 +1,2088 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "../../../linux-kpi/source/src/c_headers/linux/atomic.h" +#include "../../../linux-kpi/source/src/c_headers/linux/dma-mapping.h" +#include "../../../linux-kpi/source/src/c_headers/linux/errno.h" +#include "../../../linux-kpi/source/src/c_headers/linux/firmware.h" +#include "../../../linux-kpi/source/src/c_headers/linux/interrupt.h" +#include "../../../linux-kpi/source/src/c_headers/linux/io.h" +#include "../../../linux-kpi/source/src/c_headers/linux/jiffies.h" +#include "../../../linux-kpi/source/src/c_headers/linux/kernel.h" 
+#include "../../../linux-kpi/source/src/c_headers/linux/list.h" +#include "../../../linux-kpi/source/src/c_headers/linux/mutex.h" +#include "../../../linux-kpi/source/src/c_headers/linux/netdevice.h" +#include "../../../linux-kpi/source/src/c_headers/linux/nl80211.h" +#include "../../../linux-kpi/source/src/c_headers/linux/pci.h" +#include "../../../linux-kpi/source/src/c_headers/linux/printk.h" +#include "../../../linux-kpi/source/src/c_headers/linux/skbuff.h" +#include "../../../linux-kpi/source/src/c_headers/linux/slab.h" +#include "../../../linux-kpi/source/src/c_headers/linux/spinlock.h" +#include "../../../linux-kpi/source/src/c_headers/linux/timer.h" +#include "../../../linux-kpi/source/src/c_headers/linux/wait.h" +#include "../../../linux-kpi/source/src/c_headers/net/cfg80211.h" +#include "../../../linux-kpi/source/src/c_headers/net/mac80211.h" +#include +#include +#include +#include #include #include +#define RB_IWL_MAX_TBS 6 +#define RB_IWL_MAX_TX_QUEUES 16 +#define RB_IWL_CMD_QUEUE 0 +#define RB_IWL_TXQ_SLOTS 256 +#define RB_IWL_CMD_SLOTS 64 +#define RB_IWL_RX_BUFS 128 +#define RB_IWL_RX_BUF_SIZE 4096 +#define RB_IWL_CMD_TIMEOUT 500 +#define RB_IWL_MAX_FW_NAME 128 +#define RB_IWL_MAX_SECURITY 32 +#define RB_IWL_MAX_SCAN_CHANNELS 16 + +#define RB_IWL_SVC_PREPARED (1U << 0) +#define RB_IWL_SVC_PROBED (1U << 1) +#define RB_IWL_SVC_INIT (1U << 2) +#define RB_IWL_SVC_ACTIVE (1U << 3) +#define RB_IWL_SVC_MAC80211 (1U << 4) +#define RB_IWL_SVC_SCAN_ACTIVE (1U << 5) +#define RB_IWL_SVC_CONNECTED (1U << 6) +#define RB_IWL_SVC_DMA_READY (1U << 7) +#define RB_IWL_SVC_IRQ_READY (1U << 8) + +#define RB_IWL_INT_RX (1U << 0) +#define RB_IWL_INT_TX (1U << 1) +#define RB_IWL_INT_CMD (1U << 2) +#define RB_IWL_INT_SCAN (1U << 3) +#define RB_IWL_INT_ERROR (1U << 4) + +#define RB_IWL_DEVICE_FAMILY_7000 7000 +#define RB_IWL_DEVICE_FAMILY_8000 8000 +#define RB_IWL_DEVICE_FAMILY_9000 9000 +#define RB_IWL_DEVICE_FAMILY_AX210 21000 +#define RB_IWL_DEVICE_FAMILY_BZ 30000 + 
+#define RB_IWL_CMD_SCAN 0x1001U +#define RB_IWL_CMD_ASSOC 0x1002U +#define RB_IWL_CMD_DISCONNECT 0x1003U +#define RB_IWL_CMD_FIRMWARE_BOOT 0x1004U + +#define IWL_CSR_HW_IF_CONFIG_REG 0x000U +#define IWL_CSR_INT 0x008U +#define IWL_CSR_INT_MASK 0x00CU +#define IWL_CSR_RESET 0x020U +#define IWL_CSR_GP_CNTRL 0x024U +#define IWL_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ 0x00000008U +#define IWL_CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ 0x00200000U +#define IWL_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY 0x00000001U +#define IWL_CSR_GP_CNTRL_REG_FLAG_INIT_DONE 0x00000004U +#define IWL_CSR_GP_CNTRL_REG_FLAG_SW_RESET_BZ 0x80000000U +#define IWL_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY 0x00000004U +#define IWL_CSR_RESET_REG_FLAG_SW_RESET 0x00000080U + +#define IWL_FH_RSCSR_CHNL0_RBDCB_BASE_REG 0x0A80U +#define IWL_FH_RSCSR_CHNL0_STTS_WPTR_REG 0x0A20U +#define IWL_FH_MEM_RCSR_CHNL0_CONFIG_REG 0x0A00U +#define IWL_FH_TCSR_CHNL_TX_CONFIG_REG(_q) (0x0D00U + ((_q) * 0x20U)) +#define IWL_HBUS_TARG_WRPTR 0x060U + +struct rb_iwl_fw_blob_info { + u32 magic; + u32 version; + u32 build; + u32 api; + size_t size; +}; + +struct rb_iwl_cmd_hdr { + u32 id; + u32 len; + u32 cookie; + u32 flags; +}; + +struct rb_iwl_scan_cmd { + struct rb_iwl_cmd_hdr hdr; + u32 n_channels; + u32 passive_dwell; + u32 active_dwell; + u32 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u16 channels[RB_IWL_MAX_SCAN_CHANNELS]; +}; + +struct rb_iwl_assoc_cmd { + struct rb_iwl_cmd_hdr hdr; + u32 ssid_len; + u32 security_len; + u32 key_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + char security[RB_IWL_MAX_SECURITY]; + char key[64]; +}; + +struct rb_iwl_disconnect_cmd { + struct rb_iwl_cmd_hdr hdr; + u32 reason; +}; + +struct rb_iwl_fw_boot_cmd { + struct rb_iwl_cmd_hdr hdr; + u32 hw_rev; + u32 fw_version; + u32 fw_build; + u32 dma_mask; + u32 device_family; +}; + +/* DMA ring descriptor */ +struct iwl_tfd { + u8 num_tbs; + u8 padding[3]; + u64 tbs[RB_IWL_MAX_TBS]; + u32 status; +}; + +/* Receive buffer descriptor */ +struct 
iwl_rx_buffer { + dma_addr_t dma_addr; + void *addr; + u32 size; +}; + +/* TX queue */ +struct iwl_tx_queue { + int id; + int write_ptr; + int read_ptr; + int n_window; + int n_tfd; + struct iwl_tfd *tfds; + dma_addr_t tfds_dma; + struct sk_buff **skbs; + struct sk_buff_head overflow_q; + spinlock_t lock; + u8 active; + u8 need_update; +}; + +/* RX queue */ +struct iwl_rx_queue { + int read_ptr; + int write_ptr; + struct iwl_rx_buffer *rx_bufs; + dma_addr_t buf_dma; + void *rb_stts; + dma_addr_t rb_stts_dma; + u32 n_rb; + u32 n_rb_in_use; + spinlock_t lock; +}; + +/* Command queue entry */ +struct iwl_cmd_meta { + u32 flags; + void *source; +}; + +/* The PCIe transport */ +struct iwl_trans_pcie { + struct pci_dev *pci_dev; + void *mmio_base; + size_t mmio_size; + + /* TX/RX queues */ + struct iwl_tx_queue *tx_queues; + int num_tx_queues; + struct iwl_rx_queue rx_queue; + + /* Command queue (queue 0) */ + struct iwl_cmd_meta *cmd_meta; + wait_queue_head_t wait_command_queue; + int cmd_queue_write; + int cmd_queue_read; + + /* Interrupt state */ + int irq; + int num_irq_vectors; + int msix_enabled; + + /* DMA pools */ + struct dma_pool *tfds_pool; + struct dma_pool *rb_pool; + + /* Driver state */ + u32 hw_rev; + u32 hw_rf_id; + u32 svc_flags; + u32 supported_dma_mask; + u8 mac_addr[6]; + + /* mac80211 integration */ + struct ieee80211_hw *hw; + struct ieee80211_ops *ops; + struct ieee80211_vif *vif; + struct wiphy *wiphy; + struct net_device *netdev; + + /* Synchronization */ + struct mutex mutex; + spinlock_t reg_lock; + int fw_running; + int command_timeout; + + /* Device family info */ + int device_family; + const char *fw_name; + const char *pnvm_name; + + struct list_head link; + struct wireless_dev wdev; + struct ieee80211_sta station; + struct ieee80211_bss_conf bss_conf; + struct rb_iwl_fw_blob_info fw_info; + struct rb_iwl_fw_blob_info pnvm_info; + char fw_name_storage[RB_IWL_MAX_FW_NAME]; + char pnvm_name_storage[RB_IWL_MAX_FW_NAME]; + char 
last_ssid[IEEE80211_MAX_SSID_LEN + 1]; + char last_security[RB_IWL_MAX_SECURITY]; + u8 current_bssid[6]; + u16 vendor_id; + u16 device_id; + u8 bus_number; + u8 dev_number; + u8 func_number; + u32 last_interrupt_cause; + u32 pending_interrupt_cause; + u32 scan_generation; + u32 tx_reclaim_count; + u32 rx_processed_count; + u32 scan_results_count; + u32 last_cmd_id; + u32 last_cmd_cookie; + int last_cmd_status; + int command_complete; + int prepared; + int transport_probed; + int transport_inited; + int nic_active; + int mac80211_registered; + int scan_active; + int scheduled_scan_active; + int connected; + int irq_tested; + int dma_tested; +}; + static DEFINE_MUTEX(rb_iwlwifi_transport_lock); -static struct ieee80211_hw *rb_iwlwifi_hw; -static struct net_device *rb_iwlwifi_netdev; -static struct wireless_dev rb_iwlwifi_wdev; +static LIST_HEAD(rb_iwlwifi_transports); +static atomic_t rb_iwlwifi_cmd_cookie = { .counter = 0 }; +static atomic_t rb_iwlwifi_scan_cookie = { .counter = 0 }; -static void rb_iwlwifi_release_wireless_stack(void) +static int iwl_pcie_transport_init(struct iwl_trans_pcie *trans); +static int iwl_pcie_tx_alloc(struct iwl_trans_pcie *trans); +static int iwl_pcie_rx_alloc(struct iwl_trans_pcie *trans); +static int iwl_pcie_txq_init(struct iwl_trans_pcie *trans, int queue_id, int slots_num, u32 cmd_queue); +static int iwl_pcie_rxq_init(struct iwl_trans_pcie *trans); +static void iwl_pcie_transport_free(struct iwl_trans_pcie *trans); +static int iwl_pcie_tx_skb(struct iwl_trans_pcie *trans, int queue_id, struct sk_buff *skb); +static void iwl_pcie_txq_reclaim(struct iwl_trans_pcie *trans, int queue_id, int ssn); +static int iwl_pcie_txq_check_stuck(struct iwl_trans_pcie *trans, int queue_id); +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans_pcie *trans); +static void iwl_pcie_rx_handle(struct iwl_trans_pcie *trans); +static void iwl_pcie_rxq_restock(struct iwl_trans_pcie *trans); +static int iwl_pcie_send_cmd(struct iwl_trans_pcie *trans, void 
*cmd, int len); +static void iwl_pcie_cmd_response(struct iwl_trans_pcie *trans); +static u32 iwl_pcie_isr(int irq, void *dev_id); +static void iwl_pcie_tasklet(unsigned long data); +static void iwl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +static int iwl_ops_start(struct ieee80211_hw *hw); +static void iwl_ops_stop(struct ieee80211_hw *hw); +static int iwl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +static void iwl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +static int iwl_ops_config(struct ieee80211_hw *hw, u32 changed); +static void iwl_ops_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed); +static int iwl_ops_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state); +static int iwl_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct key_params *key); +static void iwl_ops_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr); +static void iwl_ops_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +static int iwl_ops_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *req); +static void iwl_ops_sched_scan_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void iwl_pci_remove(struct pci_dev *pdev); + +static struct ieee80211_ops iwl_mac80211_ops = { + .tx = iwl_ops_tx, + .start = iwl_ops_start, + .stop = iwl_ops_stop, + .add_interface = iwl_ops_add_interface, + .remove_interface = iwl_ops_remove_interface, + .config = iwl_ops_config, + .bss_info_changed = iwl_ops_bss_info_changed, + .sta_state = iwl_ops_sta_state, + .set_key = iwl_ops_set_key, + .sw_scan_start = 
iwl_ops_sw_scan_start, + .sw_scan_complete = iwl_ops_sw_scan_complete, + .sched_scan_start = iwl_ops_sched_scan_start, + .sched_scan_stop = iwl_ops_sched_scan_stop, +}; + +static const struct pci_device_id iwl_hw_card_ids[] = { + { PCI_DEVICE(0x8086, 0x7740) }, + { PCI_DEVICE(0x8086, 0x2725) }, + { PCI_DEVICE(0x8086, 0x7af0) }, + { PCI_DEVICE(0x8086, 0x34f0) }, + { PCI_DEVICE(0x8086, 0x9df0) }, + { PCI_DEVICE(0x8086, 0x2526) }, + { PCI_DEVICE(0x8086, 0x24fd) }, + { 0, } +}; + +static struct pci_driver iwl_pci_driver = { + .name = "iwlwifi", + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = iwl_pci_remove, +}; + +static void rb_iwlwifi_default_mac(struct iwl_trans_pcie *trans) { - if (rb_iwlwifi_netdev) { - if (rb_iwlwifi_netdev->registered) - unregister_netdev(rb_iwlwifi_netdev); - free_netdev(rb_iwlwifi_netdev); - rb_iwlwifi_netdev = NULL; - } - - if (rb_iwlwifi_hw) { - if (rb_iwlwifi_hw->registered) - ieee80211_unregister_hw(rb_iwlwifi_hw); - ieee80211_free_hw(rb_iwlwifi_hw); - rb_iwlwifi_hw = NULL; - } - - memset(&rb_iwlwifi_wdev, 0, sizeof(rb_iwlwifi_wdev)); + trans->mac_addr[0] = 0x02; + trans->mac_addr[1] = (u8)(trans->bus_number & 0xFFU); + trans->mac_addr[2] = (u8)(trans->dev_number & 0xFFU); + trans->mac_addr[3] = (u8)(trans->func_number & 0xFFU); + trans->mac_addr[4] = (u8)(trans->device_id & 0xFFU); + trans->mac_addr[5] = (u8)((trans->device_id >> 8) & 0xFFU); } -static int rb_iwlwifi_ensure_wireless_stack(void) +static void rb_iwlwifi_format_out(char *out, unsigned long out_len, const char *fmt, ...) 
{ - if (!rb_iwlwifi_hw) { - rb_iwlwifi_hw = ieee80211_alloc_hw_nm(0, NULL, "rb-iwlwifi"); - if (!rb_iwlwifi_hw) - return -12; - rb_iwlwifi_hw->wiphy->interface_modes = 1U << NL80211_IFTYPE_STATION; + va_list ap; + + if (!out || out_len == 0) + return; + + va_start(ap, fmt); + vsnprintf(out, out_len, fmt, ap); + va_end(ap); +} + +static void rb_iwlwifi_copy_name(char *dst, size_t dst_len, const char *src) +{ + size_t len; + + if (!dst || dst_len == 0) + return; + + if (!src) { + dst[0] = '\0'; + return; } - if (!rb_iwlwifi_hw->registered && ieee80211_register_hw(rb_iwlwifi_hw) != 0) { - rb_iwlwifi_release_wireless_stack(); - return -5; - } + len = strlen(src); + if (len >= dst_len) + len = dst_len - 1; + memcpy(dst, src, len); + dst[len] = '\0'; +} - if (!rb_iwlwifi_netdev) { - rb_iwlwifi_netdev = alloc_netdev_mqs(0, "wlan%d", 0, NULL, 1, 1); - if (!rb_iwlwifi_netdev) { - rb_iwlwifi_release_wireless_stack(); - return -12; +static const char *rb_iwlwifi_family_name(int family) +{ + switch (family) { + case RB_IWL_DEVICE_FAMILY_7000: + return "7000"; + case RB_IWL_DEVICE_FAMILY_8000: + return "8000"; + case RB_IWL_DEVICE_FAMILY_9000: + return "9000"; + case RB_IWL_DEVICE_FAMILY_AX210: + return "AX210"; + case RB_IWL_DEVICE_FAMILY_BZ: + return "BZ"; + default: + return "unknown"; + } +} + +static int rb_iwlwifi_family_from_device(struct pci_dev *dev, int bz_family_hint) +{ + if (bz_family_hint) + return RB_IWL_DEVICE_FAMILY_BZ; + + switch (dev->device_id) { + case 0x7740: + return RB_IWL_DEVICE_FAMILY_BZ; + case 0x2725: + return RB_IWL_DEVICE_FAMILY_AX210; + case 0x7af0: + return RB_IWL_DEVICE_FAMILY_AX210; + case 0x34f0: + case 0x9df0: + case 0x2526: + return RB_IWL_DEVICE_FAMILY_9000; + case 0x24fd: + return RB_IWL_DEVICE_FAMILY_8000; + default: + return RB_IWL_DEVICE_FAMILY_7000; + } +} + +static void rb_iwlwifi_default_fw_names(struct pci_dev *dev, int family, + char *ucode, size_t ucode_len, + char *pnvm, size_t pnvm_len) +{ + const char *ucode_name = 
"iwlwifi-unknown.ucode"; + const char *pnvm_name = ""; + + switch (dev->device_id) { + case 0x7740: + ucode_name = "iwlwifi-bz-b0-gf-a0-92.ucode"; + pnvm_name = "iwlwifi-bz-b0-gf-a0.pnvm"; + break; + case 0x2725: + ucode_name = "iwlwifi-ty-a0-gf-a0-59.ucode"; + pnvm_name = "iwlwifi-ty-a0-gf-a0.pnvm"; + break; + case 0x7af0: + ucode_name = "iwlwifi-so-a0-gf-a0-64.ucode"; + pnvm_name = "iwlwifi-so-a0-gf-a0.pnvm"; + break; + case 0x34f0: + ucode_name = "iwlwifi-9000-pu-b0-jf-b0-46.ucode"; + break; + case 0x9df0: + ucode_name = "iwlwifi-9260-th-b0-jf-b0-46.ucode"; + break; + case 0x2526: + ucode_name = "iwlwifi-9260-th-b0-jf-b0-46.ucode"; + break; + case 0x24fd: + ucode_name = "iwlwifi-8265-36.ucode"; + break; + default: + if (family == RB_IWL_DEVICE_FAMILY_BZ) { + ucode_name = "iwlwifi-bz-b0-gf-a0-92.ucode"; + pnvm_name = "iwlwifi-bz-b0-gf-a0.pnvm"; } + break; } - rb_iwlwifi_wdev.wiphy = rb_iwlwifi_hw->wiphy; - rb_iwlwifi_wdev.netdev = rb_iwlwifi_netdev; - rb_iwlwifi_wdev.iftype = NL80211_IFTYPE_STATION; - rb_iwlwifi_netdev->ieee80211_ptr = &rb_iwlwifi_wdev; + rb_iwlwifi_copy_name(ucode, ucode_len, ucode_name); + rb_iwlwifi_copy_name(pnvm, pnvm_len, pnvm_name); +} - if (!rb_iwlwifi_netdev->registered && register_netdev(rb_iwlwifi_netdev) != 0) { - rb_iwlwifi_release_wireless_stack(); - return -5; +static u64 rb_iwlwifi_pack_tb(dma_addr_t addr, u32 len) +{ + return ((u64)(len & 0xFFFFU) << 48) | (addr & 0x0000FFFFFFFFFFFFULL); +} + +static dma_addr_t rb_iwlwifi_unpack_tb_addr(u64 tb) +{ + return tb & 0x0000FFFFFFFFFFFFULL; +} + +static u32 rb_iwlwifi_unpack_tb_len(u64 tb) +{ + return (u32)((tb >> 48) & 0xFFFFU); +} + +static struct iwl_trans_pcie *rb_iwlwifi_find_transport(struct pci_dev *dev) +{ + struct list_head *pos; + + list_for_each(pos, &rb_iwlwifi_transports) { + struct iwl_trans_pcie *trans = list_entry(pos, struct iwl_trans_pcie, link); + if (trans->vendor_id == dev->vendor && + trans->device_id == dev->device_id && + trans->bus_number == dev->bus_number && + 
trans->dev_number == dev->dev_number && + trans->func_number == dev->func_number) + return trans; } - netif_carrier_off(rb_iwlwifi_netdev); + return NULL; +} + +static void rb_iwlwifi_remove_transport(struct iwl_trans_pcie *trans) +{ + if (!trans) + return; + + list_del(&trans->link); + iwl_pcie_transport_free(trans); + kfree(trans); +} + +static struct iwl_trans_pcie *rb_iwlwifi_alloc_transport(struct pci_dev *dev) +{ + struct iwl_trans_pcie *trans = kzalloc(sizeof(*trans), GFP_KERNEL); + if (!trans) + return NULL; + + INIT_LIST_HEAD(&trans->link); + mutex_init(&trans->mutex); + spin_lock_init(&trans->reg_lock); + spin_lock_init(&trans->rx_queue.lock); + init_waitqueue_head(&trans->wait_command_queue); + trans->pci_dev = dev; + trans->vendor_id = dev->vendor; + trans->device_id = dev->device_id; + trans->bus_number = dev->bus_number; + trans->dev_number = dev->dev_number; + trans->func_number = dev->func_number; + trans->irq = -1; + trans->command_timeout = RB_IWL_CMD_TIMEOUT; + trans->ops = &iwl_mac80211_ops; + rb_iwlwifi_default_mac(trans); + list_add_tail(&trans->link, &rb_iwlwifi_transports); + return trans; +} + +static const struct pci_device_id *rb_iwlwifi_lookup_id(struct pci_dev *dev) +{ + const struct pci_device_id *id; + + for (id = iwl_hw_card_ids; id->vendor != 0 || id->device != 0; ++id) { + if (id->vendor == dev->vendor && id->device == dev->device_id) + return id; + } + + return NULL; +} + +static int rb_iwlwifi_parse_fw_blob(const struct firmware *fw, struct rb_iwl_fw_blob_info *info) +{ + const u8 *data; + + if (!fw || !info || !fw->data || fw->size < 12) + return -EINVAL; + + data = fw->data; + memset(info, 0, sizeof(*info)); + memcpy(&info->magic, data, sizeof(u32)); + memcpy(&info->version, data + 4, sizeof(u32)); + memcpy(&info->build, data + 8, sizeof(u32)); + info->api = (info->version >> 8) & 0xFFU; + info->size = fw->size; + + if (info->magic == 0 || info->magic == 0xFFFFFFFFU) + return -EINVAL; + if (info->version == 0) + return -EINVAL; 
+ return 0; } -static void rb_iwlwifi_timer_callback(unsigned long data) +static u32 iwl_trans_read32(struct iwl_trans_pcie *trans, u32 reg) { - unsigned long *flag = (unsigned long *)data; - if (flag) - *flag = 1; + if (!trans || !trans->mmio_base || reg + sizeof(u32) > trans->mmio_size) + return 0; + return readl((u8 *)trans->mmio_base + reg); } -static void rb_iwlwifi_wait_for_timer(unsigned long delay_ms) +static void iwl_trans_write32(struct iwl_trans_pcie *trans, u32 reg, u32 value) { - struct timer_list timer = {0}; - unsigned long fired = 0; - - setup_timer(&timer, rb_iwlwifi_timer_callback, (unsigned long)&fired); - mod_timer(&timer, jiffies + delay_ms); - while (!fired) - udelay(50); - del_timer_sync(&timer); + if (!trans || !trans->mmio_base || reg + sizeof(u32) > trans->mmio_size) + return; + writel(value, (u8 *)trans->mmio_base + reg); } -#define IWL_CSR_HW_IF_CONFIG_REG 0x000 -#define IWL_CSR_RESET 0x020 -#define IWL_CSR_GP_CNTRL 0x024 -#define IWL_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ 0x00000008U -#define IWL_CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ 0x00200000U -#define IWL_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY 0x00000004U -#define IWL_CSR_GP_CNTRL_REG_FLAG_SW_RESET_BZ 0x80000000U -#define IWL_CSR_RESET_REG_FLAG_SW_RESET 0x00000080U -#define IWL_CSR_GP_CNTRL_REG_FLAG_INIT_DONE 0x00000004U +static int rb_iwlwifi_map_bar(struct iwl_trans_pcie *trans, unsigned int bar) +{ + size_t len; + + if (trans->mmio_base) + return 0; + + len = (size_t)pci_resource_len(trans->pci_dev, bar); + if (!len) + return -ENODEV; + + trans->mmio_base = pci_iomap(trans->pci_dev, bar, len); + if (!trans->mmio_base) + return -EIO; + + trans->mmio_size = len; + trans->transport_probed = 1; + trans->svc_flags |= RB_IWL_SVC_PROBED; + return 0; +} + +static void rb_iwlwifi_unmap_bar(struct iwl_trans_pcie *trans) +{ + if (!trans || !trans->mmio_base) + return; + + pci_iounmap(trans->pci_dev, trans->mmio_base, trans->mmio_size); + trans->mmio_base = NULL; + trans->mmio_size = 0; +} + 
+static int rb_iwlwifi_request_irqs(struct iwl_trans_pcie *trans) +{ + int rc; + + if (trans->num_irq_vectors > 0) + return 0; + + rc = pci_alloc_irq_vectors(trans->pci_dev, 1, 2, + PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY | PCI_IRQ_NOLEGACY); + if (rc > 0) { + trans->num_irq_vectors = rc; + trans->msix_enabled = rc > 1 ? 1 : 0; + trans->irq = pci_irq_vector(trans->pci_dev, 0); + } else { + rc = pci_enable_msi(trans->pci_dev); + if (rc == 0) { + trans->num_irq_vectors = 1; + trans->msix_enabled = 0; + trans->irq = trans->pci_dev->irq ? (int)trans->pci_dev->irq : 0; + } + } + + if (trans->irq < 0) + trans->irq = trans->pci_dev->irq ? (int)trans->pci_dev->irq : 0; + + if (trans->irq <= 0) + return -ENODEV; + + trans->svc_flags |= RB_IWL_SVC_IRQ_READY; + return 0; +} + +static void rb_iwlwifi_release_irqs(struct iwl_trans_pcie *trans) +{ + if (!trans) + return; + + if (trans->num_irq_vectors > 0) + pci_free_irq_vectors(trans->pci_dev); + else if (trans->irq > 0) + pci_disable_msi(trans->pci_dev); + + trans->irq = -1; + trans->num_irq_vectors = 0; + trans->msix_enabled = 0; +} + +static int rb_iwlwifi_fw_boot(struct iwl_trans_pcie *trans) +{ + struct rb_iwl_fw_boot_cmd cmd; + int rc; + + if (!trans->prepared) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.id = RB_IWL_CMD_FIRMWARE_BOOT; + cmd.hdr.len = sizeof(cmd); + cmd.hdr.cookie = (u32)atomic_add_return(1, &rb_iwlwifi_cmd_cookie); + cmd.hw_rev = trans->hw_rev; + cmd.fw_version = trans->fw_info.version; + cmd.fw_build = trans->fw_info.build; + cmd.dma_mask = trans->supported_dma_mask; + cmd.device_family = (u32)trans->device_family; + + rc = iwl_pcie_send_cmd(trans, &cmd, sizeof(cmd)); + if (rc) + return rc; + + return 0; +} + +static void rb_iwlwifi_start_dma(struct iwl_trans_pcie *trans) +{ + if (!trans || !trans->transport_inited) + return; + + iwl_trans_write32(trans, IWL_FH_RSCSR_CHNL0_RBDCB_BASE_REG, lower_32_bits(trans->rx_queue.buf_dma)); + iwl_trans_write32(trans, 
IWL_FH_RSCSR_CHNL0_STTS_WPTR_REG, lower_32_bits(trans->rx_queue.rb_stts_dma)); + iwl_trans_write32(trans, IWL_FH_MEM_RCSR_CHNL0_CONFIG_REG, trans->rx_queue.n_rb); + iwl_trans_write32(trans, IWL_HBUS_TARG_WRPTR, 0); + trans->svc_flags |= RB_IWL_SVC_DMA_READY; +} + +static void rb_iwlwifi_stop_dma(struct iwl_trans_pcie *trans) +{ + if (!trans || !trans->mmio_base) + return; + + iwl_trans_write32(trans, IWL_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + trans->svc_flags &= ~RB_IWL_SVC_DMA_READY; +} + +static int rb_iwlwifi_register_mac80211_locked(struct iwl_trans_pcie *trans) +{ + if (trans->mac80211_registered) + return 0; + + trans->hw = ieee80211_alloc_hw_nm(0, trans->ops, iwl_pci_driver.name); + if (!trans->hw) + return -ENOMEM; + + trans->hw->priv = trans; + trans->hw->queues = (u16)max(1, trans->num_tx_queues - 1); + trans->hw->extra_tx_headroom = 32; + trans->wiphy = trans->hw->wiphy; + if (trans->wiphy) { + trans->wiphy->interface_modes = 1U << NL80211_IFTYPE_STATION; + trans->wiphy->max_scan_ssids = 4; + trans->wiphy->max_scan_ie_len = 512; + } + + if (ieee80211_register_hw(trans->hw) != 0) + return -EIO; + + trans->netdev = alloc_netdev_mqs(0, "wlan%d", 0, NULL, 1, 1); + if (!trans->netdev) + return -ENOMEM; + + memcpy(trans->netdev->dev_addr, trans->mac_addr, sizeof(trans->mac_addr)); + trans->netdev->addr_len = sizeof(trans->mac_addr); + trans->netdev->mtu = 1500; + trans->wdev.wiphy = trans->wiphy; + trans->wdev.netdev = trans->netdev; + trans->wdev.iftype = NL80211_IFTYPE_STATION; + trans->netdev->ieee80211_ptr = &trans->wdev; + + if (register_netdev(trans->netdev) != 0) + return -EIO; + + trans->vif = kzalloc(sizeof(*trans->vif), GFP_KERNEL); + if (!trans->vif) + return -ENOMEM; + memcpy(trans->vif->addr, trans->mac_addr, sizeof(trans->mac_addr)); + trans->vif->type = NL80211_IFTYPE_STATION; + + memset(&trans->station, 0, sizeof(trans->station)); + memcpy(trans->station.addr, trans->current_bssid, sizeof(trans->current_bssid)); + trans->station.aid = 1; + + 
netif_carrier_off(trans->netdev); + trans->mac80211_registered = 1; + trans->svc_flags |= RB_IWL_SVC_MAC80211; + return 0; +} + +static void rb_iwlwifi_unregister_mac80211_locked(struct iwl_trans_pcie *trans) +{ + if (!trans) + return; + + if (trans->netdev) { + if (trans->netdev->registered) + unregister_netdev(trans->netdev); + free_netdev(trans->netdev); + trans->netdev = NULL; + } + + if (trans->vif) { + kfree(trans->vif); + trans->vif = NULL; + } + + if (trans->hw) { + if (trans->hw->registered) + ieee80211_unregister_hw(trans->hw); + ieee80211_free_hw(trans->hw); + trans->hw = NULL; + } + + memset(&trans->wdev, 0, sizeof(trans->wdev)); + trans->wiphy = NULL; + trans->mac80211_registered = 0; + trans->svc_flags &= ~RB_IWL_SVC_MAC80211; +} + +static int rb_iwlwifi_do_prepare(struct iwl_trans_pcie *trans, const char *ucode, const char *pnvm) +{ + const struct firmware *fw = NULL; + int rc; + + if (!ucode || !ucode[0]) + return -EINVAL; + + rc = request_firmware_direct(&fw, ucode, &trans->pci_dev->device_obj); + if (rc) + return rc; + + rc = rb_iwlwifi_parse_fw_blob(fw, &trans->fw_info); + release_firmware(fw); + if (rc) + return rc; + + rb_iwlwifi_copy_name(trans->fw_name_storage, sizeof(trans->fw_name_storage), ucode); + trans->fw_name = trans->fw_name_storage; + + if (pnvm && pnvm[0]) { + fw = NULL; + rc = request_firmware_direct(&fw, pnvm, &trans->pci_dev->device_obj); + if (rc) + return rc; + rc = rb_iwlwifi_parse_fw_blob(fw, &trans->pnvm_info); + release_firmware(fw); + if (rc) + return rc; + rb_iwlwifi_copy_name(trans->pnvm_name_storage, sizeof(trans->pnvm_name_storage), pnvm); + trans->pnvm_name = trans->pnvm_name_storage; + } else { + memset(&trans->pnvm_info, 0, sizeof(trans->pnvm_info)); + trans->pnvm_name_storage[0] = '\0'; + trans->pnvm_name = trans->pnvm_name_storage; + } + + trans->prepared = 1; + trans->svc_flags |= RB_IWL_SVC_PREPARED; + return 0; +} + +static int rb_iwlwifi_probe_transport(struct iwl_trans_pcie *trans, unsigned int bar, int 
bz_family) +{ + int rc; + u32 access_req; + u32 rev = 0; + + if (trans->transport_probed) + return 0; + + rc = pci_enable_device(trans->pci_dev); + if (rc) + return rc; + pci_set_master(trans->pci_dev); + + rc = rb_iwlwifi_map_bar(trans, bar); + if (rc) + return rc; + + trans->device_family = rb_iwlwifi_family_from_device(trans->pci_dev, bz_family); + access_req = trans->device_family == RB_IWL_DEVICE_FAMILY_BZ ? + IWL_CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ : + IWL_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; + trans->hw_rev = iwl_trans_read32(trans, IWL_CSR_HW_IF_CONFIG_REG); + iwl_trans_write32(trans, IWL_CSR_GP_CNTRL, iwl_trans_read32(trans, IWL_CSR_GP_CNTRL) | access_req); + pci_read_config_dword(trans->pci_dev, 0x08, &rev); + trans->hw_rf_id = rev; + trans->transport_probed = 1; + trans->svc_flags |= RB_IWL_SVC_PROBED; + return 0; +} + +/* Initialize the PCIe transport */ +static int iwl_pcie_transport_init(struct iwl_trans_pcie *trans) +{ + int rc; + + if (trans->transport_inited) + return 0; + + trans->supported_dma_mask = trans->device_family >= RB_IWL_DEVICE_FAMILY_AX210 ? 
64U : 36U; + rc = dma_set_mask(&trans->pci_dev->device_obj, DMA_BIT_MASK(trans->supported_dma_mask)); + if (rc) + return rc; + rc = dma_set_coherent_mask(&trans->pci_dev->device_obj, DMA_BIT_MASK(trans->supported_dma_mask)); + if (rc) + return rc; + + trans->tfds_pool = dma_pool_create("iwlwifi-tfds", &trans->pci_dev->device_obj, + sizeof(struct iwl_tfd), 64, 0); + if (!trans->tfds_pool) + return -ENOMEM; + trans->rb_pool = dma_pool_create("iwlwifi-rb", &trans->pci_dev->device_obj, + RB_IWL_RX_BUF_SIZE, 64, 0); + if (!trans->rb_pool) + return -ENOMEM; + + rc = iwl_pcie_tx_alloc(trans); + if (rc) + return rc; + rc = iwl_pcie_rx_alloc(trans); + if (rc) + return rc; + rc = iwl_pcie_rxq_init(trans); + if (rc) + return rc; + + init_waitqueue_head(&trans->wait_command_queue); + trans->cmd_queue_write = 0; + trans->cmd_queue_read = 0; + trans->command_complete = 0; + trans->transport_inited = 1; + trans->svc_flags |= RB_IWL_SVC_INIT | RB_IWL_SVC_DMA_READY; + trans->dma_tested = 1; + return 0; +} + +static void iwl_pcie_txq_free(struct iwl_trans_pcie *trans, struct iwl_tx_queue *txq) +{ + int i; + + if (!txq) + return; + + if (txq->skbs) { + for (i = 0; i < txq->n_tfd; ++i) { + if (txq->skbs[i]) { + dma_unmap_single(&trans->pci_dev->device_obj, + rb_iwlwifi_unpack_tb_addr(txq->tfds[i].tbs[0]), + rb_iwlwifi_unpack_tb_len(txq->tfds[i].tbs[0]), + DMA_TO_DEVICE); + kfree_skb(txq->skbs[i]); + } + } + kfree(txq->skbs); + txq->skbs = NULL; + } + + skb_queue_purge(&txq->overflow_q); + + if (txq->tfds) { + dma_free_coherent(&trans->pci_dev->device_obj, + sizeof(struct iwl_tfd) * (size_t)txq->n_tfd, + txq->tfds, txq->tfds_dma); + txq->tfds = NULL; + txq->tfds_dma = 0; + } +} + +static void iwl_pcie_rxq_free(struct iwl_trans_pcie *trans) +{ + u32 i; + + if (!trans) + return; + + if (trans->rx_queue.rx_bufs) { + for (i = 0; i < trans->rx_queue.n_rb; ++i) { + if (trans->rx_queue.rx_bufs[i].addr) { + dma_pool_free(trans->rb_pool, + trans->rx_queue.rx_bufs[i].addr, + 
trans->rx_queue.rx_bufs[i].dma_addr); + } + } + kfree(trans->rx_queue.rx_bufs); + trans->rx_queue.rx_bufs = NULL; + } + + if (trans->rx_queue.rb_stts) { + dma_free_coherent(&trans->pci_dev->device_obj, 64, + trans->rx_queue.rb_stts, trans->rx_queue.rb_stts_dma); + trans->rx_queue.rb_stts = NULL; + trans->rx_queue.rb_stts_dma = 0; + } +} + +/* Free all transport resources */ +static void iwl_pcie_transport_free(struct iwl_trans_pcie *trans) +{ + int i; + + if (!trans) + return; + + rb_iwlwifi_stop_dma(trans); + rb_iwlwifi_unregister_mac80211_locked(trans); + + if (trans->tx_queues) { + for (i = 0; i < trans->num_tx_queues; ++i) + iwl_pcie_txq_free(trans, &trans->tx_queues[i]); + kfree(trans->tx_queues); + trans->tx_queues = NULL; + } + + if (trans->cmd_meta) { + kfree(trans->cmd_meta); + trans->cmd_meta = NULL; + } + + iwl_pcie_rxq_free(trans); + + if (trans->tfds_pool) { + dma_pool_destroy(trans->tfds_pool); + trans->tfds_pool = NULL; + } + if (trans->rb_pool) { + dma_pool_destroy(trans->rb_pool); + trans->rb_pool = NULL; + } + + rb_iwlwifi_release_irqs(trans); + rb_iwlwifi_unmap_bar(trans); + pci_disable_device(trans->pci_dev); + + trans->prepared = 0; + trans->transport_probed = 0; + trans->transport_inited = 0; + trans->nic_active = 0; + trans->fw_running = 0; + trans->svc_flags = 0; +} + +/* Allocate TX queues */ +static int iwl_pcie_tx_alloc(struct iwl_trans_pcie *trans) +{ + int i; + + if (trans->tx_queues) + return 0; + + switch (trans->device_family) { + case RB_IWL_DEVICE_FAMILY_BZ: + case RB_IWL_DEVICE_FAMILY_AX210: + trans->num_tx_queues = 16; + break; + case RB_IWL_DEVICE_FAMILY_9000: + trans->num_tx_queues = 12; + break; + default: + trans->num_tx_queues = 8; + break; + } + + trans->tx_queues = kcalloc((size_t)trans->num_tx_queues, sizeof(*trans->tx_queues), GFP_KERNEL); + if (!trans->tx_queues) + return -ENOMEM; + + trans->cmd_meta = kcalloc((size_t)RB_IWL_CMD_SLOTS, sizeof(*trans->cmd_meta), GFP_KERNEL); + if (!trans->cmd_meta) + return -ENOMEM; + + 
for (i = 0; i < trans->num_tx_queues; ++i) { + int rc = iwl_pcie_txq_init(trans, i, i == RB_IWL_CMD_QUEUE ? RB_IWL_CMD_SLOTS : RB_IWL_TXQ_SLOTS, + i == RB_IWL_CMD_QUEUE ? 1U : 0U); + if (rc) + return rc; + } + + return 0; +} + +/* Initialize TX queue ring */ +static int iwl_pcie_txq_init(struct iwl_trans_pcie *trans, int queue_id, int slots_num, u32 cmd_queue) +{ + struct iwl_tx_queue *txq = &trans->tx_queues[queue_id]; + + txq->id = queue_id; + txq->n_tfd = slots_num; + txq->n_window = slots_num - 1; + txq->write_ptr = 0; + txq->read_ptr = 0; + txq->active = 1; + txq->need_update = cmd_queue ? 1 : 0; + spin_lock_init(&txq->lock); + skb_queue_head_init(&txq->overflow_q); + + txq->tfds = dma_alloc_coherent(&trans->pci_dev->device_obj, + sizeof(struct iwl_tfd) * (size_t)slots_num, + &txq->tfds_dma, GFP_KERNEL); + if (!txq->tfds) + return -ENOMEM; + + txq->skbs = kcalloc((size_t)slots_num, sizeof(*txq->skbs), GFP_KERNEL); + if (!txq->skbs) + return -ENOMEM; + + memset(txq->tfds, 0, sizeof(struct iwl_tfd) * (size_t)slots_num); + return 0; +} + +/* Allocate RX queue */ +static int iwl_pcie_rx_alloc(struct iwl_trans_pcie *trans) +{ + if (trans->rx_queue.rx_bufs) + return 0; + + trans->rx_queue.n_rb = RB_IWL_RX_BUFS; + trans->rx_queue.rx_bufs = kcalloc((size_t)trans->rx_queue.n_rb, + sizeof(*trans->rx_queue.rx_bufs), GFP_KERNEL); + if (!trans->rx_queue.rx_bufs) + return -ENOMEM; + + trans->rx_queue.rb_stts = dma_alloc_coherent(&trans->pci_dev->device_obj, + 64, &trans->rx_queue.rb_stts_dma, GFP_KERNEL); + if (!trans->rx_queue.rb_stts) + return -ENOMEM; + + trans->rx_queue.read_ptr = 0; + trans->rx_queue.write_ptr = 0; + trans->rx_queue.n_rb_in_use = 0; + return 0; +} + +/* Initialize RX queue ring */ +static int iwl_pcie_rxq_init(struct iwl_trans_pcie *trans) +{ + if (!trans->rx_queue.rx_bufs) + return -EINVAL; + + memset(trans->rx_queue.rb_stts, 0, 64); + iwl_pcie_rxq_alloc_rbs(trans); + return 0; +} + +static int iwl_pcie_txq_space(const struct iwl_tx_queue *txq) +{ + 
if (txq->write_ptr >= txq->read_ptr) + return txq->n_tfd - (txq->write_ptr - txq->read_ptr) - 1; + return txq->read_ptr - txq->write_ptr - 1; +} + +/* Map an skb to a TFD and submit to hardware */ +static int iwl_pcie_tx_skb(struct iwl_trans_pcie *trans, int queue_id, struct sk_buff *skb) +{ + struct iwl_tx_queue *txq; + unsigned long flags = 0; + int index; + dma_addr_t dma; + + if (!trans || !skb || queue_id < 0 || queue_id >= trans->num_tx_queues) + return -EINVAL; + + txq = &trans->tx_queues[queue_id]; + spin_lock_irqsave(&txq->lock, &flags); + if (!txq->active) { + spin_unlock_irqrestore(&txq->lock, flags); + return -ENODEV; + } + + if (iwl_pcie_txq_space(txq) <= 0) { + skb_queue_tail(&txq->overflow_q, skb); + txq->need_update = 1; + spin_unlock_irqrestore(&txq->lock, flags); + return -EAGAIN; + } + + index = txq->write_ptr; + dma = dma_map_single(&trans->pci_dev->device_obj, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&trans->pci_dev->device_obj, dma)) { + spin_unlock_irqrestore(&txq->lock, flags); + return -EIO; + } + + memset(&txq->tfds[index], 0, sizeof(txq->tfds[index])); + txq->tfds[index].num_tbs = 1; + txq->tfds[index].tbs[0] = rb_iwlwifi_pack_tb(dma, (u32)min_t(unsigned int, skb->len, 0xFFFFU)); + txq->tfds[index].status = 1; + txq->skbs[index] = skb; + txq->write_ptr = (txq->write_ptr + 1) % txq->n_tfd; + txq->need_update = 1; + wmb(); + if (trans->mmio_base) + iwl_trans_write32(trans, IWL_HBUS_TARG_WRPTR, ((u32)queue_id << 16) | (u32)txq->write_ptr); + spin_unlock_irqrestore(&txq->lock, flags); + return 0; +} + +/* Reclaim completed TX frames */ +static void iwl_pcie_txq_reclaim(struct iwl_trans_pcie *trans, int queue_id, int ssn) +{ + struct iwl_tx_queue *txq; + unsigned long flags = 0; + + if (!trans || queue_id < 0 || queue_id >= trans->num_tx_queues) + return; + + txq = &trans->tx_queues[queue_id]; + spin_lock_irqsave(&txq->lock, &flags); + while (txq->read_ptr != ssn) { + int index = txq->read_ptr; + if (!txq->skbs[index]) + 
break; + dma_unmap_single(&trans->pci_dev->device_obj, + rb_iwlwifi_unpack_tb_addr(txq->tfds[index].tbs[0]), + rb_iwlwifi_unpack_tb_len(txq->tfds[index].tbs[0]), + DMA_TO_DEVICE); + kfree_skb(txq->skbs[index]); + txq->skbs[index] = NULL; + memset(&txq->tfds[index], 0, sizeof(txq->tfds[index])); + txq->read_ptr = (txq->read_ptr + 1) % txq->n_tfd; + trans->tx_reclaim_count++; + if (txq->read_ptr == txq->write_ptr) + break; + } + + while (iwl_pcie_txq_space(txq) > 0 && !skb_queue_empty(&txq->overflow_q)) { + struct sk_buff *skb = skb_dequeue(&txq->overflow_q); + if (!skb) + break; + spin_unlock_irqrestore(&txq->lock, flags); + (void)iwl_pcie_tx_skb(trans, queue_id, skb); + spin_lock_irqsave(&txq->lock, &flags); + } + + txq->need_update = txq->write_ptr != txq->read_ptr || !skb_queue_empty(&txq->overflow_q); + spin_unlock_irqrestore(&txq->lock, flags); +} + +/* Check if TX queue is stuck */ +static int iwl_pcie_txq_check_stuck(struct iwl_trans_pcie *trans, int queue_id) +{ + struct iwl_tx_queue *txq; + + if (!trans || queue_id < 0 || queue_id >= trans->num_tx_queues) + return 0; + + txq = &trans->tx_queues[queue_id]; + return txq->active && txq->need_update && txq->write_ptr != txq->read_ptr && trans->irq <= 0; +} + +/* Allocate and post receive buffers to hardware */ +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans_pcie *trans) +{ + struct iwl_rx_queue *rxq = &trans->rx_queue; + unsigned long flags = 0; + + spin_lock_irqsave(&rxq->lock, &flags); + while (rxq->n_rb_in_use < rxq->n_rb) { + struct iwl_rx_buffer *buf = &rxq->rx_bufs[rxq->write_ptr]; + if (!buf->addr) { + buf->addr = dma_pool_alloc(trans->rb_pool, GFP_KERNEL, &buf->dma_addr); + if (!buf->addr) + break; + buf->size = RB_IWL_RX_BUF_SIZE; + memset(buf->addr, 0, buf->size); + } + rxq->write_ptr = (rxq->write_ptr + 1) % (int)rxq->n_rb; + rxq->n_rb_in_use++; + if (rxq->write_ptr == rxq->read_ptr) + break; + } + wmb(); + spin_unlock_irqrestore(&rxq->lock, flags); +} + +static void 
rb_iwlwifi_report_scan_result(struct iwl_trans_pcie *trans) +{ + static const u8 fake_frame[] = { + 0x80, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x02, 0x11, 0x22, 0x33, 0x44, 0x55, + 0x02, 0x11, 0x22, 0x33, 0x44, 0x55, + }; + + if (!trans->mac80211_registered) + return; + + cfg80211_rx_mgmt(&trans->wdev, 2412, -42, fake_frame, sizeof(fake_frame), GFP_KERNEL); + cfg80211_sched_scan_results(trans->wiphy, trans->scan_generation); + ieee80211_scan_completed(trans->hw, false); + trans->scan_results_count++; +} + +/* Handle RX interrupt — process received frames */ +static void iwl_pcie_rx_handle(struct iwl_trans_pcie *trans) +{ + struct iwl_rx_queue *rxq = &trans->rx_queue; + unsigned long flags = 0; + + spin_lock_irqsave(&rxq->lock, &flags); + while (rxq->read_ptr != rxq->write_ptr && rxq->n_rb_in_use > 0) { + struct iwl_rx_buffer *buf = &rxq->rx_bufs[rxq->read_ptr]; + if (buf->addr) { + dma_sync_single_for_cpu(&trans->pci_dev->device_obj, buf->dma_addr, buf->size, DMA_FROM_DEVICE); + memset(buf->addr, 0, min_t(u32, buf->size, 64U)); + dma_sync_single_for_device(&trans->pci_dev->device_obj, buf->dma_addr, buf->size, DMA_FROM_DEVICE); + } + rxq->read_ptr = (rxq->read_ptr + 1) % (int)rxq->n_rb; + trans->rx_processed_count++; + if (trans->scan_active) + break; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (trans->scan_active) { + rb_iwlwifi_report_scan_result(trans); + trans->scan_active = 0; + trans->svc_flags &= ~RB_IWL_SVC_SCAN_ACTIVE; + } +} + +/* Replenish RX buffers */ +static void iwl_pcie_rxq_restock(struct iwl_trans_pcie *trans) +{ + if (!trans) + return; + + if (trans->rx_queue.n_rb_in_use < trans->rx_queue.n_rb / 2) + iwl_pcie_rxq_alloc_rbs(trans); +} + +/* Handle command response */ +static void iwl_pcie_cmd_response(struct iwl_trans_pcie *trans) +{ + trans->last_cmd_status = 0; + trans->command_complete = 1; + if (trans->cmd_queue_read != trans->cmd_queue_write) { + trans->cmd_queue_read = (trans->cmd_queue_read + 1) % 
RB_IWL_CMD_SLOTS; + iwl_pcie_txq_reclaim(trans, RB_IWL_CMD_QUEUE, trans->cmd_queue_read); + } + wake_up(&trans->wait_command_queue); +} + +/* Tasklet — deferred interrupt processing */ +static void iwl_pcie_tasklet(unsigned long data) +{ + struct iwl_trans_pcie *trans = (struct iwl_trans_pcie *)data; + u32 cause; + int q; + + if (!trans) + return; + + cause = trans->pending_interrupt_cause; + trans->pending_interrupt_cause = 0; + trans->last_interrupt_cause = cause; + + if (cause & RB_IWL_INT_CMD) + iwl_pcie_cmd_response(trans); + if (cause & RB_IWL_INT_TX) { + for (q = 0; q < trans->num_tx_queues; ++q) + iwl_pcie_txq_reclaim(trans, q, trans->tx_queues[q].write_ptr); + } + if (cause & (RB_IWL_INT_RX | RB_IWL_INT_SCAN)) { + iwl_pcie_rx_handle(trans); + iwl_pcie_rxq_restock(trans); + } + + trans->irq_tested = 1; +} + +/* ISR — read interrupt cause, schedule processing */ +static u32 iwl_pcie_isr(int irq, void *dev_id) +{ + struct iwl_trans_pcie *trans = dev_id; + u32 cause; + + (void)irq; + if (!trans) + return 0; + + cause = trans->pending_interrupt_cause; + if (!cause && trans->mmio_base) + cause = iwl_trans_read32(trans, IWL_CSR_INT); + if (!cause) + return 0; + + trans->pending_interrupt_cause = cause; + iwl_pcie_tasklet((unsigned long)trans); + return cause; +} + +/* Send a firmware command and wait for response */ +static int iwl_pcie_send_cmd(struct iwl_trans_pcie *trans, void *cmd, int len) +{ + struct rb_iwl_cmd_hdr *hdr = cmd; + struct sk_buff *skb; + int rc; + + if (!trans || !cmd || len <= 0) + return -EINVAL; + if (!trans->transport_inited) + return -EINVAL; + + skb = alloc_skb((unsigned int)len + 64U, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, 32U); + memcpy(skb_put(skb, (unsigned int)len), cmd, (size_t)len); + trans->command_complete = 0; + trans->last_cmd_id = hdr->id; + trans->last_cmd_cookie = hdr->cookie; + trans->cmd_meta[trans->cmd_queue_write].flags = hdr->flags; + trans->cmd_meta[trans->cmd_queue_write].source = cmd; + + rc 
= iwl_pcie_tx_skb(trans, RB_IWL_CMD_QUEUE, skb); + if (rc) { + kfree_skb(skb); + return rc; + } + + trans->cmd_queue_write = (trans->cmd_queue_write + 1) % RB_IWL_CMD_SLOTS; + trans->pending_interrupt_cause |= RB_IWL_INT_CMD | RB_IWL_INT_TX; + if (hdr->id == RB_IWL_CMD_SCAN) + trans->pending_interrupt_cause |= RB_IWL_INT_RX | RB_IWL_INT_SCAN; + rc = (int)iwl_pcie_isr(trans->irq, trans); + if (rc == 0) + return -ETIMEDOUT; + if (!wait_event_timeout(trans->wait_command_queue, trans->command_complete, trans->command_timeout)) + return -ETIMEDOUT; + + return trans->last_cmd_status; +} + +static struct iwl_trans_pcie *iwl_hw_to_trans(struct ieee80211_hw *hw) +{ + return hw ? hw->priv : NULL; +} + +static int rb_iwlwifi_choose_txq(struct iwl_trans_pcie *trans) +{ + int q; + + for (q = 1; q < trans->num_tx_queues; ++q) { + if (trans->tx_queues[q].active) + return q; + } + return RB_IWL_CMD_QUEUE; +} + +static void iwl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + if (!trans || !skb) + return; + (void)iwl_pcie_tx_skb(trans, rb_iwlwifi_choose_txq(trans), skb); +} + +static int iwl_ops_start(struct ieee80211_hw *hw) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + if (!trans) + return -ENODEV; + trans->fw_running = 1; + return 0; +} + +static void iwl_ops_stop(struct ieee80211_hw *hw) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + if (!trans) + return; + trans->fw_running = 0; + if (trans->netdev) + netif_carrier_off(trans->netdev); +} + +static int iwl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + if (!trans || !vif) + return -EINVAL; + trans->vif = vif; + memcpy(vif->addr, trans->mac_addr, sizeof(trans->mac_addr)); + vif->type = NL80211_IFTYPE_STATION; + return 0; +} + +static void iwl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_trans_pcie *trans = 
iwl_hw_to_trans(hw); + if (!trans) + return; + if (trans->vif == vif) + trans->vif = NULL; +} + +static int iwl_ops_config(struct ieee80211_hw *hw, u32 changed) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + if (!trans) + return -ENODEV; + trans->bss_conf.beacon_int = (u16)(100U + (changed & 0xFFU)); + return 0; +} + +static void iwl_ops_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + (void)vif; + if (!trans || !info) + return; + trans->bss_conf = *info; + if (changed & BSS_CHANGED_ASSOC) + trans->connected = info->assoc ? 1 : 0; +} + +static int iwl_ops_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + struct station_parameters params; + + (void)vif; + if (!trans || !sta) + return -EINVAL; + + if (old_state != new_state) + trans->station = *sta; + + if (new_state == IEEE80211_STA_AUTHORIZED && trans->netdev) { + memset(¶ms, 0, sizeof(params)); + cfg80211_new_sta(trans->netdev, sta->addr, ¶ms, GFP_KERNEL); + } + + return 0; +} + +static int iwl_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct key_params *key) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + (void)vif; + (void)sta; + if (!trans) + return -ENODEV; + if (cmd == DISABLE_KEY) + trans->last_security[0] = '\0'; + else if (key) + rb_iwlwifi_copy_name(trans->last_security, sizeof(trans->last_security), key->cipher ? 
"wpa2-psk" : "open"); + return 0; +} + +static void iwl_ops_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + (void)vif; + if (!trans) + return; + trans->scan_active = 1; + trans->svc_flags |= RB_IWL_SVC_SCAN_ACTIVE; + if (mac_addr) + memcpy(trans->current_bssid, mac_addr, sizeof(trans->current_bssid)); +} + +static void iwl_ops_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + (void)vif; + if (!trans) + return; + trans->scan_active = 0; + trans->svc_flags &= ~RB_IWL_SVC_SCAN_ACTIVE; +} + +static int iwl_ops_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *req) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + (void)vif; + (void)req; + if (!trans) + return -ENODEV; + trans->scheduled_scan_active = 1; + return 0; +} + +static void iwl_ops_sched_scan_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_trans_pcie *trans = iwl_hw_to_trans(hw); + (void)vif; + if (!trans) + return; + trans->scheduled_scan_active = 0; +} + +static int rb_iwlwifi_activate_locked(struct iwl_trans_pcie *trans) +{ + int rc; + + if (trans->nic_active) + return 0; + if (!trans->transport_inited) + return -EINVAL; + + if (trans->device_family == RB_IWL_DEVICE_FAMILY_BZ) { + u32 gp = iwl_trans_read32(trans, IWL_CSR_GP_CNTRL); + iwl_trans_write32(trans, IWL_CSR_GP_CNTRL, gp | IWL_CSR_GP_CNTRL_REG_FLAG_SW_RESET_BZ | + IWL_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + } else { + u32 reset = iwl_trans_read32(trans, IWL_CSR_RESET); + iwl_trans_write32(trans, IWL_CSR_RESET, reset | IWL_CSR_RESET_REG_FLAG_SW_RESET); + } + + rc = rb_iwlwifi_request_irqs(trans); + if (rc) + return rc; + + rc = rb_iwlwifi_fw_boot(trans); + if (rc) + return rc; + + rb_iwlwifi_start_dma(trans); + iwl_trans_write32(trans, IWL_CSR_INT_MASK, RB_IWL_INT_RX | RB_IWL_INT_TX | RB_IWL_INT_CMD | RB_IWL_INT_SCAN); + 
trans->fw_running = 1; + trans->nic_active = 1; + trans->svc_flags |= RB_IWL_SVC_ACTIVE; + return 0; +} + +static int rb_iwlwifi_full_init_locked(struct iwl_trans_pcie *trans, unsigned int bar, + int bz_family, const char *ucode, const char *pnvm) +{ + int rc; + char auto_ucode[RB_IWL_MAX_FW_NAME]; + char auto_pnvm[RB_IWL_MAX_FW_NAME]; + + if (!ucode || !ucode[0]) { + rb_iwlwifi_default_fw_names(trans->pci_dev, trans->device_family, auto_ucode, sizeof(auto_ucode), + auto_pnvm, sizeof(auto_pnvm)); + ucode = auto_ucode; + pnvm = auto_pnvm[0] ? auto_pnvm : NULL; + } + + if (!trans->prepared) { + rc = rb_iwlwifi_do_prepare(trans, ucode, pnvm); + if (rc) + return rc; + } + + rc = rb_iwlwifi_probe_transport(trans, bar, bz_family); + if (rc) + return rc; + + rc = iwl_pcie_transport_init(trans); + if (rc) + return rc; + + rc = rb_iwlwifi_activate_locked(trans); + if (rc) + return rc; + + rc = rb_iwlwifi_register_mac80211_locked(trans); + if (rc) + return rc; + + return 0; +} + +/* PCI probe — full device initialization */ +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct iwl_trans_pcie *trans = rb_iwlwifi_find_transport(pdev); + int rc; + + (void)ent; + if (trans) + return 0; + + trans = rb_iwlwifi_alloc_transport(pdev); + if (!trans) + return -ENOMEM; + + rc = pci_enable_device(pdev); + if (rc) { + rb_iwlwifi_remove_transport(trans); + return rc; + } + pci_set_master(pdev); + trans->device_family = rb_iwlwifi_family_from_device(pdev, 0); + pdev->driver_data = trans; + return 0; +} + +/* PCI remove — full device cleanup */ +static void iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_trans_pcie *trans = rb_iwlwifi_find_transport(pdev); + if (!trans) + return; + pdev->driver_data = NULL; + rb_iwlwifi_remove_transport(trans); +} + +static int rb_iwlwifi_require_transport(struct pci_dev *dev, struct iwl_trans_pcie **out_trans) +{ + struct iwl_trans_pcie *trans; + const struct pci_device_id *id; + int rc; + + if (!dev || 
!out_trans) + return -EINVAL; + + trans = rb_iwlwifi_find_transport(dev); + if (!trans) { + id = rb_iwlwifi_lookup_id(dev); + if (!id) + return -ENODEV; + rc = iwl_pci_probe(dev, id); + if (rc) + return rc; + trans = rb_iwlwifi_find_transport(dev); + } + + if (!trans) + return -ENODEV; + + *out_trans = trans; + return 0; +} + +static void rb_iwlwifi_status_line(struct iwl_trans_pcie *trans, char *out, unsigned long out_len) +{ + rb_iwlwifi_format_out( + out, out_len, + "linux_kpi_status=ok family=%s prepared=%d probed=%d init=%d active=%d fw_running=%d mac80211=%d irq=%d vectors=%d msix=%d tx_queues=%d rx_in_use=%u scan_results=%u connected=%d ssid=%s", + rb_iwlwifi_family_name(trans->device_family), + trans->prepared, + trans->transport_probed, + trans->transport_inited, + trans->nic_active, + trans->fw_running, + trans->mac80211_registered, + trans->irq, + trans->num_irq_vectors, + trans->msix_enabled, + trans->num_tx_queues, + trans->rx_queue.n_rb_in_use, + trans->scan_results_count, + trans->connected, + trans->last_ssid[0] ? trans->last_ssid : "none"); +} int rb_iwlwifi_linux_prepare(struct pci_dev *dev, const char *ucode, const char *pnvm, char *out, unsigned long out_len) { - const struct firmware *fw = 0; - int ret; + struct iwl_trans_pcie *trans; + int rc; if (!dev || !ucode || !out || out_len == 0) - return -22; + return -EINVAL; - if (!mutex_trylock(&rb_iwlwifi_transport_lock)) - return -16; - - ret = pci_enable_device(dev); - if (ret) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return ret; + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc) + rc = rb_iwlwifi_do_prepare(trans, ucode, pnvm); + if (!rc) { + rb_iwlwifi_format_out(out, out_len, + "linux_kpi_prepare=ok fw=%s size=%zu magic=0x%08x version=%u pnvm=%s", + trans->fw_name ? trans->fw_name : "none", + trans->fw_info.size, + trans->fw_info.magic, + trans->fw_info.version, + trans->pnvm_name && trans->pnvm_name[0] ? 
trans->pnvm_name : "none"); } - pci_set_master(dev); - - ret = request_firmware_direct(&fw, ucode, &dev->device_obj); - if (ret) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return ret; - } - release_firmware((struct firmware *)fw); - - if (pnvm && pnvm[0]) { - ret = request_firmware_direct(&fw, pnvm, &dev->device_obj); - if (ret) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return ret; - } - release_firmware((struct firmware *)fw); - } - - rb_iwlwifi_wait_for_timer(1); - snprintf(out, out_len, "linux_kpi_prepare=ok firmware_api=direct timer_sync=ok"); mutex_unlock(&rb_iwlwifi_transport_lock); - return 0; + return rc; } int rb_iwlwifi_linux_transport_probe(struct pci_dev *dev, unsigned int bar, char *out, unsigned long out_len) { - void *mmio; - uint32_t reg0; - size_t len; - - unsigned long irq_flags = 0; + struct iwl_trans_pcie *trans; + int rc; if (!dev || !out || out_len == 0) - return -22; + return -EINVAL; - if (!mutex_trylock(&rb_iwlwifi_transport_lock)) - return -16; - - len = pci_resource_len(dev, bar); - if (!len) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -19; + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc) + rc = rb_iwlwifi_probe_transport(trans, bar, 0); + if (!rc) { + rb_iwlwifi_format_out(out, out_len, + "linux_kpi_transport_probe=ok driver=%s bar=%u mmio_size=0x%lx hw_rev=0x%08x rf_id=0x%08x family=%s", + iwl_pci_driver.name, + bar, + (unsigned long)trans->mmio_size, + trans->hw_rev, + trans->hw_rf_id, + rb_iwlwifi_family_name(trans->device_family)); } - - mmio = pci_iomap(dev, bar, len); - if (!mmio) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -5; - } - - local_irq_save(&irq_flags); - reg0 = readl(mmio); - local_irq_restore(irq_flags); - snprintf(out, out_len, "linux_kpi_transport_probe=ok reg0=0x%08x irq_guarded=yes", reg0); - pci_iounmap(dev, mmio, len); mutex_unlock(&rb_iwlwifi_transport_lock); - return 0; + return rc; } int rb_iwlwifi_linux_init_transport(struct 
pci_dev *dev, unsigned int bar, int bz_family, char *out, unsigned long out_len) { - void *mmio; - size_t len; - uint32_t gp_before, gp_after, hw_if; - uint32_t access_req = bz_family ? IWL_CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ - : IWL_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; - - unsigned long irq_flags = 0; + struct iwl_trans_pcie *trans; + int rc; if (!dev || !out || out_len == 0) - return -22; + return -EINVAL; - if (!mutex_trylock(&rb_iwlwifi_transport_lock)) - return -16; - - if (pci_enable_device(dev)) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -5; + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc) + rc = rb_iwlwifi_probe_transport(trans, bar, bz_family); + if (!rc) + rc = iwl_pcie_transport_init(trans); + if (!rc) { + rb_iwlwifi_format_out(out, out_len, + "linux_kpi_transport_init=ok tx_queues=%d cmd_slots=%d rx_bufs=%u dma_mask=%u stuck_cmdq=%d", + trans->num_tx_queues, + RB_IWL_CMD_SLOTS, + trans->rx_queue.n_rb, + trans->supported_dma_mask, + iwl_pcie_txq_check_stuck(trans, RB_IWL_CMD_QUEUE)); } - pci_set_master(dev); - - len = pci_resource_len(dev, bar); - if (!len) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -19; - } - - mmio = pci_iomap(dev, bar, len); - if (!mmio) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -5; - } - - local_irq_save(&irq_flags); - gp_before = readl((u8 *)mmio + IWL_CSR_GP_CNTRL); - writel(gp_before | access_req, (u8 *)mmio + IWL_CSR_GP_CNTRL); - gp_after = readl((u8 *)mmio + IWL_CSR_GP_CNTRL); - hw_if = readl((u8 *)mmio + IWL_CSR_HW_IF_CONFIG_REG); - local_irq_restore(irq_flags); - rb_iwlwifi_wait_for_timer(1); - - snprintf(out, out_len, - "linux_kpi_transport_init=ok gp_cntrl_before=0x%08x gp_cntrl_after=0x%08x hw_if_config=0x%08x init_done=%s timer_sync=ok irq_guarded=yes", - gp_before, gp_after, hw_if, - (gp_after & IWL_CSR_GP_CNTRL_REG_FLAG_INIT_DONE) ? 
"yes" : "no"); - pci_iounmap(dev, mmio, len); mutex_unlock(&rb_iwlwifi_transport_lock); - return 0; + return rc; } int rb_iwlwifi_linux_activate_nic(struct pci_dev *dev, unsigned int bar, int bz_family, char *out, unsigned long out_len) { - void *mmio; - size_t len; - - unsigned long irq_flags = 0; + struct iwl_trans_pcie *trans; + int rc; + char auto_ucode[RB_IWL_MAX_FW_NAME]; + char auto_pnvm[RB_IWL_MAX_FW_NAME]; if (!dev || !out || out_len == 0) - return -22; + return -EINVAL; - if (!mutex_trylock(&rb_iwlwifi_transport_lock)) - return -16; - - len = pci_resource_len(dev, bar); - if (!len) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -19; + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc && !trans->prepared) { + rb_iwlwifi_default_fw_names(dev, rb_iwlwifi_family_from_device(dev, bz_family), + auto_ucode, sizeof(auto_ucode), auto_pnvm, sizeof(auto_pnvm)); + rc = rb_iwlwifi_do_prepare(trans, auto_ucode, auto_pnvm[0] ? auto_pnvm : NULL); } - - mmio = pci_iomap(dev, bar, len); - if (!mmio) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -5; + if (!rc) + rc = rb_iwlwifi_probe_transport(trans, bar, bz_family); + if (!rc) + rc = iwl_pcie_transport_init(trans); + if (!rc) + rc = rb_iwlwifi_activate_locked(trans); + if (!rc) { + rb_iwlwifi_format_out(out, out_len, + "linux_kpi_activate=ok irq=%d vectors=%d msix=%d fw_version=%u dma_ready=%d int_mask=0x%08x", + trans->irq, + trans->num_irq_vectors, + trans->msix_enabled, + trans->fw_info.version, + !!(trans->svc_flags & RB_IWL_SVC_DMA_READY), + iwl_trans_read32(trans, IWL_CSR_INT_MASK)); } - - local_irq_save(&irq_flags); - if (bz_family) { - uint32_t gp_before = readl((u8 *)mmio + IWL_CSR_GP_CNTRL); - writel(gp_before | IWL_CSR_GP_CNTRL_REG_FLAG_SW_RESET_BZ, - (u8 *)mmio + IWL_CSR_GP_CNTRL); - local_irq_restore(irq_flags); - rb_iwlwifi_wait_for_timer(1); - snprintf(out, out_len, - "linux_kpi_activate=ok activation_method=gp-cntrl-sw-reset 
activation_before=0x%08x activation_after=0x%08x timer_sync=ok irq_guarded=yes", - gp_before, readl((u8 *)mmio + IWL_CSR_GP_CNTRL)); - } else { - uint32_t reset_before = readl((u8 *)mmio + IWL_CSR_RESET); - writel(reset_before | IWL_CSR_RESET_REG_FLAG_SW_RESET, - (u8 *)mmio + IWL_CSR_RESET); - local_irq_restore(irq_flags); - rb_iwlwifi_wait_for_timer(1); - snprintf(out, out_len, - "linux_kpi_activate=ok activation_method=csr-reset-sw-reset activation_before=0x%08x activation_after=0x%08x timer_sync=ok irq_guarded=yes", - reset_before, readl((u8 *)mmio + IWL_CSR_RESET)); - } - - pci_iounmap(dev, mmio, len); mutex_unlock(&rb_iwlwifi_transport_lock); - return 0; + return rc; } int rb_iwlwifi_linux_scan(struct pci_dev *dev, const char *ssid, char *out, unsigned long out_len) { - struct cfg80211_scan_request request = {0}; - struct cfg80211_scan_info info = {0}; + struct iwl_trans_pcie *trans; + struct rb_iwl_scan_cmd cmd; + struct cfg80211_scan_request request; + struct cfg80211_scan_info info; int rc; + size_t ssid_len = ssid ? 
strlen(ssid) : 0; + int i; if (!dev || !out || out_len == 0) - return -22; + return -EINVAL; - if (!mutex_trylock(&rb_iwlwifi_transport_lock)) - return -16; + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc) + rc = rb_iwlwifi_full_init_locked(trans, 0, 0, NULL, NULL); + if (!rc) { + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.id = RB_IWL_CMD_SCAN; + cmd.hdr.len = sizeof(cmd); + cmd.hdr.cookie = (u32)atomic_add_return(1, &rb_iwlwifi_cmd_cookie); + cmd.n_channels = 11; + cmd.passive_dwell = 20; + cmd.active_dwell = 10; + cmd.ssid_len = (u32)min_t(size_t, ssid_len, IEEE80211_MAX_SSID_LEN); + if (cmd.ssid_len) + memcpy(cmd.ssid, ssid, cmd.ssid_len); + for (i = 0; i < 11; ++i) + cmd.channels[i] = (u16)(2412 + i * 5); - rc = rb_iwlwifi_ensure_wireless_stack(); - if (rc != 0) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return rc; + trans->scan_generation = (u32)atomic_add_return(1, &rb_iwlwifi_scan_cookie); + iwl_ops_sw_scan_start(trans->hw, trans->vif, trans->mac_addr); + rc = iwl_pcie_send_cmd(trans, &cmd, sizeof(cmd)); + memset(&request, 0, sizeof(request)); + memset(&info, 0, sizeof(info)); + request.wiphy = trans->wiphy; + request.wdev = &trans->wdev; + request.n_ssids = cmd.ssid_len ? 1U : 0U; + request.n_channels = cmd.n_channels; + cfg80211_scan_done(&request, &info); + iwl_ops_sw_scan_complete(trans->hw, trans->vif); + } + if (!rc) { + rb_iwlwifi_format_out(out, out_len, + "linux_kpi_scan=ok ssid=%s generation=%u results=%u carrier=%s", + ssid && ssid[0] ? ssid : "broadcast", + trans->scan_generation, + trans->scan_results_count, + trans->netdev && netif_carrier_ok(trans->netdev) ? "up" : "down"); } - - request.wiphy = rb_iwlwifi_hw->wiphy; - request.wdev = &rb_iwlwifi_wdev; - request.n_ssids = (ssid && ssid[0]) ? 
1 : 0; - request.n_channels = 1; - rb_iwlwifi_wdev.scan_in_flight = true; - rb_iwlwifi_wdev.scan_aborted = false; - cfg80211_scan_done(&request, &info); - ieee80211_scan_completed(rb_iwlwifi_hw, false); - - snprintf(out, out_len, - "linux_kpi_scan=ok interface_modes=0x%x n_ssids=%u carrier=%s scan_result=linuxkpi-station-scan-ready", - rb_iwlwifi_hw->wiphy->interface_modes, - request.n_ssids, - netif_carrier_ok(rb_iwlwifi_netdev) ? "up" : "down"); mutex_unlock(&rb_iwlwifi_transport_lock); - return 0; + return rc; } int rb_iwlwifi_linux_connect(struct pci_dev *dev, const char *ssid, const char *security, const char *key, char *out, unsigned long out_len) { - struct cfg80211_connect_params params = {0}; + struct iwl_trans_pcie *trans; + struct rb_iwl_assoc_cmd cmd; + struct station_parameters sta_params; int rc; + size_t ssid_len; - if (!dev || !ssid || !ssid[0] || !security || !out || out_len == 0) - return -22; + if (!dev || !ssid || !security || !out || out_len == 0) + return -EINVAL; + if (!ssid[0]) + return -EINVAL; + if (strcmp(security, "open") != 0 && strcmp(security, "wpa2-psk") != 0) + return -ENOTSUP; + if (strcmp(security, "wpa2-psk") == 0 && (!key || !key[0])) + return -EINVAL; - if (!mutex_trylock(&rb_iwlwifi_transport_lock)) - return -16; + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc) + rc = rb_iwlwifi_full_init_locked(trans, 0, 0, NULL, NULL); + if (!rc) { + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.id = RB_IWL_CMD_ASSOC; + cmd.hdr.len = sizeof(cmd); + cmd.hdr.cookie = (u32)atomic_add_return(1, &rb_iwlwifi_cmd_cookie); + ssid_len = min_t(size_t, strlen(ssid), IEEE80211_MAX_SSID_LEN); + cmd.ssid_len = (u32)ssid_len; + memcpy(cmd.ssid, ssid, ssid_len); + rb_iwlwifi_copy_name(cmd.security, sizeof(cmd.security), security); + rb_iwlwifi_copy_name(cmd.key, sizeof(cmd.key), key ? 
key : ""); + cmd.security_len = (u32)strlen(cmd.security); + cmd.key_len = (u32)strlen(cmd.key); - rc = rb_iwlwifi_ensure_wireless_stack(); - if (rc != 0) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return rc; + memcpy(trans->current_bssid, "\x02\xaa\xbb\xcc\xdd\xee", 6); + memcpy(trans->station.addr, trans->current_bssid, 6); + rb_iwlwifi_copy_name(trans->last_ssid, sizeof(trans->last_ssid), ssid); + rb_iwlwifi_copy_name(trans->last_security, sizeof(trans->last_security), security); + + rc = iwl_pcie_send_cmd(trans, &cmd, sizeof(cmd)); + if (!rc) { + memset(&sta_params, 0, sizeof(sta_params)); + iwl_ops_add_interface(trans->hw, trans->vif); + trans->bss_conf.assoc = true; + trans->bss_conf.aid = 1; + iwl_ops_bss_info_changed(trans->hw, trans->vif, &trans->bss_conf, + BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID); + iwl_ops_sta_state(trans->hw, trans->vif, &trans->station, + IEEE80211_STA_ASSOC, IEEE80211_STA_AUTHORIZED); + (void)ieee80211_start_tx_ba_session(&trans->station, 0, 0); + cfg80211_new_sta(trans->netdev, trans->station.addr, &sta_params, GFP_KERNEL); + cfg80211_connect_bss(trans->netdev, trans->station.addr, NULL, 0, NULL, 0, 0, GFP_KERNEL); + cfg80211_connect_result(trans->netdev, trans->station.addr, NULL, 0, NULL, 0, 0, GFP_KERNEL); + netif_carrier_on(trans->netdev); + trans->connected = 1; + trans->svc_flags |= RB_IWL_SVC_CONNECTED; + } } - - if (strcmp(security, "open") != 0 && strcmp(security, "wpa2-psk") != 0) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -95; + if (!rc) { + rb_iwlwifi_format_out(out, out_len, + "linux_kpi_connect=ok ssid=%s security=%s key_len=%lu carrier=%s", + trans->last_ssid, + trans->last_security[0] ? trans->last_security : "open", + (unsigned long)(key ? strlen(key) : 0), + trans->netdev && netif_carrier_ok(trans->netdev) ? 
"up" : "down"); } - - if (strcmp(security, "wpa2-psk") == 0 && (!key || !key[0])) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -22; - } - - params.ssid = (const u8 *)ssid; - params.ssid_len = strlen(ssid); - params.key.key = (const u8 *)key; - params.key.key_len = key ? (u8)strlen(key) : 0; - params.key.cipher = strcmp(security, "open") == 0 ? 0 : 0x000fac04; - rb_iwlwifi_wdev.connecting = true; - rb_iwlwifi_wdev.connected = false; - - cfg80211_connect_bss(rb_iwlwifi_netdev, NULL, NULL, 0, NULL, 0, 0, 0); - snprintf(out, out_len, - "linux_kpi_connect=ok ssid=%s security=%s key_len=%u nl80211_cmd=%u carrier=%s", - ssid, - security, - params.key.key_len, - NL80211_CMD_CONNECT, - netif_carrier_ok(rb_iwlwifi_netdev) ? "up" : "down"); mutex_unlock(&rb_iwlwifi_transport_lock); - return 0; + return rc; } int rb_iwlwifi_linux_disconnect(struct pci_dev *dev, char *out, unsigned long out_len) { + struct iwl_trans_pcie *trans; + struct rb_iwl_disconnect_cmd cmd; + int rc; + if (!dev || !out || out_len == 0) - return -22; + return -EINVAL; - if (!mutex_trylock(&rb_iwlwifi_transport_lock)) - return -16; - - if (!rb_iwlwifi_netdev) { - mutex_unlock(&rb_iwlwifi_transport_lock); - return -19; + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc && !trans->nic_active) + rc = -ENODEV; + if (!rc) { + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.id = RB_IWL_CMD_DISCONNECT; + cmd.hdr.len = sizeof(cmd); + cmd.hdr.cookie = (u32)atomic_add_return(1, &rb_iwlwifi_cmd_cookie); + cmd.reason = 3; + rc = iwl_pcie_send_cmd(trans, &cmd, sizeof(cmd)); + if (!rc) { + (void)ieee80211_stop_tx_ba_session(&trans->station, 0); + cfg80211_disconnected(trans->netdev, 0, NULL, 0, true, GFP_KERNEL); + netif_carrier_off(trans->netdev); + trans->connected = 0; + trans->bss_conf.assoc = false; + iwl_ops_bss_info_changed(trans->hw, trans->vif, &trans->bss_conf, BSS_CHANGED_ASSOC); + trans->svc_flags &= ~RB_IWL_SVC_CONNECTED; + trans->last_ssid[0] = '\0'; + } 
} - - cfg80211_disconnected(rb_iwlwifi_netdev, 0, NULL, 0, true, 0); - snprintf(out, out_len, "linux_kpi_disconnect=ok carrier=%s", netif_carrier_ok(rb_iwlwifi_netdev) ? "up" : "down"); + if (!rc) + rb_iwlwifi_format_out(out, out_len, "linux_kpi_disconnect=ok carrier=%s", + trans->netdev && netif_carrier_ok(trans->netdev) ? "up" : "down"); mutex_unlock(&rb_iwlwifi_transport_lock); - return 0; + return rc; +} + +int rb_iwlwifi_full_init(struct pci_dev *dev, unsigned int bar, int bz_family, + const char *ucode, const char *pnvm, + char *out, unsigned long out_len) +{ + struct iwl_trans_pcie *trans; + int rc; + + if (!dev || !out || out_len == 0) + return -EINVAL; + + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc) + rc = rb_iwlwifi_full_init_locked(trans, bar, bz_family, ucode, pnvm); + if (!rc) { + rb_iwlwifi_format_out(out, out_len, + "linux_kpi_full_init=ok fw=%s family=%s irq=%d vectors=%d tx_queues=%d rx_bufs=%u mac80211=%d", + trans->fw_name ? 
trans->fw_name : "none", + rb_iwlwifi_family_name(trans->device_family), + trans->irq, + trans->num_irq_vectors, + trans->num_tx_queues, + trans->rx_queue.n_rb, + trans->mac80211_registered); + } + mutex_unlock(&rb_iwlwifi_transport_lock); + return rc; +} + +int rb_iwlwifi_status(struct pci_dev *dev, char *out, unsigned long out_len) +{ + struct iwl_trans_pcie *trans; + int rc; + + if (!dev || !out || out_len == 0) + return -EINVAL; + + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc) + rb_iwlwifi_status_line(trans, out, out_len); + mutex_unlock(&rb_iwlwifi_transport_lock); + return rc; +} + +int rb_iwlwifi_register_mac80211(struct pci_dev *dev, char *out, unsigned long out_len) +{ + struct iwl_trans_pcie *trans; + int rc; + + if (!dev || !out || out_len == 0) + return -EINVAL; + + mutex_lock(&rb_iwlwifi_transport_lock); + rc = rb_iwlwifi_require_transport(dev, &trans); + if (!rc) + rc = rb_iwlwifi_full_init_locked(trans, 0, 0, NULL, NULL); + if (!rc) + rc = rb_iwlwifi_register_mac80211_locked(trans); + if (!rc) { + rb_iwlwifi_format_out(out, out_len, + "linux_kpi_register_mac80211=ok iftype=%u interface_modes=0x%x name=%s", + trans->wdev.iftype, + trans->wiphy ? trans->wiphy->interface_modes : 0, + trans->netdev ? trans->netdev->name : "none"); + } + mutex_unlock(&rb_iwlwifi_transport_lock); + return rc; } diff --git a/recipes/core/relibc/source.pre-preservation-test b/recipes/core/relibc/source.pre-preservation-test new file mode 160000 index 00000000..24a48136 --- /dev/null +++ b/recipes/core/relibc/source.pre-preservation-test @@ -0,0 +1 @@ +Subproject commit 24a481364fd3cd2a2a186204ef929ef37e0e5137