Expand linux-kpi wireless scaffolding, consolidate desktop plan, remove historical report

Add channel/band/rate/BSS/RX-TX structures to linux-kpi wireless
scaffolding (mac80211.rs, wireless.rs, net.rs, C headers), extend
redbear-iwlwifi linux_port.c with comprehensive PCIe transport, and
create consolidated CONSOLE-TO-KDE-DESKTOP-PLAN.md as the canonical
desktop path document. Remove stale INTEGRATION_REPORT.md (1388 lines)
in favor of current local/docs/ references. Update AGENTS.md, README,
and docs index to point to the new plan.
This commit is contained in:
2026-04-16 13:52:09 +01:00
parent e2f20aacb6
commit 6c2643bd9c
36 changed files with 5230 additions and 1757 deletions
@@ -42,6 +42,26 @@ static inline void atomic_sub(int i, atomic_t *v)
__sync_fetch_and_sub(&v->counter, i);
}
static inline int atomic_inc_and_test(atomic_t *v)
{
/* Atomically increment *v; return nonzero iff the new value is 0 (Linux semantics). */
return __sync_add_and_fetch(&v->counter, 1) == 0;
}
static inline int atomic_dec_and_test(atomic_t *v)
{
/* Atomically decrement *v; return nonzero iff the new value is 0 (Linux semantics). */
return __sync_sub_and_fetch(&v->counter, 1) == 0;
}
static inline int atomic_add_return(int i, atomic_t *v)
{
/* Atomically add i to *v and return the new value (GCC __sync builtin). */
return __sync_add_and_fetch(&v->counter, i);
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
/* Atomically subtract i from *v and return the new value (GCC __sync builtin). */
return __sync_sub_and_fetch(&v->counter, i);
}
static inline int atomic_inc_return(atomic_t *v)
{
return __sync_add_and_fetch(&v->counter, 1);
@@ -72,11 +92,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static inline int atomic_dec_and_test(atomic_t *v)
{
return __sync_sub_and_fetch(&v->counter, 1) == 0;
}
#define smp_mb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()
#define smp_wmb() __sync_synchronize()
@@ -1,7 +1,8 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
#include <linux/types.h>
#include "types.h"
#include <stddef.h>
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
@@ -10,6 +11,8 @@ enum dma_data_direction {
DMA_NONE = 3,
};
struct dma_pool;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
extern void *dma_alloc_coherent(void *dev, size_t size,
@@ -21,6 +24,14 @@ extern dma_addr_t dma_map_single(void *dev, void *ptr, size_t size,
enum dma_data_direction dir);
extern void dma_unmap_single(void *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
extern struct dma_pool *dma_pool_create(const char *name, void *dev, size_t size, size_t align, size_t boundary);
extern void dma_pool_destroy(struct dma_pool *pool);
extern void *dma_pool_alloc(struct dma_pool *pool, gfp_t flags, dma_addr_t *handle);
extern void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
extern void dma_sync_single_for_cpu(void *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
extern void dma_sync_single_for_device(void *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
extern dma_addr_t dma_map_page(void *dev, void *page, size_t offset, size_t size, enum dma_data_direction dir);
extern void dma_unmap_page(void *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
static inline int dma_mapping_error(void *dev, dma_addr_t addr)
{
@@ -2,7 +2,7 @@
#define _LINUX_FIRMWARE_H
#include <stddef.h>
#include <linux/types.h>
#include "types.h"
struct firmware {
size_t size;
@@ -1,9 +1,9 @@
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include "types.h"
#include "irq.h"
#include "spinlock.h"
extern void local_irq_save(unsigned long *flags);
extern void local_irq_restore(unsigned long flags);
@@ -1,7 +1,7 @@
#ifndef _LINUX_IO_H
#define _LINUX_IO_H
#include <linux/types.h>
#include "types.h"
#include <stddef.h>
extern void *ioremap(phys_addr_t phys_addr, size_t size);
@@ -31,6 +31,21 @@ static inline void memset_io(void *dst, int c, size_t count)
__builtin_memset(dst, c, count);
}
static inline void mb(void)
{
/* Full memory barrier via the GCC builtin; orders all loads and stores. */
__sync_synchronize();
}
static inline void rmb(void)
{
/* Read barrier; implemented as a full barrier since __sync_synchronize()
 * is the only fence the GCC __sync builtins expose. */
__sync_synchronize();
}
static inline void wmb(void)
{
/* Write barrier; implemented as a full barrier, same as mb()/rmb() here. */
__sync_synchronize();
}
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
#define ioread32(addr) readl(addr)
@@ -1,7 +1,7 @@
#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H
#include <linux/types.h>
#include "types.h"
typedef unsigned int irqreturn_t;
@@ -1,7 +1,7 @@
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
#include <linux/types.h>
#include "types.h"
#include <time.h>
static inline u64 redox_get_jiffies(void)
@@ -1,7 +1,7 @@
#ifndef _LINUX_MUTEX_H
#define _LINUX_MUTEX_H
#include <linux/types.h>
#include "types.h"
struct mutex {
unsigned char __opaque[64];
@@ -1,9 +1,9 @@
#ifndef _LINUX_PCI_H
#define _LINUX_PCI_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include "types.h"
#include "device.h"
#include "io.h"
#include <stddef.h>
#define PCI_VENDOR_ID_AMD 0x1002U
@@ -12,6 +12,12 @@
#define PCI_ANY_ID (~0U)
/* MSI/MSI-X support */
#define PCI_IRQ_MSI 1U
#define PCI_IRQ_MSIX 2U
#define PCI_IRQ_LEGACY 4U
#define PCI_IRQ_NOLEGACY 8U
struct pci_device_id {
u32 vendor;
u32 device;
@@ -49,6 +55,11 @@ struct pci_driver {
extern int pci_enable_device(struct pci_dev *dev);
extern void pci_disable_device(struct pci_dev *dev);
extern void pci_set_master(struct pci_dev *dev);
extern int pci_alloc_irq_vectors(struct pci_dev *dev, int min_vecs, int max_vecs, unsigned int flags);
extern void pci_free_irq_vectors(struct pci_dev *dev);
extern int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
extern int pci_enable_msi(struct pci_dev *dev);
extern void pci_disable_msi(struct pci_dev *dev);
extern void *pci_iomap(struct pci_dev *dev, unsigned int bar, size_t max_len);
extern void pci_iounmap(struct pci_dev *dev, void *addr, size_t size);
@@ -0,0 +1,6 @@
#ifndef _LINUX_SKB_H
#define _LINUX_SKB_H
#include "skbuff.h"
#endif
@@ -3,6 +3,8 @@
#include "types.h"
struct net_device;
struct sk_buff {
void *head;
void *data;
@@ -11,6 +13,12 @@ struct sk_buff {
unsigned int end;
};
struct sk_buff_head {
struct sk_buff *next;
struct sk_buff *prev;
u32 qlen;
};
extern struct sk_buff *alloc_skb(unsigned int size, gfp_t gfp_mask);
extern void kfree_skb(struct sk_buff *skb);
extern void skb_reserve(struct sk_buff *skb, unsigned int len);
@@ -20,5 +28,14 @@ extern void *skb_pull(struct sk_buff *skb, unsigned int len);
extern unsigned int skb_headroom(const struct sk_buff *skb);
extern unsigned int skb_tailroom(const struct sk_buff *skb);
extern void skb_trim(struct sk_buff *skb, unsigned int len);
extern void skb_queue_head_init(struct sk_buff_head *list);
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
extern void skb_queue_purge(struct sk_buff_head *list);
extern u32 skb_queue_len(const struct sk_buff_head *list);
extern int skb_queue_empty(const struct sk_buff_head *list);
extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, u32 length, gfp_t gfp_mask);
extern struct sk_buff *skb_copy(const struct sk_buff *src, gfp_t gfp);
extern struct sk_buff *skb_clone(const struct sk_buff *skb, gfp_t gfp);
#endif
@@ -1,7 +1,7 @@
#ifndef _LINUX_SPINLOCK_H
#define _LINUX_SPINLOCK_H
#include <linux/types.h>
#include "types.h"
typedef struct spinlock {
volatile unsigned char __locked;
@@ -1,8 +1,8 @@
#ifndef _LINUX_TIMER_H
#define _LINUX_TIMER_H
#include <linux/types.h>
#include <linux/compiler.h>
#include "types.h"
#include "compiler.h"
struct timer_list {
void (*function)(unsigned long data);
@@ -1,13 +1,15 @@
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
#include <linux/types.h>
#include <linux/compiler.h>
#include "types.h"
#include "compiler.h"
struct wait_queue_head {
unsigned char __opaque[128];
};
typedef struct wait_queue_head wait_queue_head_t;
static inline void init_waitqueue_head(struct wait_queue_head *wq)
{
(void)wq;
@@ -98,6 +98,11 @@ extern void cfg80211_connect_bss(struct net_device *dev,
size_t resp_ie_len,
u16 status,
gfp_t gfp);
extern void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
struct station_parameters *params, gfp_t gfp);
extern void cfg80211_rx_mgmt(struct wireless_dev *wdev, u32 freq, int sig_dbm,
const u8 *buf, size_t len, gfp_t gfp);
extern void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid);
extern void cfg80211_ready_on_channel(struct wireless_dev *wdev,
u64 cookie,
struct ieee80211_channel *chan,
@@ -34,6 +34,48 @@ struct ieee80211_bss_conf {
u16 beacon_int;
};
enum ieee80211_sta_state {
IEEE80211_STA_NOTEXIST,
IEEE80211_STA_NONE,
IEEE80211_STA_AUTH,
IEEE80211_STA_ASSOC,
IEEE80211_STA_AUTHORIZED,
};
enum set_key_cmd {
SET_KEY,
DISABLE_KEY,
};
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
int (*start)(struct ieee80211_hw *hw);
void (*stop)(struct ieee80211_hw *hw);
int (*add_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*remove_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*config)(struct ieee80211_hw *hw, u32 changed);
void (*bss_info_changed)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed);
int (*sta_state)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state);
int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct key_params *key);
void (*sw_scan_start)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr);
void (*sw_scan_complete)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*sched_scan_start)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *req);
void (*sched_scan_stop)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
};
#define BSS_CHANGED_ASSOC (1U << 0)
#define BSS_CHANGED_BSSID (1U << 1)
#define BSS_CHANGED_ERP_CTS_PROT (1U << 2)
#define BSS_CHANGED_HT (1U << 3)
#define BSS_CHANGED_BASIC_RATES (1U << 4)
#define BSS_CHANGED_BEACON_INT (1U << 5)
#define BSS_CHANGED_BANDWIDTH (1U << 6)
extern struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
const void *ops,
const char *requested_name);
@@ -43,5 +85,8 @@ extern void ieee80211_unregister_hw(struct ieee80211_hw *hw);
extern void ieee80211_queue_work(struct ieee80211_hw *hw, void *work);
extern void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted);
extern void ieee80211_connection_loss(struct ieee80211_vif *vif);
extern int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid, u16 timeout);
extern int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
extern void ieee80211_beacon_loss(struct ieee80211_vif *vif);
#endif
@@ -11,6 +11,7 @@ pub use rust_impl::drm_shim;
pub use rust_impl::firmware;
pub use rust_impl::io;
pub use rust_impl::irq;
pub use rust_impl::list;
pub use rust_impl::mac80211;
pub use rust_impl::memory;
pub use rust_impl::net;
@@ -1,27 +1,104 @@
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::ffi::{c_char, c_void, CStr};
use std::ptr;
use syscall::CallFlags;
use std::sync::atomic::{fence, Ordering};
use std::sync::Mutex;
lazy_static::lazy_static! {
static ref TRANSLATION_FD: Option<usize> = {
libredox::call::open("/scheme/memory/translation",
syscall::flag::O_CLOEXEC as i32, 0)
libredox::call::open("/scheme/memory/translation", syscall::flag::O_CLOEXEC as i32, 0)
.ok()
.map(|fd| fd)
};
}
#[cfg(target_os = "redox")]
fn virt_to_phys(virt: usize) -> usize {
let raw = match *TRANSLATION_FD {
Some(fd) => fd,
None => return 0,
};
let mut buf = virt.to_ne_bytes();
let _ = libredox::call::call_ro(raw, &mut buf, CallFlags::empty(), &[]);
let _ = libredox::call::call_ro(raw, &mut buf, syscall::CallFlags::empty(), &[]);
usize::from_ne_bytes(buf)
}
#[cfg(not(target_os = "redox"))]
fn virt_to_phys(virt: usize) -> usize {
let _ = *TRANSLATION_FD;
virt
}
/// Normalize a caller-supplied alignment: zero becomes 1, and any other
/// value is rounded up to the next power of two. Returns `None` only when
/// rounding up would overflow `usize`.
fn sanitize_align(align: usize) -> Option<usize> {
    match align.max(1) {
        a if a.is_power_of_two() => Some(a),
        a => a.checked_next_power_of_two(),
    }
}
/// Report whether the range `[addr, addr + size)` straddles a `boundary`
/// window. A zero boundary or zero size never crosses; an address-range
/// overflow is treated as a crossing.
fn crosses_boundary(addr: u64, size: usize, boundary: usize) -> bool {
    if size == 0 || boundary == 0 {
        return false;
    }
    // size > 0 here, so `size - 1` cannot underflow.
    let last = match addr.checked_add((size - 1) as u64) {
        Some(v) => v,
        None => return true,
    };
    // NOTE(review): this mask arithmetic assumes `boundary` is a power of
    // two — confirm that pool creation enforces that invariant.
    let window = !(boundary as u64 - 1);
    (addr & window) != (last & window)
}
/// One live block handed out by dma_pool_alloc(), remembered so the pool can
/// free any stragglers in dma_pool_destroy() and release the right Layout.
#[derive(Clone, Copy)]
struct PoolAllocation {
// Host virtual address returned to the caller.
vaddr: usize,
// Bus/DMA address reported through the `handle` out-parameter.
dma: u64,
// Allocation size in bytes (the pool's block size at alloc time).
size: usize,
// Alignment used to build the Layout; needed again to dealloc correctly.
align: usize,
}
type AllocationList = Mutex<Vec<PoolAllocation>>;
/// FFI-visible pool descriptor returned by dma_pool_create(). The field
/// layout is part of the C ABI (repr(C)); `name_len` is Rust-side private
/// bookkeeping used to free the owned name buffer on destroy.
#[repr(C)]
pub struct DmaPool {
/// Owned, NUL-terminated copy of the pool name (null when none was given).
pub name: *mut u8,
/// Size in bytes of every block handed out by this pool.
pub size: usize,
/// Alignment applied to each block (power of two; see sanitize_align()).
pub align: usize,
/// Boundary window blocks must not cross; 0 disables the check.
pub boundary: usize,
/// Type-erased `Box<Mutex<Vec<PoolAllocation>>>` tracking live blocks.
pub allocations: *mut c_void,
// Length (including trailing NUL) of the buffer behind `name`.
name_len: usize,
}
/// Duplicate a C pool name into an owned, NUL-terminated heap buffer.
///
/// Returns `(ptr, len)` where `len` counts the trailing NUL, or
/// `(null, 0)` when no name was supplied.
///
/// The buffer is converted to a boxed slice before being leaked so that its
/// allocated capacity is exactly `len`. This matters because
/// `dma_pool_destroy` reclaims it with `Vec::from_raw_parts(ptr, len, len)`,
/// which is only sound when the capacity passed in matches the capacity the
/// allocation was made with; leaking a `Vec` built via `with_capacity` (as
/// the previous version did) does not guarantee that.
fn copy_pool_name(name: *const u8) -> (*mut u8, usize) {
    if name.is_null() {
        return (ptr::null_mut(), 0);
    }
    let c_name = unsafe { CStr::from_ptr(name.cast::<c_char>()) };
    let mut owned = c_name.to_bytes().to_vec();
    owned.push(0);
    // into_boxed_slice shrinks capacity to exactly the length.
    let boxed = owned.into_boxed_slice();
    let len = boxed.len();
    (Box::into_raw(boxed).cast::<u8>(), len)
}
/// Borrow the pool's allocation tracker, or `None` when either the pool
/// pointer or its type-erased tracker pointer is null.
fn pool_allocations(pool: *mut DmaPool) -> Option<&'static AllocationList> {
    if pool.is_null() {
        return None;
    }
    let tracker = unsafe { (*pool).allocations }.cast::<AllocationList>();
    // as_ref() maps a null pointer to None for us.
    unsafe { tracker.as_ref() }
}
#[no_mangle]
pub extern "C" fn dma_alloc_coherent(
_dev: *mut u8,
@@ -91,3 +168,237 @@ pub extern "C" fn dma_set_mask(_dev: *mut u8, _mask: u64) -> i32 {
pub extern "C" fn dma_set_coherent_mask(_dev: *mut u8, _mask: u64) -> i32 {
0
}
/// Create a DMA pool that hands out fixed-size, aligned blocks, mirroring
/// Linux dma_pool_create(). Returns null on invalid parameters.
///
/// Parameter validation follows the Linux contract: `size` must be non-zero,
/// `align` is rounded up to a power of two, and a non-zero `boundary` must
/// be a power of two at least as large as `size`. The power-of-two check is
/// required because crosses_boundary() uses boundary-mask arithmetic that is
/// only correct for power-of-two windows; the previous version silently
/// accepted e.g. boundary = 3 and then mis-computed crossings.
#[no_mangle]
pub extern "C" fn dma_pool_create(
    name: *const u8,
    _dev: *mut u8,
    size: usize,
    align: usize,
    boundary: usize,
) -> *mut DmaPool {
    if size == 0 {
        return ptr::null_mut();
    }
    let Some(align) = sanitize_align(align) else {
        return ptr::null_mut();
    };
    if boundary != 0 && (size > boundary || !boundary.is_power_of_two()) {
        return ptr::null_mut();
    }
    let allocations = Box::new(Mutex::new(Vec::<PoolAllocation>::new()));
    let (name_ptr, name_len) = copy_pool_name(name);
    Box::into_raw(Box::new(DmaPool {
        name: name_ptr,
        size,
        align,
        boundary,
        allocations: Box::into_raw(allocations).cast::<c_void>(),
        name_len,
    }))
}
/// Destroy a pool created by dma_pool_create(), freeing any blocks the
/// caller never returned, the allocation tracker, the owned name buffer,
/// and finally the pool descriptor itself. Null pools are ignored.
#[no_mangle]
pub extern "C" fn dma_pool_destroy(pool: *mut DmaPool) {
if pool.is_null() {
return;
}
// Reclaim ownership of the tracker Box that create() leaked.
let allocations_ptr = unsafe { (*pool).allocations.cast::<AllocationList>() };
if !allocations_ptr.is_null() {
let allocations = unsafe { Box::from_raw(allocations_ptr) };
// Snapshot the entries; a poisoned lock yields an empty list, which
// leaks those blocks rather than risking a double free.
let entries = allocations
.lock()
.map(|entries| entries.clone())
.unwrap_or_default();
for entry in entries {
// Rebuild the exact Layout used at allocation time before freeing.
if let Ok(layout) = Layout::from_size_align(entry.size.max(1), entry.align.max(1)) {
unsafe { dealloc(entry.vaddr as *mut u8, layout) };
}
}
}
let pool = unsafe { Box::from_raw(pool) };
if !pool.name.is_null() && pool.name_len != 0 {
// NOTE(review): Vec::from_raw_parts requires length and capacity to
// match the original allocation exactly — confirm copy_pool_name
// guarantees the name buffer's capacity equals name_len.
unsafe {
drop(Vec::from_raw_parts(pool.name, pool.name_len, pool.name_len));
}
}
}
/// Allocate one zeroed block from `pool` and store its bus address in
/// `*handle`. Returns null on invalid arguments, allocation failure, a
/// zero or boundary-crossing bus address, or a poisoned tracking lock;
/// in every failure path any freshly allocated memory is released.
#[no_mangle]
pub extern "C" fn dma_pool_alloc(pool: *mut DmaPool, _flags: u32, handle: *mut u64) -> *mut u8 {
if pool.is_null() || handle.is_null() {
return ptr::null_mut();
}
let pool_ref = unsafe { &*pool };
if pool_ref.size == 0 {
return ptr::null_mut();
}
let layout = match Layout::from_size_align(pool_ref.size, pool_ref.align.max(1)) {
Ok(layout) => layout,
Err(_) => return ptr::null_mut(),
};
let vaddr = unsafe { alloc_zeroed(layout) };
if vaddr.is_null() {
return ptr::null_mut();
}
// A zero translation means virt_to_phys failed; a boundary crossing
// violates the pool contract. Either way the block is unusable.
let dma = virt_to_phys(vaddr as usize) as u64;
if dma == 0 || crosses_boundary(dma, pool_ref.size, pool_ref.boundary) {
unsafe { dealloc(vaddr, layout) };
return ptr::null_mut();
}
let Some(allocations) = pool_allocations(pool) else {
unsafe { dealloc(vaddr, layout) };
return ptr::null_mut();
};
let Ok(mut entries) = allocations.lock() else {
unsafe { dealloc(vaddr, layout) };
return ptr::null_mut();
};
// Record the block so destroy() can free it if the caller never does.
entries.push(PoolAllocation {
vaddr: vaddr as usize,
dma,
size: pool_ref.size,
align: pool_ref.align.max(1),
});
unsafe { *handle = dma };
vaddr
}
/// Return one block to `pool`. The tracked entry is matched by virtual
/// address or, when `addr` is non-zero, by DMA address; unknown blocks
/// and null arguments are silently ignored (no double-free risk).
#[no_mangle]
pub extern "C" fn dma_pool_free(pool: *mut DmaPool, vaddr: *mut u8, addr: u64) {
if pool.is_null() || vaddr.is_null() {
return;
}
let Some(allocations) = pool_allocations(pool) else {
return;
};
let Ok(mut entries) = allocations.lock() else {
return;
};
let Some(index) = entries
.iter()
.position(|entry| entry.vaddr == vaddr as usize || (addr != 0 && entry.dma == addr))
else {
return;
};
// swap_remove is O(1); entry order in the tracker does not matter.
let entry = entries.swap_remove(index);
// Rebuild the Layout recorded at allocation time before freeing.
if let Ok(layout) = Layout::from_size_align(entry.size.max(1), entry.align.max(1)) {
unsafe { dealloc(entry.vaddr as *mut u8, layout) };
}
}
/// Host-side stand-in for handing a DMA region back to the CPU: a no-op for
/// bogus (zero address or zero length) requests, otherwise an acquire fence.
#[no_mangle]
pub extern "C" fn dma_sync_single_for_cpu(_dev: *mut u8, addr: u64, size: usize, _dir: u32) {
    let meaningful = addr != 0 && size != 0;
    if meaningful {
        fence(Ordering::Acquire);
    }
}
/// Host-side stand-in for handing a DMA region to the device: a no-op for
/// bogus (zero address or zero length) requests, otherwise a release fence.
#[no_mangle]
pub extern "C" fn dma_sync_single_for_device(_dev: *mut u8, addr: u64, size: usize, _dir: u32) {
    let meaningful = addr != 0 && size != 0;
    if meaningful {
        fence(Ordering::Release);
    }
}
/// Translate `page + offset` to a bus address. Returns 0 — the error
/// sentinel checked by dma_mapping_error() — for a null page, an empty
/// mapping, or an address-arithmetic overflow.
#[no_mangle]
pub extern "C" fn dma_map_page(
    _dev: *mut u8,
    page: *mut u8,
    offset: usize,
    size: usize,
    _dir: u32,
) -> u64 {
    if page.is_null() || size == 0 {
        return 0;
    }
    match (page as usize).checked_add(offset) {
        Some(vaddr) => virt_to_phys(vaddr) as u64,
        None => 0,
    }
}
/// Unmapping is a no-op in this shim: dma_map_page() performs a pure address
/// translation and holds no per-mapping state that would need releasing.
#[no_mangle]
pub extern "C" fn dma_unmap_page(_dev: *mut u8, _addr: u64, _size: usize, _dir: u32) {}
/// A zero bus address is this shim's sentinel for a failed mapping;
/// report 1 (error) for it and 0 otherwise, matching the Linux contract
/// of a nonzero return on error.
#[no_mangle]
pub extern "C" fn dma_mapping_error(_dev: *mut u8, addr: u64) -> i32 {
    i32::from(addr == 0)
}
#[cfg(test)]
mod tests {
use super::*;
use std::ffi::CString;
// Round-trip a coherent allocation: on the host virt_to_phys is identity,
// so the handle is non-zero and map_single must agree with it.
#[test]
fn dma_alloc_and_map_work_on_host() {
let mut handle = 0u64;
let vaddr = dma_alloc_coherent(ptr::null_mut(), 128, &mut handle, 0);
assert!(!vaddr.is_null());
assert_ne!(handle, 0);
assert_eq!(dma_mapping_error(ptr::null_mut(), handle), 0);
assert_eq!(dma_map_single(ptr::null_mut(), vaddr, 128, 0), handle);
dma_sync_single_for_cpu(ptr::null_mut(), handle, 128, 0);
dma_sync_single_for_device(ptr::null_mut(), handle, 128, 0);
dma_free_coherent(ptr::null_mut(), 128, vaddr, handle);
}
// The tracker must grow on alloc and shrink back to empty on free.
#[test]
fn dma_pool_lifecycle_tracks_allocations() {
let name = CString::new("iwlwifi-rx").expect("valid test CString");
let pool = dma_pool_create(name.as_ptr().cast::<u8>(), ptr::null_mut(), 256, 64, 0);
assert!(!pool.is_null());
let mut handle = 0u64;
let vaddr = dma_pool_alloc(pool, 0, &mut handle);
assert!(!vaddr.is_null());
assert_ne!(handle, 0);
// Peek at the type-erased tracker directly to verify bookkeeping.
let allocations = unsafe { &*((*pool).allocations.cast::<AllocationList>()) };
assert_eq!(allocations.lock().expect("lock allocations").len(), 1);
dma_pool_free(pool, vaddr, handle);
assert!(allocations.lock().expect("lock allocations").is_empty());
dma_pool_destroy(pool);
}
// A block larger than its boundary window can never be placed.
#[test]
fn dma_pool_rejects_impossible_boundary() {
let pool = dma_pool_create(ptr::null(), ptr::null_mut(), 1024, 16, 128);
assert!(pool.is_null());
}
// dma_map_page translates page+offset; 0 is the documented error sentinel.
#[test]
fn dma_map_page_and_error_checks_work() {
let mut page = [0u8; 64];
let dma = dma_map_page(ptr::null_mut(), page.as_mut_ptr(), 8, 16, 0);
assert_ne!(dma, 0);
assert_eq!(dma_mapping_error(ptr::null_mut(), dma), 0);
assert_eq!(dma_mapping_error(ptr::null_mut(), 0), 1);
dma_unmap_page(ptr::null_mut(), dma, 16, 0);
}
}
@@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::ptr;
use std::sync::atomic::{fence, Ordering};
use std::sync::Mutex;
type PhysAddr = u64;
@@ -24,28 +25,26 @@ pub extern "C" fn ioremap(phys: PhysAddr, size: usize) -> *mut u8 {
size
);
let ptr = match redox_driver_sys::memory::MmioRegion::map(
match redox_driver_sys::memory::MmioRegion::map(
phys,
size,
redox_driver_sys::memory::CacheType::DeviceMemory,
redox_driver_sys::memory::MmioProt::READ_WRITE,
) {
Ok(region) => {
let p = region.as_ptr() as *mut u8;
let s = region.size();
let ptr = region.as_ptr() as *mut u8;
let size = region.size();
if let Ok(mut tracker) = MMIO_MAP_TRACKER.lock() {
tracker.insert(p as usize, MappedRegion { size: s });
tracker.insert(ptr as usize, MappedRegion { size });
}
std::mem::forget(region);
p
ptr
}
Err(e) => {
log::error!("ioremap: failed to map {:#x}+{:#x}: {:?}", phys, size, e);
ptr::null_mut()
}
};
ptr
}
}
#[no_mangle]
@@ -124,3 +123,69 @@ pub extern "C" fn writew(val: u16, addr: *mut u8) {
}
unsafe { ptr::write_volatile(addr as *mut u16, val) };
}
/// Bulk copy from normal memory into an MMIO window. Null pointers or a
/// zero count are silently ignored; source and destination must not overlap.
#[no_mangle]
pub extern "C" fn memcpy_toio(dst: *mut u8, src: *const u8, count: usize) {
    let valid = !dst.is_null() && !src.is_null() && count != 0;
    if valid {
        unsafe { ptr::copy_nonoverlapping(src, dst, count) };
    }
}
/// Bulk copy out of an MMIO window into normal memory. Null pointers or a
/// zero count are silently ignored; source and destination must not overlap.
#[no_mangle]
pub extern "C" fn memcpy_fromio(dst: *mut u8, src: *const u8, count: usize) {
    let valid = !dst.is_null() && !src.is_null() && count != 0;
    if valid {
        unsafe { ptr::copy_nonoverlapping(src, dst, count) };
    }
}
/// Fill `count` bytes of an MMIO window with `val`. A null destination or
/// zero count is silently ignored.
#[no_mangle]
pub extern "C" fn memset_io(dst: *mut u8, val: u8, count: usize) {
    let valid = !dst.is_null() && count != 0;
    if valid {
        unsafe { ptr::write_bytes(dst, val, count) };
    }
}
/// Full memory barrier for C callers, backed by a sequentially consistent
/// Rust atomic fence.
#[no_mangle]
pub extern "C" fn mb() {
fence(Ordering::SeqCst);
}
/// Read memory barrier for C callers, backed by an acquire fence.
#[no_mangle]
pub extern "C" fn rmb() {
fence(Ordering::Acquire);
}
/// Write memory barrier for C callers, backed by a release fence.
#[no_mangle]
pub extern "C" fn wmb() {
fence(Ordering::Release);
}
#[cfg(test)]
mod tests {
use super::*;
// The copy/set helpers should behave like memcpy/memset on plain buffers.
#[test]
fn io_copy_helpers_move_bytes() {
let mut dst = [0u8; 8];
let src = [1u8, 2, 3, 4, 5, 6, 7, 8];
memcpy_toio(dst.as_mut_ptr(), src.as_ptr(), src.len());
assert_eq!(dst, src);
let mut second = [0u8; 8];
memcpy_fromio(second.as_mut_ptr(), dst.as_ptr(), dst.len());
assert_eq!(second, src);
memset_io(second.as_mut_ptr(), 0xaa, second.len());
assert_eq!(second, [0xaa; 8]);
}
// Smoke test: the barrier exports must at least link and run.
#[test]
fn io_barriers_are_callable() {
mb();
rmb();
wmb();
}
}
@@ -0,0 +1,197 @@
use std::ptr;
/// Intrusive circular doubly-linked list node, ABI-compatible with the C
/// `struct list_head` (exactly two raw pointers, repr(C)).
#[repr(C)]
pub struct ListHead {
pub next: *mut ListHead,
pub prev: *mut ListHead,
}
/// Turn `head` into an empty, self-referential list, like Linux
/// INIT_LIST_HEAD(). A null pointer is ignored.
#[no_mangle]
pub extern "C" fn init_list_head(head: *mut ListHead) {
    if head.is_null() {
        return;
    }
    unsafe {
        (*head).prev = head;
        (*head).next = head;
    }
}
/// Insert `new` immediately after `head` (stack order), like Linux
/// list_add(). Null arguments are ignored.
#[no_mangle]
pub extern "C" fn list_add(new: *mut ListHead, head: *mut ListHead) {
    if new.is_null() || head.is_null() {
        return;
    }
    unsafe {
        let old_first = (*head).next;
        (*new).prev = head;
        (*new).next = old_first;
        (*head).next = new;
        // Tolerate an uninitialised head whose next is still null.
        if !old_first.is_null() {
            (*old_first).prev = new;
        }
    }
}
/// Insert `new` immediately before `head` (queue order), like Linux
/// list_add_tail(). Null arguments are ignored.
#[no_mangle]
pub extern "C" fn list_add_tail(new: *mut ListHead, head: *mut ListHead) {
    if new.is_null() || head.is_null() {
        return;
    }
    unsafe {
        let old_last = (*head).prev;
        (*new).prev = old_last;
        (*new).next = head;
        (*head).prev = new;
        // Tolerate an uninitialised head whose prev is still null.
        if !old_last.is_null() {
            (*old_last).next = new;
        }
    }
}
/// Unlink `entry` from its list and null out its link fields (the Linux
/// version poisons them instead). A null entry is ignored.
#[no_mangle]
pub extern "C" fn list_del(entry: *mut ListHead) {
    if entry.is_null() {
        return;
    }
    unsafe {
        let (before, after) = ((*entry).prev, (*entry).next);
        if !before.is_null() {
            (*before).next = after;
        }
        if !after.is_null() {
            (*after).prev = before;
        }
        (*entry).next = ptr::null_mut();
        (*entry).prev = ptr::null_mut();
    }
}
/// Return 1 when `head` is null or self-referential (an empty list),
/// otherwise 0 — C-friendly boolean, like Linux list_empty().
#[no_mangle]
pub extern "C" fn list_empty(head: *const ListHead) -> i32 {
    if head.is_null() {
        return 1;
    }
    let first = unsafe { (*head).next } as *const ListHead;
    i32::from(ptr::eq(first, head))
}
/// Move every node of `list` to the front of `head`, like Linux
/// list_splice(). An empty or null source (or null destination) is a no-op.
/// The source head itself is NOT re-initialised afterwards — callers must
/// run init_list_head(list) before reusing it, matching the Linux contract.
#[no_mangle]
pub extern "C" fn list_splice(list: *mut ListHead, head: *mut ListHead) {
if list.is_null() || head.is_null() || list_empty(list) != 0 {
return;
}
unsafe {
// `list` is non-empty here, so first/last are real nodes, not `list`.
let first = (*list).next;
let last = (*list).prev;
let at = (*head).next;
(*first).prev = head;
(*head).next = first;
(*last).next = at;
if !at.is_null() {
(*at).prev = last;
}
}
}
/// container_of-style lookup: given the byte offset of the embedded link
/// field, return the struct containing the first node of `head`, or null
/// for an empty/null list or an offset that would underflow the address.
#[no_mangle]
pub extern "C" fn list_first_entry(head: *const ListHead, offset: usize) -> *mut u8 {
    if head.is_null() || list_empty(head) != 0 {
        return ptr::null_mut();
    }
    let first = unsafe { (*head).next };
    if first.is_null() {
        return ptr::null_mut();
    }
    match (first as usize).checked_sub(offset) {
        Some(base) => base as *mut u8,
        None => ptr::null_mut(),
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem::offset_of;
// Test fixture embedding a ListHead, mimicking a kernel container struct.
#[repr(C)]
struct Node {
value: u32,
link: ListHead,
}
// Cover add, emptiness, container recovery via offset, and deletion.
#[test]
fn list_add_delete_and_first_entry_work() {
let mut head = ListHead {
next: ptr::null_mut(),
prev: ptr::null_mut(),
};
init_list_head(&mut head);
assert_eq!(list_empty(&head), 1);
let mut node = Node {
value: 7,
link: ListHead {
next: ptr::null_mut(),
prev: ptr::null_mut(),
},
};
list_add(&mut node.link, &mut head);
assert_eq!(list_empty(&head), 0);
// offset_of! recovers the containing Node from its embedded link.
let first = list_first_entry(&head, offset_of!(Node, link)).cast::<Node>();
assert_eq!(unsafe { (*first).value }, 7);
list_del(&mut node.link);
init_list_head(&mut head);
assert_eq!(list_empty(&head), 1);
}
// Queue-order insertion plus moving a whole chain onto another head.
#[test]
fn list_add_tail_and_splice_work() {
let mut dst = ListHead {
next: ptr::null_mut(),
prev: ptr::null_mut(),
};
let mut src = ListHead {
next: ptr::null_mut(),
prev: ptr::null_mut(),
};
init_list_head(&mut dst);
init_list_head(&mut src);
let mut node1 = Node {
value: 1,
link: ListHead {
next: ptr::null_mut(),
prev: ptr::null_mut(),
},
};
let mut node2 = Node {
value: 2,
link: ListHead {
next: ptr::null_mut(),
prev: ptr::null_mut(),
},
};
list_add_tail(&mut node1.link, &mut src);
list_add_tail(&mut node2.link, &mut src);
list_splice(&mut src, &mut dst);
// After the splice, node1 leads dst and still chains to node2.
let first = list_first_entry(&dst, offset_of!(Node, link)).cast::<Node>();
assert_eq!(unsafe { (*first).value }, 1);
assert!(std::ptr::eq(node1.link.next, &mut node2.link));
}
}
@@ -1,14 +1,68 @@
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::collections::HashMap;
use std::ffi::c_void;
use std::ptr;
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::Mutex;
use super::wireless::{wiphy_free, wiphy_new_nm, wiphy_register, wiphy_unregister, Wiphy};
use super::net::SkBuff;
use super::wireless::{
wiphy_free, wiphy_new_nm, wiphy_register, wiphy_unregister, KeyParams, Wiphy,
};
use super::workqueue::{schedule_work, WorkStruct};
const EINVAL: i32 = 22;
const EBUSY: i32 = 16;
lazy_static::lazy_static! {
static ref STA_REGISTRY: Mutex<HashMap<usize, StaRegistryEntry>> = Mutex::new(HashMap::new());
static ref BA_SESSIONS: Mutex<HashMap<usize, Vec<u16>>> = Mutex::new(HashMap::new());
}
/// Bookkeeping record for one station known to the shim, stored in
/// STA_REGISTRY keyed by the station pointer's address.
#[derive(Clone, Copy)]
struct StaRegistryEntry {
// Address of the owning Ieee80211Hw, stored as usize so the entry is Copy.
hw: usize,
// Address of the vif the station was reported on; currently unused.
_vif: usize,
// Last IEEE80211_STA_* value pushed through ieee80211_sta_state().
state: u32,
}
/// C-compatible vtable of driver callbacks, mirroring a subset of the Linux
/// `struct ieee80211_ops`. Every entry is optional; the shim treats a
/// missing callback as a no-op (see ieee80211_sta_state()).
#[repr(C)]
pub struct Ieee80211Ops {
/// Hand one frame to the driver for transmission.
pub tx: Option<extern "C" fn(*mut Ieee80211Hw, *mut SkBuff)>,
/// Bring the hardware up / tear it down.
pub start: Option<extern "C" fn(*mut Ieee80211Hw) -> i32>,
pub stop: Option<extern "C" fn(*mut Ieee80211Hw)>,
/// Virtual-interface lifecycle.
pub add_interface: Option<extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif) -> i32>,
pub remove_interface: Option<extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif)>,
/// Apply changed hardware configuration (bitmask of changes).
pub config: Option<extern "C" fn(*mut Ieee80211Hw, u32) -> i32>,
/// BSS parameters changed; final u32 is a BSS_CHANGED_* bitmask.
pub bss_info_changed:
Option<extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif, *mut Ieee80211BssConf, u32)>,
/// Station state transition; the u32 is the new IEEE80211_STA_* state.
/// NOTE(review): the C header's sta_state takes both old and new state —
/// confirm which signature the driver side actually uses.
pub sta_state:
Option<extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif, *mut Ieee80211Sta, u32) -> i32>,
/// Install or remove a crypto key (i32 is the set_key command).
pub set_key: Option<
extern "C" fn(
*mut Ieee80211Hw,
*mut Ieee80211Vif,
i32,
*mut Ieee80211Sta,
*mut KeyParams,
) -> i32,
>,
/// Block-ack aggregation action for (sta, tid) with extra parameters.
pub ampdu_action: Option<
extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif, *mut Ieee80211Sta, u16, u16, u16) -> i32,
>,
/// Software scan begin/end notifications.
pub sw_scan_start: Option<extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif, *const u8)>,
pub sw_scan_complete: Option<extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif)>,
/// Multicast filter preparation / RX filter reconfiguration.
pub prepare_multicast: Option<extern "C" fn(*mut Ieee80211Hw, *mut c_void) -> u64>,
pub configure_filter: Option<extern "C" fn(*mut Ieee80211Hw, u32, *mut u32, u64)>,
/// Scheduled (offloaded) scan start/stop; request is an opaque pointer.
pub sched_scan_start:
Option<extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif, *mut c_void) -> i32>,
pub sched_scan_stop: Option<extern "C" fn(*mut Ieee80211Hw, *mut Ieee80211Vif)>,
}
#[repr(C)]
pub struct Ieee80211Hw {
pub wiphy: *mut Wiphy,
pub ops: *const Ieee80211Ops,
pub priv_data: *mut c_void,
pub registered: AtomicI32,
pub extra_tx_headroom: u32,
@@ -26,6 +80,7 @@ pub struct Ieee80211Vif {
}
#[repr(C)]
#[derive(Debug)]
pub struct Ieee80211Sta {
pub addr: [u8; 6],
pub drv_priv: *mut c_void,
@@ -39,6 +94,41 @@ pub struct Ieee80211BssConf {
pub beacon_int: u16,
}
pub const BSS_CHANGED_ASSOC: u32 = 1;
pub const BSS_CHANGED_BSSID: u32 = 2;
pub const BSS_CHANGED_ERP_CTS_PROT: u32 = 4;
pub const BSS_CHANGED_HT: u32 = 8;
pub const BSS_CHANGED_BASIC_RATES: u32 = 16;
pub const BSS_CHANGED_BEACON_INT: u32 = 32;
pub const BSS_CHANGED_BANDWIDTH: u32 = 64;
/// Mirror a station state transition into the process-wide registries.
///
/// States at or below IEEE80211_STA_NONE mean the station is effectively
/// gone: its registry entry and any tracked block-ack sessions are dropped.
/// Higher states insert or overwrite the registry entry. Poisoned locks are
/// silently skipped (best-effort bookkeeping).
fn update_sta_registry(
hw: *mut Ieee80211Hw,
vif: *mut Ieee80211Vif,
sta: *mut Ieee80211Sta,
new_state: u32,
) {
if let Ok(mut registry) = STA_REGISTRY.lock() {
if new_state <= IEEE80211_STA_NONE {
registry.remove(&(sta as usize));
} else {
registry.insert(
sta as usize,
StaRegistryEntry {
hw: hw as usize,
_vif: vif as usize,
state: new_state,
},
);
}
}
// A removed station cannot keep aggregation sessions alive.
if new_state <= IEEE80211_STA_NONE {
if let Ok(mut sessions) = BA_SESSIONS.lock() {
sessions.remove(&(sta as usize));
}
}
}
#[no_mangle]
pub extern "C" fn ieee80211_alloc_hw_nm(
priv_data_len: usize,
@@ -52,6 +142,7 @@ pub extern "C" fn ieee80211_alloc_hw_nm(
let mut hw = Box::new(Ieee80211Hw {
wiphy,
ops: ops.cast::<Ieee80211Ops>(),
priv_data: ptr::null_mut(),
registered: AtomicI32::new(0),
extra_tx_headroom: 0,
@@ -86,6 +177,9 @@ pub extern "C" fn ieee80211_free_hw(hw: *mut Ieee80211Hw) {
if hw.is_null() {
return;
}
if let Ok(mut registry) = STA_REGISTRY.lock() {
registry.retain(|_, entry| entry.hw != hw as usize);
}
unsafe {
let hw_box = Box::from_raw(hw);
if !hw_box.priv_data.is_null() {
@@ -103,10 +197,10 @@ pub extern "C" fn ieee80211_free_hw(hw: *mut Ieee80211Hw) {
#[no_mangle]
pub extern "C" fn ieee80211_register_hw(hw: *mut Ieee80211Hw) -> i32 {
if hw.is_null() {
return -22;
return -EINVAL;
}
if unsafe { &*hw }.registered.load(Ordering::Acquire) != 0 {
return -16;
return -EBUSY;
}
let rc = wiphy_register(unsafe { (*hw).wiphy });
if rc != 0 {
@@ -147,16 +241,207 @@ pub extern "C" fn ieee80211_connection_loss(vif: *mut Ieee80211Vif) {
unsafe { (*vif).cfg_assoc = false };
}
/// Per-frame receive metadata, loosely mirroring the Linux
/// `struct ieee80211_rx_status`.
#[repr(C)]
pub struct Ieee80211RxStatus {
/// Channel frequency (presumably MHz, as in Linux — confirm with callers).
pub freq: u16,
/// Band identifier the frame was received on.
pub band: u32,
/// Signal strength (presumably dBm — confirm with the driver side).
pub signal: i8,
/// Noise estimate, same convention as `signal`.
pub noise: i8,
/// Index into the band's rate table.
pub rate_idx: u8,
/// RX_FLAG_* bits describing decryption/MIC handling.
pub flag: u32,
/// Antenna the frame was received on.
pub antenna: u8,
/// Additional receive flags.
pub rx_flags: u32,
}
impl Default for Ieee80211RxStatus {
/// All-zero status: no signal/noise info, flags cleared, band and rate
/// index 0. Written out explicitly because the struct crosses the C ABI.
fn default() -> Self {
Self {
freq: 0,
band: 0,
signal: 0,
noise: 0,
rate_idx: 0,
flag: 0,
antenna: 0,
rx_flags: 0,
}
}
}
pub const RX_FLAG_MMIC_ERROR: u32 = 1 << 0;
pub const RX_FLAG_DECRYPTED: u32 = 1 << 1;
pub const RX_FLAG_MMIC_STRIPPED: u32 = 1 << 2;
pub const RX_FLAG_IV_STRIPPED: u32 = 1 << 3;
/// Per-frame transmit metadata, a subset of the Linux
/// `struct ieee80211_tx_info`.
#[repr(C)]
pub struct Ieee80211TxInfo {
/// IEEE80211_TX_CTL_* bit flags.
pub flags: u32,
/// Band the frame is being sent on.
pub band: u32,
/// Hardware queue selected for this frame.
pub hw_queue: u8,
/// Opaque scratch space reserved for the driver's rate-control data.
pub rate_driver_data: [u8; 16],
}
pub const IEEE80211_TX_CTL_REQ_TX_STATUS: u32 = 1 << 0;
pub const IEEE80211_TX_CTL_NO_ACK: u32 = 1 << 1;
pub const IEEE80211_TX_CTL_CLEAR_PS_FILT: u32 = 1 << 2;
pub const IEEE80211_TX_CTL_FIRST_FRAGMENT: u32 = 1 << 3;
/// Stub: validates arguments only; the received frame is currently dropped.
/// TODO(review): wire this into a real RX path once one exists.
#[no_mangle]
pub extern "C" fn ieee80211_rx_irqsafe(hw: *mut Ieee80211Hw, skb: *mut SkBuff) {
if hw.is_null() || skb.is_null() {
return;
}
}
/// Stub: validates arguments only; TX completion status is not yet
/// propagated anywhere.
#[no_mangle]
pub extern "C" fn ieee80211_tx_status(hw: *mut Ieee80211Hw, skb: *mut SkBuff) {
if hw.is_null() || skb.is_null() {
return;
}
}
/// Stub: always reports TID 0. The real mac80211 parses the QoS control
/// field of the frame; this shim does not inspect skb contents yet.
#[no_mangle]
pub extern "C" fn ieee80211_get_tid(skb: *const SkBuff) -> u8 {
if skb.is_null() {
return 0;
}
0
}
/// Stub: argument validation only; the channel definition pointed to by
/// `chandef` is left untouched.
#[no_mangle]
pub extern "C" fn ieee80211_chandef_create(
chandef: *mut c_void,
channel: *const super::wireless::Ieee80211Channel,
_chan_type: u32,
) {
if chandef.is_null() || channel.is_null() {
return;
}
}
pub const IEEE80211_STA_NOTEXIST: u32 = 0;
pub const IEEE80211_STA_NONE: u32 = 1;
pub const IEEE80211_STA_AUTH: u32 = 2;
pub const IEEE80211_STA_ASSOC: u32 = 3;
pub const IEEE80211_STA_AUTHORIZED: u32 = 4;
/// Record an outgoing block-ack agreement for (sta, tid).
/// Valid TIDs are 0..16; starting an already-active session returns -EBUSY,
/// and a null station, out-of-range TID, or poisoned lock returns -EINVAL.
#[no_mangle]
pub extern "C" fn ieee80211_start_tx_ba_session(
    pub_sta: *mut Ieee80211Sta,
    tid: u16,
    _timeout: u16,
) -> i32 {
    if pub_sta.is_null() || tid >= 16 {
        return -EINVAL;
    }
    match BA_SESSIONS.lock() {
        Ok(mut sessions) => {
            let tids = sessions.entry(pub_sta as usize).or_default();
            if tids.contains(&tid) {
                -EBUSY
            } else {
                tids.push(tid);
                0
            }
        }
        Err(_) => -EINVAL,
    }
}
/// Tear down the (sta, tid) block-ack agreement. Stopping a session that
/// was never started is not an error; the station's entry is dropped once
/// its last TID is removed.
#[no_mangle]
pub extern "C" fn ieee80211_stop_tx_ba_session(pub_sta: *mut Ieee80211Sta, tid: u16) -> i32 {
    if pub_sta.is_null() || tid >= 16 {
        return -EINVAL;
    }
    let key = pub_sta as usize;
    if let Ok(mut sessions) = BA_SESSIONS.lock() {
        let now_empty = sessions.get_mut(&key).map(|tids| {
            tids.retain(|active| *active != tid);
            tids.is_empty()
        });
        if now_empty == Some(true) {
            sessions.remove(&key);
        }
    }
    0
}
/// Record the station's new state in the shim registry, then forward the
/// transition to the driver's sta_state callback when one is installed.
/// Returns -EINVAL for any null pointer, otherwise the callback's result
/// (or 0 when the driver provides no callback).
#[no_mangle]
pub extern "C" fn ieee80211_sta_state(
    hw: *mut Ieee80211Hw,
    vif: *mut Ieee80211Vif,
    sta: *mut Ieee80211Sta,
    _old_state: u32,
    new_state: u32,
) -> i32 {
    if hw.is_null() || vif.is_null() || sta.is_null() {
        return -EINVAL;
    }
    update_sta_registry(hw, vif, sta, new_state);
    unsafe { (*hw).ops.as_ref() }
        .and_then(|ops| ops.sta_state)
        .map_or(0, |callback| callback(hw, vif, sta, new_state))
}
/// Look up a station on this hw by MAC address. Only stations past the
/// NONE state are considered. Returns null on bad arguments, a poisoned
/// registry lock, or no match.
#[no_mangle]
pub extern "C" fn ieee80211_find_sta(hw: *mut Ieee80211Hw, addr: *const u8) -> *mut Ieee80211Sta {
    if hw.is_null() || addr.is_null() {
        return ptr::null_mut();
    }
    let Ok(registry) = STA_REGISTRY.lock() else {
        return ptr::null_mut();
    };
    // Caller guarantees `addr` points at 6 readable bytes (a MAC address).
    let wanted = unsafe { ptr::read(addr.cast::<[u8; 6]>()) };
    registry
        .iter()
        .filter(|(_, entry)| entry.hw == hw as usize && entry.state > IEEE80211_STA_NONE)
        .map(|(sta_ptr, _)| *sta_ptr as *mut Ieee80211Sta)
        .find(|&sta| !sta.is_null() && unsafe { (*sta).addr } == wanted)
        .unwrap_or(ptr::null_mut())
}
/// Reports beacon loss on a vif; this scaffolding simply clears the cached
/// association flag rather than scheduling any recovery work.
#[no_mangle]
pub extern "C" fn ieee80211_beacon_loss(vif: *mut Ieee80211Vif) {
    if vif.is_null() {
        return;
    }
    unsafe { (*vif).cfg_assoc = false };
}
#[cfg(test)]
mod tests {
use super::*;
use crate::rust_impl::workqueue::{flush_scheduled_work, WorkStruct};
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
use std::sync::atomic::AtomicBool;
static WORK_RAN: AtomicBool = AtomicBool::new(false);
static STA_CALLBACKS: AtomicI32 = AtomicI32::new(0);
extern "C" fn test_work(_work: *mut WorkStruct) {
WORK_RAN.store(true, AtomicOrdering::Release);
WORK_RAN.store(true, Ordering::Release);
}
extern "C" fn test_sta_state(
_hw: *mut Ieee80211Hw,
_vif: *mut Ieee80211Vif,
_sta: *mut Ieee80211Sta,
state: u32,
) -> i32 {
STA_CALLBACKS.store(state as i32, Ordering::Release);
0
}
#[test]
@@ -179,15 +464,15 @@ mod tests {
func: Some(test_work),
__opaque: [0; 64],
};
WORK_RAN.store(false, AtomicOrdering::Release);
WORK_RAN.store(false, Ordering::Release);
ieee80211_queue_work(hw, (&mut work as *mut WorkStruct).cast::<c_void>());
flush_scheduled_work();
assert!(WORK_RAN.load(AtomicOrdering::Acquire));
assert!(WORK_RAN.load(Ordering::Acquire));
ieee80211_free_hw(hw);
}
#[test]
fn connection_loss_clears_assoc_state() {
fn connection_loss_and_beacon_loss_clear_assoc_state() {
let mut vif = Ieee80211Vif {
addr: [0; 6],
drv_priv: ptr::null_mut(),
@@ -196,5 +481,110 @@ mod tests {
};
ieee80211_connection_loss(&mut vif);
assert!(!vif.cfg_assoc);
vif.cfg_assoc = true;
ieee80211_beacon_loss(&mut vif);
assert!(!vif.cfg_assoc);
}
#[test]
fn ieee80211_rx_status_default_and_flags_work() {
let status = Ieee80211RxStatus::default();
assert_eq!(status.freq, 0);
assert_eq!(status.band, 0);
assert_eq!(status.signal, 0);
assert_eq!(status.noise, 0);
assert_eq!(status.rate_idx, 0);
assert_eq!(status.flag, 0);
assert_eq!(status.antenna, 0);
assert_eq!(status.rx_flags, 0);
let combined = RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
assert_ne!(combined & RX_FLAG_DECRYPTED, 0);
assert_ne!(combined & RX_FLAG_IV_STRIPPED, 0);
assert_ne!(combined & RX_FLAG_MMIC_STRIPPED, 0);
assert_eq!(combined & RX_FLAG_MMIC_ERROR, 0);
}
#[test]
fn ieee80211_sta_registry_and_ba_sessions_work() {
let ops = Ieee80211Ops {
tx: None,
start: None,
stop: None,
add_interface: None,
remove_interface: None,
config: None,
bss_info_changed: None,
sta_state: Some(test_sta_state),
set_key: None,
ampdu_action: None,
sw_scan_start: None,
sw_scan_complete: None,
prepare_multicast: None,
configure_filter: None,
sched_scan_start: None,
sched_scan_stop: None,
};
let hw = ieee80211_alloc_hw_nm(
0,
(&ops as *const Ieee80211Ops).cast::<c_void>(),
ptr::null(),
);
assert!(!hw.is_null());
assert_eq!(unsafe { (*hw).ops }, &ops as *const Ieee80211Ops);
let mut vif = Ieee80211Vif {
addr: [0; 6],
drv_priv: ptr::null_mut(),
type_: 0,
cfg_assoc: false,
};
let mut sta = Ieee80211Sta {
addr: [1, 2, 3, 4, 5, 6],
drv_priv: ptr::null_mut(),
aid: 1,
};
STA_CALLBACKS.store(0, Ordering::Release);
assert_eq!(
ieee80211_sta_state(
hw,
&mut vif,
&mut sta,
IEEE80211_STA_NONE,
IEEE80211_STA_ASSOC
),
0
);
assert_eq!(
STA_CALLBACKS.load(Ordering::Acquire),
IEEE80211_STA_ASSOC as i32
);
assert!(std::ptr::eq(
ieee80211_find_sta(hw, sta.addr.as_ptr()),
&mut sta,
));
assert_eq!(ieee80211_start_tx_ba_session(&mut sta, 3, 100), 0);
assert_eq!(ieee80211_start_tx_ba_session(&mut sta, 3, 100), -16);
assert_eq!(ieee80211_stop_tx_ba_session(&mut sta, 3), 0);
assert_eq!(
ieee80211_sta_state(
hw,
&mut vif,
&mut sta,
IEEE80211_STA_ASSOC,
IEEE80211_STA_NONE
),
0
);
assert!(ieee80211_find_sta(hw, sta.addr.as_ptr()).is_null());
ieee80211_free_hw(hw);
}
#[test]
fn ieee80211_get_tid_returns_zero_for_null() {
assert_eq!(ieee80211_get_tid(ptr::null()), 0);
}
}
@@ -2,12 +2,13 @@ pub mod device;
pub mod dma;
pub mod drm_shim;
pub mod firmware;
pub mod mac80211;
pub mod net;
pub mod idr;
pub mod io;
pub mod irq;
pub mod list;
pub mod mac80211;
pub mod memory;
pub mod net;
pub mod pci;
pub mod sync;
pub mod timer;
@@ -1,7 +1,17 @@
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::ffi::c_void;
use std::ptr;
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::atomic::{AtomicI32, AtomicU32, AtomicUsize, Ordering};
// Poll-state values for the software NAPI shim below.
const NAPI_STATE_IDLE: i32 = 0;
const NAPI_STATE_SCHEDULED: i32 = 1;
// Bookkeeping shared by every clone of an skb; the data buffer is released
// when the last handle (tracked in `refcount`) goes away.
#[repr(C)]
struct SkbSharedInfo {
    refcount: AtomicUsize, // number of SkBuff handles sharing `head`
    capacity: usize,       // allocation size of the data buffer
    align: usize,          // allocation alignment of the data buffer
}
#[repr(C)]
pub struct SkBuff {
@@ -10,6 +20,19 @@ pub struct SkBuff {
pub len: u32,
pub tail: u32,
pub end: u32,
pub next: *mut SkBuff,
pub prev: *mut SkBuff,
pub network_header: i32,
pub mac_header: i32,
shared: *mut SkbSharedInfo,
}
// Doubly linked skb queue head used by the skb_queue_* helpers; `lock` is a
// layout placeholder byte and is never actually taken in this shim.
#[repr(C)]
pub struct SkBuffHead {
    pub next: *mut SkBuff, // first queued skb, or null when empty
    pub prev: *mut SkBuff, // last queued skb, or null when empty
    pub qlen: u32,         // number of queued skbs
    pub lock: u8,
}
#[repr(C)]
@@ -24,21 +47,45 @@ pub struct NetDevice {
pub ieee80211_ptr: *mut c_void,
pub priv_data: *mut c_void,
pub registered: AtomicI32,
pub tx_queue_state: AtomicU32,
pub device_attached: AtomicI32,
priv_alloc_size: usize,
priv_alloc_align: usize,
}
unsafe fn free_skb_buffer(skb: *mut SkBuff) {
// Software NAPI context: napi_schedule() invokes `poll` synchronously with
// `weight` as the budget when the state transitions IDLE -> SCHEDULED.
#[repr(C)]
pub struct NapiStruct {
    pub poll: Option<extern "C" fn(*mut NapiStruct, budget: i32) -> i32>,
    pub dev: *mut NetDevice,
    pub state: AtomicI32, // NAPI_STATE_IDLE or NAPI_STATE_SCHEDULED
    pub weight: i32,
}
unsafe fn release_skb_buffer(skb: *mut SkBuff) {
if skb.is_null() {
return;
}
let head = (*skb).head;
let end = (*skb).end as usize;
if !head.is_null() && end != 0 {
if let Ok(layout) = Layout::from_size_align(end, 16) {
dealloc(head, layout);
}
let shared = (*skb).shared;
if shared.is_null() {
return;
}
if (*shared).refcount.fetch_sub(1, Ordering::AcqRel) == 1 {
let capacity = (*shared).capacity.max(1);
let align = (*shared).align.max(1);
if !(*skb).head.is_null() {
if let Ok(layout) = Layout::from_size_align(capacity, align) {
dealloc((*skb).head, layout);
}
}
drop(Box::from_raw(shared));
}
}
// Headroom is the distance from the buffer start to the data cursor; a
// negative or non-representable offset collapses to 0.
fn skb_headroom_inner(skb: &SkBuff) -> u32 {
    let offset = unsafe { skb.data.offset_from(skb.head) };
    u32::try_from(offset).unwrap_or(0)
}
#[no_mangle]
@@ -53,12 +100,23 @@ pub extern "C" fn alloc_skb(size: u32, _gfp_mask: u32) -> *mut SkBuff {
return ptr::null_mut();
}
let shared = Box::into_raw(Box::new(SkbSharedInfo {
refcount: AtomicUsize::new(1),
capacity: capacity.max(1),
align: 16,
}));
Box::into_raw(Box::new(SkBuff {
head,
data: head,
len: 0,
tail: 0,
end: capacity as u32,
next: ptr::null_mut(),
prev: ptr::null_mut(),
network_header: 0,
mac_header: 0,
shared,
}))
}
@@ -68,7 +126,7 @@ pub extern "C" fn kfree_skb(skb: *mut SkBuff) {
return;
}
unsafe {
free_skb_buffer(skb);
release_skb_buffer(skb);
drop(Box::from_raw(skb));
}
}
@@ -80,16 +138,14 @@ pub extern "C" fn skb_reserve(skb: *mut SkBuff, len: u32) {
}
let skb_ref = unsafe { &mut *skb };
let headroom = unsafe { skb_ref.data.offset_from(skb_ref.head) };
let Ok(headroom) = u32::try_from(headroom) else {
return;
};
let headroom = skb_headroom_inner(skb_ref);
let new_headroom = headroom.saturating_add(len);
if new_headroom > skb_ref.end || skb_ref.tail != 0 || skb_ref.len != 0 {
return;
}
skb_ref.data = unsafe { skb_ref.head.add(new_headroom as usize) };
skb_ref.mac_header = new_headroom as i32;
}
#[no_mangle]
@@ -140,8 +196,8 @@ pub extern "C" fn skb_pull(skb: *mut SkBuff, len: u32) -> *mut u8 {
}
skb_ref.data = unsafe { skb_ref.data.add(len as usize) };
skb_ref.tail -= len;
skb_ref.len -= len;
skb_ref.tail = skb_ref.tail.saturating_sub(len);
skb_ref.len = skb_ref.len.saturating_sub(len);
skb_ref.data
}
@@ -151,9 +207,7 @@ pub extern "C" fn skb_headroom(skb: *const SkBuff) -> u32 {
return 0;
}
let skb_ref = unsafe { &*skb };
let headroom = unsafe { skb_ref.data.offset_from(skb_ref.head) };
u32::try_from(headroom).unwrap_or_default()
skb_headroom_inner(unsafe { &*skb })
}
#[no_mangle]
@@ -180,6 +234,194 @@ pub extern "C" fn skb_trim(skb: *mut SkBuff, len: u32) {
skb_ref.tail = new_len;
}
/// Resets a queue head to the empty state: no head, no tail, zero length,
/// lock byte cleared.
#[no_mangle]
pub extern "C" fn skb_queue_head_init(list: *mut SkBuffHead) {
    if list.is_null() {
        return;
    }
    let list_ref = unsafe { &mut *list };
    list_ref.next = ptr::null_mut();
    list_ref.prev = ptr::null_mut();
    list_ref.qlen = 0;
    list_ref.lock = 0;
}
/// Appends `newsk` at the tail of `list`, linking it behind the current
/// last element (or making it the head of an empty queue).
#[no_mangle]
pub extern "C" fn skb_queue_tail(list: *mut SkBuffHead, newsk: *mut SkBuff) {
    if list.is_null() || newsk.is_null() {
        return;
    }
    unsafe {
        let tail = (*list).prev;
        (*newsk).next = ptr::null_mut();
        (*newsk).prev = tail;
        if tail.is_null() {
            // Empty queue: the new skb is also the head.
            (*list).next = newsk;
        } else {
            (*tail).next = newsk;
        }
        (*list).prev = newsk;
        // Defensive: never leave a non-empty queue with a null head.
        if (*list).next.is_null() {
            (*list).next = newsk;
        }
        (*list).qlen = (*list).qlen.saturating_add(1);
    }
}
/// Detaches and returns the head of the queue, or null when the queue is
/// null or empty. The returned skb has its links cleared.
#[no_mangle]
pub extern "C" fn skb_dequeue(list: *mut SkBuffHead) -> *mut SkBuff {
    if list.is_null() {
        return ptr::null_mut();
    }
    unsafe {
        if (*list).qlen == 0 {
            return ptr::null_mut();
        }
        let skb = (*list).next;
        if skb.is_null() {
            return ptr::null_mut();
        }
        let new_head = (*skb).next;
        (*list).next = new_head;
        if new_head.is_null() {
            // Queue drained: also clear the tail pointer.
            (*list).prev = ptr::null_mut();
        } else {
            (*new_head).prev = ptr::null_mut();
        }
        (*skb).next = ptr::null_mut();
        (*skb).prev = ptr::null_mut();
        (*list).qlen = (*list).qlen.saturating_sub(1);
        skb
    }
}
/// Empties the queue, freeing every skb it still holds.
#[no_mangle]
pub extern "C" fn skb_queue_purge(list: *mut SkBuffHead) {
    if list.is_null() {
        return;
    }
    let mut skb = skb_dequeue(list);
    while !skb.is_null() {
        kfree_skb(skb);
        skb = skb_dequeue(list);
    }
}
/// Returns the head of the queue without removing it; null for a null or
/// empty queue.
#[no_mangle]
pub extern "C" fn skb_peek(list: *const SkBuffHead) -> *mut SkBuff {
    if list.is_null() {
        return ptr::null_mut();
    }
    match unsafe { (*list).qlen } {
        0 => ptr::null_mut(),
        _ => unsafe { (*list).next },
    }
}
/// Number of skbs currently queued; a null list reports 0.
#[no_mangle]
pub extern "C" fn skb_queue_len(list: *const SkBuffHead) -> u32 {
    if list.is_null() {
        return 0;
    }
    unsafe { (*list).qlen }
}
/// C-style boolean: 1 when the queue holds no buffers (or is null), else 0.
#[no_mangle]
pub extern "C" fn skb_queue_empty(list: *const SkBuffHead) -> i32 {
    i32::from(skb_queue_len(list) == 0)
}
/// Device-aware skb allocator; this shim ignores the device and defers
/// entirely to `alloc_skb`.
#[no_mangle]
pub extern "C" fn __netdev_alloc_skb(
    _dev: *mut NetDevice,
    length: u32,
    gfp_mask: u32,
) -> *mut SkBuff {
    alloc_skb(length, gfp_mask)
}
/// Deep-copies an skb: fresh buffer with the same capacity, headroom,
/// payload bytes and header offsets. Returns null on null input or
/// allocation failure.
#[no_mangle]
pub extern "C" fn skb_copy(src: *const SkBuff, gfp: u32) -> *mut SkBuff {
    if src.is_null() {
        return ptr::null_mut();
    }
    let source = unsafe { &*src };
    let dst = alloc_skb(source.end, gfp);
    if dst.is_null() {
        return ptr::null_mut();
    }
    // Recreate the source's layout before copying the payload.
    skb_reserve(dst, skb_headroom(src));
    let payload = skb_put(dst, source.len);
    if payload.is_null() {
        kfree_skb(dst);
        return ptr::null_mut();
    }
    if source.len > 0 {
        unsafe { ptr::copy_nonoverlapping(source.data, payload, source.len as usize) };
    }
    unsafe {
        (*dst).network_header = source.network_header;
        (*dst).mac_header = source.mac_header;
    }
    dst
}
/// Clones an skb: the data buffer is shared (refcount bumped), only the
/// per-skb bookkeeping is duplicated. Returns null for a null skb or one
/// without shared-info.
#[no_mangle]
pub extern "C" fn skb_clone(skb: *const SkBuff, _gfp: u32) -> *mut SkBuff {
    if skb.is_null() {
        return ptr::null_mut();
    }
    let original = unsafe { &*skb };
    let shared = original.shared;
    if shared.is_null() {
        return ptr::null_mut();
    }
    // Take our reference before publishing the clone.
    unsafe { (*shared).refcount.fetch_add(1, Ordering::AcqRel) };
    Box::into_raw(Box::new(SkBuff {
        head: original.head,
        data: original.data,
        len: original.len,
        tail: original.tail,
        end: original.end,
        next: ptr::null_mut(),
        prev: ptr::null_mut(),
        network_header: original.network_header,
        mac_header: original.mac_header,
        shared,
    }))
}
/// Records the network-header offset on the skb (presumably relative to
/// `head`, mirroring `mac_header` — confirm against callers).
#[no_mangle]
pub extern "C" fn skb_set_network_header(skb: *mut SkBuff, offset: i32) {
    if skb.is_null() {
        return;
    }
    unsafe { (*skb).network_header = offset };
}
/// Sets the MAC-header offset to the current headroom, i.e. the position of
/// `data` relative to `head`.
#[no_mangle]
pub extern "C" fn skb_reset_mac_header(skb: *mut SkBuff) {
    if skb.is_null() {
        return;
    }
    unsafe {
        (*skb).mac_header = skb_headroom_inner(&*skb) as i32;
    }
}
#[no_mangle]
pub extern "C" fn alloc_netdev_mqs(
sizeof_priv: usize,
@@ -200,6 +442,8 @@ pub extern "C" fn alloc_netdev_mqs(
ieee80211_ptr: ptr::null_mut(),
priv_data: ptr::null_mut(),
registered: AtomicI32::new(0),
tx_queue_state: AtomicU32::new(0),
device_attached: AtomicI32::new(1),
priv_alloc_size: 0,
priv_alloc_align: 0,
});
@@ -304,6 +548,102 @@ pub extern "C" fn netif_carrier_ok(dev: *const NetDevice) -> i32 {
}
}
/// Wires a NAPI context to a device and poll callback, starting in the
/// IDLE state. A null `napi` is ignored; a null `dev` is stored as-is.
#[no_mangle]
pub extern "C" fn netif_napi_add(
    dev: *mut NetDevice,
    napi: *mut NapiStruct,
    poll: Option<extern "C" fn(*mut NapiStruct, i32) -> i32>,
    weight: i32,
) {
    if napi.is_null() {
        return;
    }
    let napi_ref = unsafe { &mut *napi };
    napi_ref.dev = dev;
    napi_ref.poll = poll;
    napi_ref.weight = weight;
    napi_ref.state.store(NAPI_STATE_IDLE, Ordering::Release);
}
/// Schedules a NAPI context. Only the IDLE -> SCHEDULED transition wins;
/// a caller that loses the CAS does nothing. Unlike Linux, this shim then
/// runs the poll callback synchronously with the configured weight.
#[no_mangle]
pub extern "C" fn napi_schedule(napi: *mut NapiStruct) {
    if napi.is_null() {
        return;
    }
    let napi_ref = unsafe { &*napi };
    let became_scheduled = napi_ref
        .state
        .compare_exchange(
            NAPI_STATE_IDLE,
            NAPI_STATE_SCHEDULED,
            Ordering::AcqRel,
            Ordering::Acquire,
        )
        .is_ok();
    if !became_scheduled {
        return;
    }
    if let Some(poll) = napi_ref.poll {
        let _ = poll(napi, napi_ref.weight);
    }
}
/// Marks the NAPI context idle and reports whether polling finished early:
/// returns 1 when `work_done` is below the weight (budget not exhausted),
/// 0 otherwise or for null/negative input.
///
/// NOTE(review): unlike Linux napi_complete_done, the state is reset to
/// IDLE even when the budget was fully consumed; in this synchronous shim
/// nothing would otherwise re-poll a still-SCHEDULED context — confirm
/// this divergence is intended.
#[no_mangle]
pub extern "C" fn napi_complete_done(napi: *mut NapiStruct, work_done: i32) -> i32 {
    if napi.is_null() || work_done < 0 {
        return 0;
    }
    unsafe { &*napi }
        .state
        .store(NAPI_STATE_IDLE, Ordering::Release);
    if work_done < unsafe { (*napi).weight } {
        1
    } else {
        0
    }
}
/// Clears the stop bit for one TX queue (queues are bits 0..32 of the
/// device's stop mask); out-of-range indices are ignored.
#[no_mangle]
pub extern "C" fn netif_tx_wake_queue(dev: *mut NetDevice, queue_idx: u16) {
    if dev.is_null() || queue_idx >= 32 {
        return;
    }
    let dev_ref = unsafe { &*dev };
    dev_ref
        .tx_queue_state
        .fetch_and(!(1u32 << queue_idx), Ordering::AcqRel);
}
/// Sets the stop bit for one TX queue; out-of-range indices are ignored.
#[no_mangle]
pub extern "C" fn netif_tx_stop_queue(dev: *mut NetDevice, queue_idx: u16) {
    if dev.is_null() || queue_idx >= 32 {
        return;
    }
    let dev_ref = unsafe { &*dev };
    dev_ref
        .tx_queue_state
        .fetch_or(1u32 << queue_idx, Ordering::AcqRel);
}
/// Marks the device as present/attached; pairs with `netif_device_detach`.
#[no_mangle]
pub extern "C" fn netif_device_attach(dev: *mut NetDevice) {
    if dev.is_null() {
        return;
    }
    unsafe { &*dev }.device_attached.store(1, Ordering::Release);
}
/// Marks the device as detached (e.g. during suspend or removal).
#[no_mangle]
pub extern "C" fn netif_device_detach(dev: *mut NetDevice) {
    if dev.is_null() {
        return;
    }
    unsafe { &*dev }.device_attached.store(0, Ordering::Release);
}
#[cfg(test)]
mod tests {
use super::*;
@@ -311,11 +651,17 @@ mod tests {
use std::sync::atomic::AtomicUsize;
static SETUP_CALLS: AtomicUsize = AtomicUsize::new(0);
static NAPI_POLLS: AtomicUsize = AtomicUsize::new(0);
extern "C" fn test_setup(_dev: *mut NetDevice) {
SETUP_CALLS.fetch_add(1, Ordering::AcqRel);
}
extern "C" fn test_napi_poll(_napi: *mut NapiStruct, budget: i32) -> i32 {
NAPI_POLLS.fetch_add(1, Ordering::AcqRel);
budget - 1
}
#[test]
fn skb_allocation_and_growth_work() {
let skb = alloc_skb(64, 0);
@@ -343,9 +689,49 @@ mod tests {
kfree_skb(skb);
}
#[test]
fn skb_queue_copy_and_clone_work() {
let skb = alloc_skb(32, 0);
assert!(!skb.is_null());
skb_reserve(skb, 4);
let data = skb_put(skb, 6);
assert!(!data.is_null());
unsafe { ptr::copy_nonoverlapping([1u8, 2, 3, 4, 5, 6].as_ptr(), data, 6) };
skb_set_network_header(skb, 2);
skb_reset_mac_header(skb);
let copy = skb_copy(skb, 0);
assert!(!copy.is_null());
assert_eq!(unsafe { (*copy).len }, 6);
assert_eq!(unsafe { (*copy).network_header }, 2);
let clone = skb_clone(skb, 0);
assert!(!clone.is_null());
assert_eq!(unsafe { (*clone).data }, unsafe { (*skb).data });
let mut queue = SkBuffHead {
next: ptr::null_mut(),
prev: ptr::null_mut(),
qlen: 123,
lock: 1,
};
skb_queue_head_init(&mut queue);
skb_queue_tail(&mut queue, skb);
skb_queue_tail(&mut queue, copy);
assert_eq!(skb_queue_len(&queue), 2);
assert_eq!(skb_queue_empty(&queue), 0);
assert_eq!(skb_peek(&queue), skb);
assert_eq!(skb_dequeue(&mut queue), skb);
assert_eq!(skb_queue_len(&queue), 1);
kfree_skb(skb);
skb_queue_purge(&mut queue);
assert_eq!(skb_queue_empty(&queue), 1);
kfree_skb(clone);
}
#[test]
fn net_device_carrier_tracking_works() {
let name = CString::new("wlan%d").unwrap();
let name = CString::new("wlan%d").expect("valid test CString");
let dev = alloc_netdev_mqs(
0usize,
name.as_ptr().cast::<u8>(),
@@ -364,9 +750,9 @@ mod tests {
}
#[test]
fn net_device_setup_and_registration_work() {
fn net_device_setup_registration_and_queue_state_work() {
SETUP_CALLS.store(0, Ordering::Release);
let name = CString::new("wlan%d").unwrap();
let name = CString::new("wlan%d").expect("valid test CString");
let dev = alloc_netdev_mqs(
32usize,
name.as_ptr().cast::<u8>(),
@@ -380,8 +766,44 @@ mod tests {
assert_eq!(register_netdev(dev), 0);
assert_eq!(unsafe { (*dev).registered.load(Ordering::Acquire) }, 1);
assert_eq!(register_netdev(dev), -16);
netif_tx_stop_queue(dev, 2);
assert_ne!(
unsafe { (*dev).tx_queue_state.load(Ordering::Acquire) } & (1 << 2),
0
);
netif_tx_wake_queue(dev, 2);
assert_eq!(
unsafe { (*dev).tx_queue_state.load(Ordering::Acquire) } & (1 << 2),
0
);
netif_device_detach(dev);
assert_eq!(unsafe { (*dev).device_attached.load(Ordering::Acquire) }, 0);
netif_device_attach(dev);
assert_eq!(unsafe { (*dev).device_attached.load(Ordering::Acquire) }, 1);
unregister_netdev(dev);
assert_eq!(unsafe { (*dev).registered.load(Ordering::Acquire) }, 0);
free_netdev(dev);
}
#[test]
fn napi_schedule_and_complete_work() {
let mut napi = NapiStruct {
poll: None,
dev: ptr::null_mut(),
state: AtomicI32::new(99),
weight: 0,
};
NAPI_POLLS.store(0, Ordering::Release);
netif_napi_add(ptr::null_mut(), &mut napi, Some(test_napi_poll), 8);
assert_eq!(napi.weight, 8);
napi_schedule(&mut napi);
assert_eq!(NAPI_POLLS.load(Ordering::Acquire), 1);
assert_eq!(napi.state.load(Ordering::Acquire), NAPI_STATE_SCHEDULED);
assert_eq!(napi_complete_done(&mut napi, 4), 1);
assert_eq!(napi.state.load(Ordering::Acquire), NAPI_STATE_IDLE);
}
}
@@ -1,3 +1,4 @@
use std::collections::HashMap;
use std::os::raw::c_ulong;
use std::ptr;
use std::sync::Mutex;
@@ -7,8 +8,14 @@ use redox_driver_sys::pci::{enumerate_pci_all, PciDevice, PciDeviceInfo, PciLoca
// Positive errno magnitudes; call sites return them negated (-EINVAL etc.).
const EINVAL: i32 = 22;
const ENODEV: i32 = 19;
const EIO: i32 = 5;
const EBUSY: i32 = 16;
// Wildcard (all bits set) used when matching PCI device-id tables.
const PCI_ANY_ID: u32 = !0;
// pci_alloc_irq_vectors() mode flags; may be OR-ed together.
pub const PCI_IRQ_MSI: u32 = 1;
pub const PCI_IRQ_MSIX: u32 = 2;
pub const PCI_IRQ_LEGACY: u32 = 4;
pub const PCI_IRQ_NOLEGACY: u32 = 8;
#[repr(C)]
#[derive(Default)]
pub struct Device {
@@ -46,6 +53,14 @@ pub struct PciDeviceId {
driver_data: c_ulong,
}
// C-layout mirror of Linux `struct msix_entry`. Only `vector` is written by
// this shim (in pci_enable_msix_range); `entry`/`_pad` are currently unused.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct MsixEntry {
    pub vector: u32, // allocated interrupt vector number
    pub entry: u16,  // caller-chosen MSI-X table slot (unused here)
    pub _pad: u16,
}
impl Default for PciDev {
fn default() -> Self {
PciDev {
@@ -71,9 +86,16 @@ struct CurrentDevice {
ptr: usize,
}
// Bookkeeping for one device's allocated IRQ vectors.
#[derive(Clone)]
struct AllocatedVectors {
    _flags: u32,       // PCI_IRQ_* flags the allocation was made with
    vectors: Vec<i32>, // consecutive vector numbers starting at dev.irq
}
lazy_static::lazy_static! {
    // Singleton "currently probed device" state.
    static ref CURRENT_DEVICE: Mutex<Option<CurrentDevice>> = Mutex::new(None);
    // Probe callback captured at driver registration (see pci_register_driver).
    static ref REGISTERED_PROBE: Mutex<Option<PciDriverProbe>> = Mutex::new(None);
    // Per-device IRQ allocations, keyed by the PciDev pointer value.
    static ref IRQ_VECTORS: Mutex<HashMap<usize, AllocatedVectors>> = Mutex::new(HashMap::new());
}
// NOTE(review): 0x1002 is PCI_VENDOR_ID_ATI in the Linux headers; Linux's
// PCI_VENDOR_ID_AMD is 0x1022 — confirm which vendor this should match.
pub const PCI_VENDOR_ID_AMD: u16 = 0x1002;
@@ -106,6 +128,12 @@ fn open_current_device(dev: *mut PciDev) -> Result<PciDevice, i32> {
})
}
// Drops any IRQ-vector allocation recorded for this device pointer;
// best-effort, a poisoned lock is silently ignored.
fn clear_irq_vectors_for_ptr(dev_ptr: usize) {
    let _ = IRQ_VECTORS.lock().map(|mut vectors| vectors.remove(&dev_ptr));
}
fn matches_id(info: &PciDeviceInfo, id: &PciDeviceId) -> bool {
let class =
((info.class_code as u32) << 16) | ((info.subclass as u32) << 8) | info.prog_if as u32;
@@ -180,6 +208,7 @@ fn replace_current_device(location: PciLocation, dev_ptr: *mut PciDev) {
location,
ptr: dev_ptr as usize,
}) {
clear_irq_vectors_for_ptr(previous.ptr);
unsafe { drop(Box::from_raw(previous.ptr as *mut PciDev)) };
}
}
@@ -188,11 +217,53 @@ fn replace_current_device(location: PciLocation, dev_ptr: *mut PciDev) {
fn clear_current_device() {
if let Ok(mut state) = CURRENT_DEVICE.lock() {
if let Some(previous) = state.take() {
clear_irq_vectors_for_ptr(previous.ptr);
unsafe { drop(Box::from_raw(previous.ptr as *mut PciDev)) };
}
}
}
/// Core IRQ-vector allocator backing pci_alloc_irq_vectors and the MSI/MSI-X
/// wrappers.
///
/// Hands out `max_vecs` consecutive vectors starting at the device's base
/// IRQ for MSI-X, and a single vector otherwise. Returns the vector count,
/// or -EINVAL / -ENODEV / -EBUSY (negated errno) on failure.
fn allocate_vectors(dev: *mut PciDev, min_vecs: i32, max_vecs: i32, flags: u32) -> i32 {
    if dev.is_null() || min_vecs <= 0 || max_vecs <= 0 || min_vecs > max_vecs {
        return -EINVAL;
    }
    // At least one interrupt mode must be requested.
    if flags & (PCI_IRQ_MSI | PCI_IRQ_MSIX | PCI_IRQ_LEGACY) == 0 {
        return -EINVAL;
    }
    let base_irq = unsafe { (*dev).irq as i32 };
    if base_irq <= 0 {
        return -ENODEV;
    }
    let dev_key = dev as usize;
    let mut table = match IRQ_VECTORS.lock() {
        Ok(guard) => guard,
        Err(_) => return -EINVAL,
    };
    // A second allocation without an intervening free is refused.
    if table.contains_key(&dev_key) {
        return -EBUSY;
    }
    // MSI-X satisfies the full requested range; MSI/legacy yield one vector.
    let count = if flags & PCI_IRQ_MSIX != 0 { max_vecs } else { 1 };
    if count < min_vecs {
        return -EINVAL;
    }
    let allocated = (0..count).map(|offset| base_irq + offset).collect();
    table.insert(
        dev_key,
        AllocatedVectors {
            _flags: flags,
            vectors: allocated,
        },
    );
    count
}
#[no_mangle]
pub extern "C" fn pci_enable_device(dev: *mut PciDev) -> i32 {
if dev.is_null() {
@@ -337,6 +408,82 @@ pub struct PciDriver {
remove: Option<PciDriverRemove>,
}
/// Allocates between `min_vecs` and `max_vecs` interrupt vectors for `dev`
/// according to the PCI_IRQ_* `flags`; returns the vector count or a
/// negative errno. Thin exported wrapper over `allocate_vectors`.
#[no_mangle]
pub extern "C" fn pci_alloc_irq_vectors(
    dev: *mut PciDev,
    min_vecs: i32,
    max_vecs: i32,
    flags: u32,
) -> i32 {
    allocate_vectors(dev, min_vecs, max_vecs, flags)
}
/// Releases any IRQ vectors previously allocated for `dev`; a null device
/// is ignored.
#[no_mangle]
pub extern "C" fn pci_free_irq_vectors(dev: *mut PciDev) {
    if !dev.is_null() {
        clear_irq_vectors_for_ptr(dev as usize);
    }
}
/// Resolves allocated vector slot `vector_idx` for `dev` to its interrupt
/// number; -EINVAL for null/negative input, no allocation, or an index past
/// the allocated range.
#[no_mangle]
pub extern "C" fn pci_irq_vector(dev: *mut PciDev, vector_idx: i32) -> i32 {
    if dev.is_null() || vector_idx < 0 {
        return -EINVAL;
    }
    match IRQ_VECTORS.lock() {
        Ok(table) => match table.get(&(dev as usize)) {
            Some(allocated) => allocated
                .vectors
                .get(vector_idx as usize)
                .copied()
                .unwrap_or(-EINVAL),
            None => -EINVAL,
        },
        Err(_) => -EINVAL,
    }
}
/// Enables a single MSI vector; returns 1 on success or a negative errno.
#[no_mangle]
pub extern "C" fn pci_enable_msi(dev: *mut PciDev) -> i32 {
    pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI)
}
/// Releases the MSI vector previously enabled for `dev`.
#[no_mangle]
pub extern "C" fn pci_disable_msi(dev: *mut PciDev) {
    pci_free_irq_vectors(dev);
}
/// Allocates between `minvec` and `maxvec` MSI-X vectors and writes each
/// allocated vector number into the caller-provided `entries` table (which
/// must hold at least the returned count). Returns the count or a negative
/// errno.
#[no_mangle]
pub extern "C" fn pci_enable_msix_range(
    dev: *mut PciDev,
    entries: *mut MsixEntry,
    minvec: i32,
    maxvec: i32,
) -> i32 {
    if entries.is_null() {
        return -EINVAL;
    }
    let count = pci_alloc_irq_vectors(dev, minvec, maxvec, PCI_IRQ_MSIX);
    if count < 0 {
        return count;
    }
    // Publish every allocated vector into the caller's entry table.
    for index in 0..count {
        let vector = pci_irq_vector(dev, index) as u32;
        unsafe { (*entries.add(index as usize)).vector = vector };
    }
    count
}
/// Releases the MSI-X vectors previously enabled for `dev`.
#[no_mangle]
pub extern "C" fn pci_disable_msix(dev: *mut PciDev) {
    pci_free_irq_vectors(dev);
}
#[no_mangle]
pub extern "C" fn pci_register_driver(drv: *mut PciDriver) -> i32 {
if drv.is_null() {
@@ -439,3 +586,49 @@ pub extern "C" fn pci_unregister_driver(drv: *mut PciDriver) {
}
log::info!("pci_unregister_driver: cleared registered PCI driver state");
}
#[cfg(test)]
mod tests {
    use super::*;
    // Builds a default PciDev whose only populated field is the base IRQ.
    fn test_dev(irq: u32) -> PciDev {
        PciDev {
            irq,
            ..PciDev::default()
        }
    }
    #[test]
    fn pci_irq_vector_lifecycle_works() {
        let mut dev = test_dev(32);
        // A single MSI vector maps straight onto the device's base IRQ.
        assert_eq!(pci_alloc_irq_vectors(&mut dev, 1, 1, PCI_IRQ_MSI), 1);
        assert_eq!(pci_irq_vector(&mut dev, 0), 32);
        // Double allocation without a free is -EBUSY (16).
        assert_eq!(pci_alloc_irq_vectors(&mut dev, 1, 1, PCI_IRQ_MSI), -16);
        pci_free_irq_vectors(&mut dev);
        // After freeing, lookups fail with -EINVAL (22).
        assert_eq!(pci_irq_vector(&mut dev, 0), -22);
    }
    #[test]
    fn pci_msix_range_populates_entries() {
        let mut dev = test_dev(40);
        let mut entries = [MsixEntry::default(); 3];
        // MSI-X satisfies the full requested range: vectors 40..=42.
        assert_eq!(
            pci_enable_msix_range(&mut dev, entries.as_mut_ptr(), 2, 3),
            3
        );
        assert_eq!(entries[0].vector, 40);
        assert_eq!(entries[1].vector, 41);
        assert_eq!(entries[2].vector, 42);
        pci_disable_msix(&mut dev);
    }
    #[test]
    fn pci_rejects_invalid_irq_vector_requests() {
        // IRQ 0 means "no interrupt line": -ENODEV (19).
        let mut dev = test_dev(0);
        assert_eq!(pci_enable_msi(&mut dev), -19);
        // Null device pointer: -EINVAL (22).
        assert_eq!(
            pci_alloc_irq_vectors(ptr::null_mut(), 1, 1, PCI_IRQ_MSI),
            -22
        );
    }
}
@@ -1,4 +1,6 @@
use std::sync::atomic::{AtomicU8, Ordering};
use std::ptr;
use std::sync::atomic::{AtomicI32, AtomicU8, Ordering};
use std::time::{Duration, Instant};
const UNLOCKED: u8 = 0;
const LOCKED: u8 = 1;
@@ -154,14 +156,17 @@ pub extern "C" fn irqs_disabled() -> bool {
IRQ_DEPTH.load(Ordering::Acquire) > 0
}
use std::ptr;
// One-shot completion flag: `done` != 0 means completed. The padding pads
// the struct to 64 bytes, presumably to overlay the C header's completion
// blob — confirm against the C-side definition.
#[repr(C)]
pub struct Completion {
    done: AtomicU8,
    _padding: [u8; 63],
}
// C-visible mirror of the kernel's atomic_t, backed by a Rust AtomicI32.
#[repr(C)]
pub struct AtomicT {
    value: AtomicI32,
}
#[no_mangle]
pub extern "C" fn init_completion(c: *mut Completion) {
if c.is_null() {
@@ -186,6 +191,14 @@ pub extern "C" fn complete(c: *mut Completion) {
unsafe { &*c }.done.store(1, Ordering::Release);
}
/// Completes for all waiters. The stored value (u8::MAX) is not otherwise
/// significant — waiters only test `done` for non-zero.
#[no_mangle]
pub extern "C" fn complete_all(c: *mut Completion) {
    if c.is_null() {
        return;
    }
    unsafe { &*c }.done.store(u8::MAX, Ordering::Release);
}
#[no_mangle]
pub extern "C" fn wait_for_completion(c: *mut Completion) {
if c.is_null() {
@@ -196,6 +209,31 @@ pub extern "C" fn wait_for_completion(c: *mut Completion) {
}
}
/// Busy-waits (with thread yields) until the completion fires or
/// `timeout_ms` elapses. Returns 1 on completion, 0 on timeout or null.
///
/// Fix: previously an unrepresentable deadline (`Instant + huge Duration`
/// overflowing `checked_add`) collapsed to "now", turning very large
/// timeouts into an immediate timeout. An unrepresentable deadline is now
/// treated as "wait indefinitely", which matches the caller's intent.
#[no_mangle]
pub extern "C" fn wait_for_completion_timeout(c: *mut Completion, timeout_ms: u64) -> i32 {
    if c.is_null() {
        return 0;
    }
    let completion = unsafe { &*c };
    if completion.done.load(Ordering::Acquire) != 0 {
        return 1;
    }
    // None = deadline beyond Instant's range: spin without a time limit.
    let deadline = Instant::now().checked_add(Duration::from_millis(timeout_ms));
    loop {
        if completion.done.load(Ordering::Acquire) != 0 {
            return 1;
        }
        if let Some(deadline) = deadline {
            if Instant::now() >= deadline {
                return 0;
            }
        }
        std::thread::yield_now();
    }
}
#[no_mangle]
pub extern "C" fn reinit_completion(c: *mut Completion) {
if c.is_null() {
@@ -204,6 +242,109 @@ pub extern "C" fn reinit_completion(c: *mut Completion) {
unsafe { &*c }.done.store(0, Ordering::Release);
}
/// Stores `i` into the atomic; a null pointer is ignored.
#[no_mangle]
pub extern "C" fn atomic_set(v: *mut AtomicT, i: i32) {
    if v.is_null() {
        return;
    }
    unsafe { &*v }.value.store(i, Ordering::SeqCst);
}
/// Loads the current value; a null pointer reads as 0.
#[no_mangle]
pub extern "C" fn atomic_read(v: *const AtomicT) -> i32 {
    if v.is_null() {
        return 0;
    }
    unsafe { &*v }.value.load(Ordering::SeqCst)
}
/// Atomically adds `i`; a null pointer is ignored.
#[no_mangle]
pub extern "C" fn atomic_add(i: i32, v: *mut AtomicT) {
    if v.is_null() {
        return;
    }
    unsafe { &*v }.value.fetch_add(i, Ordering::SeqCst);
}
/// Atomically subtracts `i`; a null pointer is ignored.
#[no_mangle]
pub extern "C" fn atomic_sub(i: i32, v: *mut AtomicT) {
    if v.is_null() {
        return;
    }
    unsafe { &*v }.value.fetch_sub(i, Ordering::SeqCst);
}
/// Atomically increments by one.
#[no_mangle]
pub extern "C" fn atomic_inc(v: *mut AtomicT) {
    atomic_add(1, v);
}
/// Atomically decrements by one.
#[no_mangle]
pub extern "C" fn atomic_dec(v: *mut AtomicT) {
    atomic_sub(1, v);
}
/// Atomically increments and returns 1 when the new value is exactly zero
/// (kernel atomic_inc_and_test contract), else 0; null reads as 0.
#[no_mangle]
pub extern "C" fn atomic_inc_and_test(v: *mut AtomicT) -> i32 {
    if v.is_null() {
        return 0;
    }
    let previous = unsafe { &*v }.value.fetch_add(1, Ordering::SeqCst);
    // wrapping_add mirrors the two's-complement wrap the atomic itself
    // performs; the previous `+ 1` panicked in debug builds when the old
    // value was i32::MAX.
    i32::from(previous.wrapping_add(1) == 0)
}
/// Atomically decrements and returns 1 when the new value is exactly zero,
/// else 0; null reads as 0.
#[no_mangle]
pub extern "C" fn atomic_dec_and_test(v: *mut AtomicT) -> i32 {
    if v.is_null() {
        return 0;
    }
    let previous = unsafe { &*v }.value.fetch_sub(1, Ordering::SeqCst);
    // wrapping_sub mirrors the atomic's own wrap; the previous `- 1`
    // panicked in debug builds when the old value was i32::MIN.
    i32::from(previous.wrapping_sub(1) == 0)
}
/// Atomically adds `i` and returns the new value (kernel atomic_add_return);
/// null reads as 0.
#[no_mangle]
pub extern "C" fn atomic_add_return(i: i32, v: *mut AtomicT) -> i32 {
    if v.is_null() {
        return 0;
    }
    // wrapping_add matches the wrap performed on the stored value; the
    // previous plain `+ i` panicked on overflow in debug builds.
    unsafe { &*v }.value.fetch_add(i, Ordering::SeqCst).wrapping_add(i)
}
/// Atomically subtracts `i` and returns the new value; null reads as 0.
#[no_mangle]
pub extern "C" fn atomic_sub_return(i: i32, v: *mut AtomicT) -> i32 {
    if v.is_null() {
        return 0;
    }
    // wrapping_sub matches the wrap performed on the stored value; the
    // previous plain `- i` panicked on overflow in debug builds.
    unsafe { &*v }.value.fetch_sub(i, Ordering::SeqCst).wrapping_sub(i)
}
/// Atomically swaps in `new`, returning the prior value; null reads as 0.
#[no_mangle]
pub extern "C" fn atomic_xchg(v: *mut AtomicT, new: i32) -> i32 {
    if v.is_null() {
        return 0;
    }
    let target = unsafe { &*v };
    target.value.swap(new, Ordering::SeqCst)
}
/// Compare-and-swap that always reports the value observed before the
/// attempt, whether or not the swap happened (kernel cmpxchg contract);
/// null reads as 0.
#[no_mangle]
pub extern "C" fn atomic_cmpxchg(v: *mut AtomicT, old: i32, new: i32) -> i32 {
    if v.is_null() {
        return 0;
    }
    let outcome = unsafe { &*v }
        .value
        .compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst);
    outcome.unwrap_or_else(|observed| observed)
}
#[cfg(test)]
mod tests {
use super::*;
@@ -228,4 +369,48 @@ mod tests {
local_irq_enable();
assert!(!irqs_disabled());
}
#[test]
fn atomic_operations_cover_all_paths() {
    // Walk every exported atomic_* shim through a fixed arithmetic sequence
    // and check each intermediate value, including the inc/dec_and_test
    // zero-crossing cases.
    let mut value = AtomicT {
        value: AtomicI32::new(0),
    };
    atomic_set(&mut value, 3);
    assert_eq!(atomic_read(&value), 3);
    atomic_add(4, &mut value);
    assert_eq!(atomic_read(&value), 7);
    atomic_sub(2, &mut value);
    assert_eq!(atomic_read(&value), 5);
    atomic_inc(&mut value);
    atomic_dec(&mut value);
    assert_eq!(atomic_add_return(5, &mut value), 10);
    assert_eq!(atomic_sub_return(3, &mut value), 7);
    // xchg returns the prior value; cmpxchg returns the observed value on
    // both the failing (expected 10) and succeeding (expected 11) attempts.
    assert_eq!(atomic_xchg(&mut value, 11), 7);
    assert_eq!(atomic_cmpxchg(&mut value, 10, 12), 11);
    assert_eq!(atomic_cmpxchg(&mut value, 11, 13), 11);
    assert_eq!(atomic_read(&value), 13);
    atomic_set(&mut value, -1);
    assert_eq!(atomic_inc_and_test(&mut value), 1);
    atomic_set(&mut value, 1);
    assert_eq!(atomic_dec_and_test(&mut value), 1);
}
#[test]
fn completion_timeout_and_complete_all_work() {
    // A fresh completion times out; any completed form (complete_all or
    // complete) makes the timed wait return immediately with 1, and
    // reinit_completion arms it again.
    let mut completion = Completion {
        done: AtomicU8::new(0),
        _padding: [0; 63],
    };
    assert_eq!(wait_for_completion_timeout(&mut completion, 1), 0);
    complete_all(&mut completion);
    assert_eq!(wait_for_completion_timeout(&mut completion, 1), 1);
    reinit_completion(&mut completion);
    assert_eq!(wait_for_completion_timeout(&mut completion, 1), 0);
    complete(&mut completion);
    wait_for_completion(&mut completion);
    assert_eq!(wait_for_completion_timeout(&mut completion, 1), 1);
}
}
@@ -1,10 +1,28 @@
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::collections::HashMap;
use std::ffi::c_void;
use std::ptr;
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::Mutex;
use super::net::{netif_carrier_off, netif_carrier_on, NetDevice};
// Last-seen cfg80211 event values, recorded per wiphy/wdev pointer so tests
// and callers can observe what was reported.
#[derive(Clone, Default)]
struct WirelessEventState {
    new_sta: Option<[u8; 6]>, // MAC of the most recent cfg80211_new_sta
    mgmt_rx_freq: u32,        // frequency of the last received mgmt frame
    mgmt_rx_signal: i32,      // signal (dBm) of the last received mgmt frame
    mgmt_rx_len: usize,       // byte length of the last received mgmt frame
    mgmt_tx_cookie: u64,      // cookie of the last mgmt TX status report
    mgmt_tx_len: usize,       // byte length of the last mgmt TX frame
    mgmt_tx_ack: bool,        // whether the last mgmt TX was acked
    sched_scan_reqid: u64,    // request id of the last sched-scan result
}
lazy_static::lazy_static! {
    // Event state keyed by the wiphy (or wdev) pointer value.
    static ref WIRELESS_EVENTS: Mutex<HashMap<usize, WirelessEventState>> = Mutex::new(HashMap::new());
}
#[repr(C)]
pub struct Wiphy {
pub priv_data: *mut c_void,
@@ -76,6 +94,15 @@ pub struct StationParameters {
pub sta_flags_set: u32,
}
// Applies `update` to the event record for `key`, creating the record on
// first use. Best-effort: a poisoned lock simply drops the event.
fn update_event_state<F>(key: usize, update: F)
where
    F: FnOnce(&mut WirelessEventState),
{
    let Ok(mut events) = WIRELESS_EVENTS.lock() else {
        return;
    };
    let state = events.entry(key).or_default();
    update(state);
}
#[no_mangle]
pub extern "C" fn wiphy_new_nm(
_ops: *const c_void,
@@ -114,6 +141,9 @@ pub extern "C" fn wiphy_free(wiphy: *mut Wiphy) {
if wiphy.is_null() {
return;
}
if let Ok(mut events) = WIRELESS_EVENTS.lock() {
events.remove(&(wiphy as usize));
}
unsafe {
let wiphy_box = Box::from_raw(wiphy);
if !wiphy_box.priv_data.is_null() {
@@ -283,6 +313,254 @@ pub extern "C" fn cfg80211_ready_on_channel(
) {
}
// C-layout channel descriptor modelled on cfg80211's ieee80211_channel.
#[repr(C)]
pub struct Ieee80211Channel {
    pub band: u32,            // NL80211_BAND_* index
    pub center_freq: u16,     // center frequency in MHz
    pub hw_value: u16,        // hardware-specific channel value
    pub flags: u32,           // IEEE80211_CHAN_* restriction bits
    pub max_power: i8,        // maximum TX power (dBm)
    pub max_reg_power: i8,    // regulatory maximum TX power (dBm)
    pub max_antenna_gain: i8, // maximum antenna gain (dBi)
    pub beacon_found: bool,   // a beacon was seen on this channel
}
// nl80211 band indices.
pub const NL80211_BAND_2GHZ: u32 = 0;
pub const NL80211_BAND_5GHZ: u32 = 1;
pub const NL80211_BAND_6GHZ: u32 = 2;
// Channel restriction flag bits (local values; NOTE(review): bit positions
// do not all match Linux's enum ieee80211_channel_flags — confirm no code
// round-trips these against kernel-defined values).
pub const IEEE80211_CHAN_DISABLED: u32 = 1 << 0;
pub const IEEE80211_CHAN_NO_IR: u32 = 1 << 1;
pub const IEEE80211_CHAN_RADAR: u32 = 1 << 2;
pub const IEEE80211_CHAN_NO_HT40PLUS: u32 = 1 << 3;
pub const IEEE80211_CHAN_NO_HT40MINUS: u32 = 1 << 4;
pub const IEEE80211_CHAN_NO_OFDM: u32 = 1 << 5;
pub const IEEE80211_CHAN_NO_80MHZ: u32 = 1 << 6;
pub const IEEE80211_CHAN_NO_160MHZ: u32 = 1 << 7;
// C-layout bitrate descriptor modelled on cfg80211's ieee80211_rate.
#[repr(C)]
pub struct Ieee80211Rate {
    pub flags: u32,           // IEEE80211_RATE_* bits
    pub bitrate: u16,         // rate in 100 kbit/s units
    pub hw_value: u16,        // hardware-specific rate value
    pub hw_value_short: u16,  // hardware value for short-preamble use
}
// Rate flag bits.
pub const IEEE80211_RATE_SHORT_PREAMBLE: u32 = 1 << 0;
pub const IEEE80211_RATE_MANDATORY: u32 = 1 << 1;
pub const IEEE80211_RATE_ERP_G: u32 = 1 << 2;
// Per-band capability table: channel and bitrate arrays are borrowed raw
// pointers owned by the driver, with explicit element counts.
#[repr(C)]
pub struct Ieee80211SupportedBand {
    pub channels: *mut Ieee80211Channel,
    pub n_channels: usize,
    pub bitrates: *mut Ieee80211Rate,
    pub n_bitrates: usize,
    pub ht_cap: *mut c_void,  // opaque HT capability blob (unused here)
    pub vht_cap: *mut c_void, // opaque VHT capability blob (unused here)
}
/// Validates a band registration on a wiphy. Returns 0 when the band index
/// is known and the channel list is non-empty, -22 (-EINVAL) otherwise.
/// The band data itself is not retained by this scaffolding.
#[no_mangle]
pub extern "C" fn wiphy_bands_append(
    wiphy: *mut Wiphy,
    band_idx: u32,
    band: *mut Ieee80211SupportedBand,
) -> i32 {
    if wiphy.is_null() || band.is_null() || band_idx > NL80211_BAND_6GHZ {
        return -22;
    }
    let band_ref = unsafe { &*band };
    let has_channels = !band_ref.channels.is_null() && band_ref.n_channels > 0;
    if has_channels {
        0
    } else {
        -22
    }
}
// Minimal BSS descriptor handed out by cfg80211_inform_bss and released by
// cfg80211_put_bss.
#[repr(C)]
pub struct Cfg80211Bss {
    pub bssid: [u8; 6],
    pub channel: *mut Ieee80211Channel, // not populated by cfg80211_inform_bss (null)
    pub signal: i16,                    // signal clamped into i16 range
    pub capability: u16,
    pub beacon_interval: u16,
    // NOTE(review): `ies` borrows the caller's buffer — cfg80211_put_bss does
    // not free it, so the BSS must not outlive the IE buffer. Confirm callers
    // uphold this.
    pub ies: *const u8,
    pub ies_len: usize,
}
/// Builds a heap-allocated BSS entry from scan-report parameters.
///
/// `_freq`/`_tsf` are currently ignored and `channel` is left null; `ies`
/// is stored as-is (borrowed — the caller's buffer must outlive the BSS).
/// Returns null when wiphy, wdev or bssid is null; otherwise the caller
/// owns the result and releases it with cfg80211_put_bss.
#[no_mangle]
pub extern "C" fn cfg80211_inform_bss(
    wiphy: *mut Wiphy,
    wdev: *mut WirelessDev,
    _freq: u32,
    bssid: *const u8,
    _tsf: u64,
    capability: u16,
    beacon_interval: u16,
    ies: *const u8,
    ies_len: usize,
    signal: i32,
    _gfp: u32,
) -> *mut Cfg80211Bss {
    if wiphy.is_null() || wdev.is_null() || bssid.is_null() {
        return ptr::null_mut();
    }
    let mut bssid_bytes = [0u8; 6];
    unsafe { ptr::copy_nonoverlapping(bssid, bssid_bytes.as_mut_ptr(), bssid_bytes.len()) };
    // Signal is clamped so extreme i32 inputs do not wrap in the i16 field.
    let clamped_signal = signal.clamp(i16::MIN as i32, i16::MAX as i32) as i16;
    Box::into_raw(Box::new(Cfg80211Bss {
        bssid: bssid_bytes,
        channel: ptr::null_mut(),
        signal: clamped_signal,
        capability,
        beacon_interval,
        ies,
        ies_len,
    }))
}
/// Releases a BSS obtained from cfg80211_inform_bss; the borrowed `ies`
/// buffer is not freed here. Null is ignored.
#[no_mangle]
pub extern "C" fn cfg80211_put_bss(bss: *mut Cfg80211Bss) {
    if !bss.is_null() {
        unsafe { drop(Box::from_raw(bss)) };
    }
}
/// Stub BSS lookup: arguments are validated, but no scan cache exists yet,
/// so every query reports "not found" (null).
#[no_mangle]
pub extern "C" fn cfg80211_get_bss(
    wiphy: *mut Wiphy,
    band: u32,
    _bssid: *const u8,
    _ssid: *const u8,
    _ssid_len: usize,
    _bss_type: u32,
    _privacy: u32,
) -> *mut Cfg80211Bss {
    if wiphy.is_null() || band > NL80211_BAND_6GHZ {
        return ptr::null_mut();
    }
    ptr::null_mut()
}
/// Records a newly associated station's MAC in the event state of the wiphy
/// that owns `dev`. Silently does nothing when the netdev is not wired to a
/// wireless dev/wiphy.
#[no_mangle]
pub extern "C" fn cfg80211_new_sta(
    dev: *mut c_void,
    mac_addr: *const u8,
    _params: *const StationParameters,
    _gfp: u32,
) {
    if dev.is_null() || mac_addr.is_null() {
        return;
    }
    let wdev = netdev_to_wireless_dev(dev);
    if wdev.is_null() {
        return;
    }
    let wiphy = unsafe { (*wdev).wiphy };
    if wiphy.is_null() {
        return;
    }
    let mut addr = [0u8; 6];
    unsafe { ptr::copy_nonoverlapping(mac_addr, addr.as_mut_ptr(), addr.len()) };
    update_event_state(wiphy as usize, |state| state.new_sta = Some(addr));
}
#[no_mangle]
/// Record reception of a management frame on `wdev`.
///
/// Stores frequency, signal level and frame length in the per-wdev event
/// state. Calls with a null wdev, or an inconsistent buffer (null pointer
/// with non-zero length), are ignored.
pub extern "C" fn cfg80211_rx_mgmt(
    wdev: *mut WirelessDev,
    freq: u32,
    sig_dbm: i32,
    buf: *const u8,
    len: usize,
    _gfp: u32,
) {
    if wdev.is_null() {
        return;
    }
    if buf.is_null() && len != 0 {
        return;
    }
    update_event_state(wdev as usize, |state| {
        state.mgmt_rx_freq = freq;
        state.mgmt_rx_signal = sig_dbm;
        state.mgmt_rx_len = len;
    });
}
#[no_mangle]
/// Record the TX completion status of a management frame on `wdev`.
///
/// Stores cookie, frame length and ack flag in the per-wdev event state.
/// Calls with a null wdev, or an inconsistent buffer (null pointer with
/// non-zero length), are ignored.
pub extern "C" fn cfg80211_mgmt_tx_status(
    wdev: *mut WirelessDev,
    cookie: u64,
    buf: *const u8,
    len: usize,
    ack: bool,
    _gfp: u32,
) {
    if wdev.is_null() {
        return;
    }
    if buf.is_null() && len != 0 {
        return;
    }
    update_event_state(wdev as usize, |state| {
        state.mgmt_tx_cookie = cookie;
        state.mgmt_tx_len = len;
        state.mgmt_tx_ack = ack;
    });
}
#[no_mangle]
/// Record that scheduled-scan results are available for `wiphy`.
///
/// Stores the most recent request id in the per-wiphy event state; a null
/// wiphy is ignored.
pub extern "C" fn cfg80211_sched_scan_results(wiphy: *mut Wiphy, reqid: u64) {
    if !wiphy.is_null() {
        update_event_state(wiphy as usize, |state| state.sched_scan_reqid = reqid);
    }
}
#[no_mangle]
/// Convert a channel number within a band to its center frequency in MHz.
///
/// Returns 0 for unknown bands or invalid channel numbers — including
/// channel 0, which previously slipped through the 5 GHz arm and produced a
/// bogus 5000 MHz instead of the 0 "invalid" sentinel used everywhere else.
pub extern "C" fn ieee80211_channel_to_frequency(chan: u32, band: u32) -> u32 {
    match band {
        NL80211_BAND_2GHZ => match chan {
            // Channel 14 is the Japanese outlier at 2484 MHz.
            14 => 2484,
            1..=13 => 2407 + chan * 5,
            _ => 0,
        },
        NL80211_BAND_5GHZ => {
            if chan == 0 {
                // Channel 0 is never valid; report the invalid sentinel.
                0
            } else {
                5000 + chan * 5
            }
        }
        NL80211_BAND_6GHZ => match chan {
            0 => 0,
            // 6 GHz channel 2 sits at 5935 MHz, off the regular grid.
            2 => 5935,
            _ => 5950 + chan * 5,
        },
        _ => 0,
    }
}
#[no_mangle]
/// Convert a center frequency in MHz back to its channel number.
///
/// Handles 2.4 GHz (incl. the 2484 MHz outlier channel 14), 5 GHz, and
/// 6 GHz (incl. the 5935 MHz outlier channel 2). Frequencies outside the
/// known ranges map to 0.
pub extern "C" fn ieee80211_frequency_to_channel(freq: u32) -> u32 {
    if freq == 2484 {
        // Japanese 2.4 GHz outlier: channel 14.
        14
    } else if (2412..=2472).contains(&freq) {
        (freq - 2407) / 5
    } else if (5000..=5895).contains(&freq) {
        (freq - 5000) / 5
    } else if freq == 5935 {
        // 6 GHz outlier: channel 2 sits off the regular 5950 MHz grid.
        2
    } else if (5955..=7115).contains(&freq) {
        (freq - 5950) / 5
    } else {
        0
    }
}
#[cfg(test)]
mod tests {
use super::*;
@@ -303,7 +581,7 @@ mod tests {
#[test]
fn scan_and_connect_lifecycle_updates_wireless_state() {
let name = CString::new("wlan%d").unwrap();
let name = CString::new("wlan%d").expect("valid test CString");
let dev = alloc_netdev_mqs(0, name.as_ptr().cast::<u8>(), 0, None, 1, 1);
assert!(!dev.is_null());
@@ -366,4 +644,78 @@ mod tests {
wiphy_free(wiphy);
free_netdev(dev);
}
#[test]
fn ieee80211_channel_creation_and_flags_work() {
let channel = Ieee80211Channel {
band: NL80211_BAND_5GHZ,
center_freq: 5180,
hw_value: 36,
flags: IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_80MHZ,
max_power: 20,
max_reg_power: 23,
max_antenna_gain: 6,
beacon_found: true,
};
assert_eq!(channel.band, NL80211_BAND_5GHZ);
assert_eq!(channel.center_freq, 5180);
assert_eq!(channel.hw_value, 36);
assert_ne!(channel.flags & IEEE80211_CHAN_NO_IR, 0);
assert_ne!(channel.flags & IEEE80211_CHAN_RADAR, 0);
assert_ne!(channel.flags & IEEE80211_CHAN_NO_80MHZ, 0);
assert_eq!(channel.flags & IEEE80211_CHAN_DISABLED, 0);
assert!(channel.beacon_found);
}
#[test]
fn cfg80211_events_and_channel_frequency_conversions_work() {
let name = CString::new("wlan%d").expect("valid test CString");
let dev = alloc_netdev_mqs(0, name.as_ptr().cast::<u8>(), 0, None, 1, 1);
assert!(!dev.is_null());
let wiphy = wiphy_new_nm(ptr::null(), 0, ptr::null());
assert!(!wiphy.is_null());
let mut wdev = WirelessDev {
wiphy,
netdev: dev.cast::<c_void>(),
iftype: 0,
scan_in_flight: false,
scan_aborted: false,
connecting: false,
connected: false,
locally_generated: false,
last_status: 0,
last_reason: 0,
has_bssid: false,
last_bssid: [0; 6],
};
unsafe { (*dev).ieee80211_ptr = (&mut wdev as *mut WirelessDev).cast::<c_void>() };
let sta = [6u8, 5, 4, 3, 2, 1];
cfg80211_new_sta(dev.cast::<c_void>(), sta.as_ptr(), ptr::null(), 0);
cfg80211_rx_mgmt(&mut wdev, 2412, -42, sta.as_ptr(), sta.len(), 0);
cfg80211_mgmt_tx_status(&mut wdev, 99, sta.as_ptr(), sta.len(), true, 0);
cfg80211_sched_scan_results(wiphy, 1234);
let events = WIRELESS_EVENTS.lock().expect("wireless events lock");
let wiphy_state = events.get(&(wiphy as usize)).expect("wiphy event state");
assert_eq!(wiphy_state.new_sta, Some(sta));
assert_eq!(wiphy_state.sched_scan_reqid, 1234);
let wdev_state = events
.get(&((&mut wdev as *mut WirelessDev) as usize))
.expect("wdev event state");
assert_eq!(wdev_state.mgmt_rx_freq, 2412);
assert_eq!(wdev_state.mgmt_rx_signal, -42);
assert_eq!(wdev_state.mgmt_tx_cookie, 99);
assert!(wdev_state.mgmt_tx_ack);
drop(events);
assert_eq!(ieee80211_channel_to_frequency(1, NL80211_BAND_2GHZ), 2412);
assert_eq!(ieee80211_channel_to_frequency(36, NL80211_BAND_5GHZ), 5180);
assert_eq!(ieee80211_frequency_to_channel(2484), 14);
assert_eq!(ieee80211_frequency_to_channel(5955), 1);
wiphy_free(wiphy);
free_netdev(dev);
}
}