milestone: desktop path Phases 1-5

Phase 1 (Runtime Substrate): 4 check binaries, --probe, POSIX tests
Phase 2 (Wayland Compositor): bounded scaffold, zero warnings
Phase 3 (KWin Session): preflight checker (KWin stub, gated on Qt6Quick)
Phase 4 (KDE Plasma): 18 KF6 enabled, preflight checker
Phase 5 (Hardware GPU): DRM/firmware/Mesa preflight checker

Build: zero warnings, all scripts syntax-clean. Oracle-verified.
This commit is contained in:
2026-04-29 09:54:06 +01:00
parent b23714f542
commit 8acc73d774
508 changed files with 76526 additions and 396 deletions
@@ -0,0 +1 @@
/target
@@ -0,0 +1,20 @@
[package]
name = "ahcid"
description = "AHCI (SATA standard) driver"
version = "0.1.0"
edition = "2021"
[dependencies]
byteorder = "1.2"
log.workspace = true
redox_syscall = { workspace = true, features = ["std"] }
common = { path = "../../common" }
daemon = { path = "../../../daemon" }
driver-block = { path = "../driver-block" }
pcid = { path = "../../pcid" }
libredox.workspace = true
redox_event.workspace = true
[lints]
workspace = true
@@ -0,0 +1,185 @@
use std::convert::TryInto;
use std::ptr;
use syscall::error::Result;
use common::dma::Dma;
use super::hba::{HbaCmdHeader, HbaCmdTable, HbaPort};
use super::Disk;
/// Direction of an in-flight transfer, carrying the caller's buffer:
/// `Read` fills the buffer from disk, `Write` sends its contents to disk.
enum BufferKind<'a> {
    Read(&'a mut [u8]),
    Write(&'a [u8]),
}
/// State of a (possibly multi-chunk) disk request that is serviced
/// incrementally across calls to `DiskATA::request`.
struct Request {
    // Caller buffer address; used to recognize the same request on re-entry.
    address: usize,
    // Total transfer length in 512-byte sectors.
    total_sectors: usize,
    // Sectors completed so far.
    sector: usize,
    // Currently issued HBA command: (command slot, sector count), if any.
    running_opt: Option<(u32, usize)>,
}
/// Driver state for one ATA (SATA) disk attached to an AHCI port.
pub struct DiskATA {
    id: usize,
    port: &'static mut HbaPort,
    // Capacity in bytes, as reported by IDENTIFY (0 if identify failed).
    size: u64,
    // At most one request is serviced at a time; see `request`.
    request_opt: Option<Request>,
    // Command list: 32 command headers, one per HBA slot.
    clb: Dma<[HbaCmdHeader; 32]>,
    // One command table per slot.
    ctbas: [Dma<HbaCmdTable>; 32],
    // Received-FIS area; held only to keep the DMA allocation alive.
    _fb: Dma<[u8; 256]>,
    // Bounce buffer for DMA: 256 sectors of 512 bytes (128 KiB).
    buf: Dma<[u8; 256 * 512]>,
}
impl DiskATA {
    /// Allocate the DMA structures for `port`, initialize the port, and
    /// IDENTIFY the attached disk.
    ///
    /// Returns `Err` if any DMA allocation or the port init fails; an
    /// IDENTIFY failure is tolerated and reported as size 0.
    pub fn new(id: usize, port: &'static mut HbaPort) -> Result<Self> {
        let mut clb = unsafe { Dma::zeroed()?.assume_init() };
        let mut ctbas: [_; 32] = (0..32)
            .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() }))
            .collect::<Result<Vec<_>>>()?
            .try_into()
            // Vec has exactly 32 elements, so the conversion cannot fail.
            .unwrap_or_else(|_| unreachable!());
        let mut fb = unsafe { Dma::zeroed()?.assume_init() };
        let buf = unsafe { Dma::zeroed()?.assume_init() };

        port.init(&mut clb, &mut ctbas, &mut fb)?;

        let size = unsafe { port.identify(&mut clb, &mut ctbas).unwrap_or(0) };

        // Use field-init shorthand instead of the redundant `id: id` form.
        Ok(DiskATA {
            id,
            port,
            size,
            request_opt: None,
            clb,
            ctbas,
            _fb: fb,
            buf,
        })
    }

    /// Drive one step of a (possibly chunked) transfer starting at `block`.
    ///
    /// Returns `Ok(Some(bytes))` when the whole request has completed,
    /// `Ok(None)` when the caller should retry later (command still running,
    /// or a different request currently owns the port).
    fn request(&mut self, block: u64, mut buffer_kind: BufferKind) -> Result<Option<usize>> {
        let (write, address, total_sectors) = match buffer_kind {
            BufferKind::Read(ref buffer) => (false, buffer.as_ptr() as usize, buffer.len() / 512),
            BufferKind::Write(ref buffer) => (true, buffer.as_ptr() as usize, buffer.len() / 512),
        };
        loop {
            let mut request = match self.request_opt.take() {
                Some(request) => {
                    if address == request.address && total_sectors == request.total_sectors {
                        // Keep servicing current request
                        request
                    } else {
                        // Have to wait for another request to finish
                        self.request_opt = Some(request);
                        return Ok(None);
                    }
                }
                None => {
                    // Create new request
                    Request {
                        address,
                        total_sectors,
                        sector: 0,
                        running_opt: None,
                    }
                }
            };
            // Finish a previously running request
            if let Some(running) = request.running_opt.take() {
                if self.port.ata_running(running.0) {
                    // Continue waiting for request
                    request.running_opt = Some(running);
                    self.request_opt = Some(request);
                    return Ok(None);
                }
                self.port.ata_stop(running.0)?;
                // Reads go through the DMA bounce buffer; copy the chunk out.
                if let BufferKind::Read(ref mut buffer) = buffer_kind {
                    unsafe {
                        ptr::copy(
                            self.buf.as_ptr(),
                            buffer.as_mut_ptr().add(request.sector * 512),
                            running.1 * 512,
                        );
                    }
                }
                request.sector += running.1;
            }
            if request.sector < request.total_sectors {
                // Start a new request, at most 255 sectors per HBA command.
                let sectors = if request.total_sectors - request.sector >= 255 {
                    255
                } else {
                    request.total_sectors - request.sector
                };
                // Writes stage the chunk into the bounce buffer first.
                if let BufferKind::Write(ref buffer) = buffer_kind {
                    unsafe {
                        ptr::copy(
                            buffer.as_ptr().add(request.sector * 512),
                            self.buf.as_mut_ptr(),
                            sectors * 512,
                        );
                    }
                }
                if let Some(slot) = self.port.ata_dma(
                    block + request.sector as u64,
                    sectors,
                    write,
                    &mut self.clb,
                    &mut self.ctbas,
                    &mut self.buf,
                )? {
                    request.running_opt = Some((slot, sectors));
                }
                self.request_opt = Some(request);
                // TODO: support async internally
                return Ok(None);
            } else {
                // Done
                return Ok(Some(request.sector * 512));
            }
        }
    }
}
impl Disk for DiskATA {
    /// This driver only speaks 512-byte ATA sectors.
    fn block_size(&self) -> u32 {
        512
    }

    /// Capacity in bytes, as discovered by IDENTIFY at construction.
    fn size(&self) -> u64 {
        self.size
    }

    async fn read(&mut self, block: u64, buffer: &mut [u8]) -> Result<usize> {
        //TODO: FIGURE OUT WHY INTERRUPTS CAUSE HANGS
        // Poll `request` until the transfer completes, yielding between polls.
        loop {
            if let Some(count) = self.request(block, BufferKind::Read(buffer))? {
                return Ok(count);
            }
            std::thread::yield_now();
        }
    }

    async fn write(&mut self, block: u64, buffer: &[u8]) -> Result<usize> {
        //TODO: FIGURE OUT WHY INTERRUPTS CAUSE HANGS
        // Same polling strategy as `read`, with a write-direction buffer.
        loop {
            if let Some(count) = self.request(block, BufferKind::Write(buffer))? {
                return Ok(count);
            }
            std::thread::yield_now();
        }
    }
}
@@ -0,0 +1,148 @@
#![allow(dead_code)]
use std::convert::TryInto;
use std::ptr;
use byteorder::{BigEndian, ByteOrder};
use syscall::error::{Error, Result, EBADF};
use common::dma::Dma;
use super::hba::{HbaCmdHeader, HbaCmdTable, HbaPort};
use super::Disk;
// SCSI (MMC) opcodes issued to ATAPI devices via PACKET commands.
const SCSI_READ_CAPACITY: u8 = 0x25;
const SCSI_READ10: u8 = 0x28;
/// Driver state for one ATAPI (e.g. optical) device on an AHCI port.
pub struct DiskATAPI {
    id: usize,
    port: &'static mut HbaPort,
    // Value returned by IDENTIFY PACKET (0 on failure); capacity is
    // reported via `blk_count * blk_size` instead.
    size: u64,
    // Command list: 32 command headers, one per HBA slot.
    clb: Dma<[HbaCmdHeader; 32]>,
    // One command table per slot.
    ctbas: [Dma<HbaCmdTable>; 32],
    // Received-FIS area; held only to keep the DMA allocation alive.
    _fb: Dma<[u8; 256]>,
    // Just using the same buffer size as DiskATA
    // Although the sector size is different (and varies)
    buf: Dma<[u8; 256 * 512]>,
    // Device geometry from READ CAPACITY: block count and block size.
    blk_count: u32,
    blk_size: u32,
}
impl DiskATAPI {
    /// Allocate DMA structures for `port`, initialize it, identify the
    /// packet device, and query its capacity with SCSI READ CAPACITY.
    pub fn new(id: usize, port: &'static mut HbaPort) -> Result<Self> {
        let mut clb = unsafe { Dma::zeroed()?.assume_init() };
        let mut ctbas: [_; 32] = (0..32)
            .map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() }))
            .collect::<Result<Vec<_>>>()?
            .try_into()
            // Exactly 32 elements were collected, so this cannot fail.
            .unwrap_or_else(|_| unreachable!());
        let mut fb = unsafe { Dma::zeroed()?.assume_init() };
        let mut buf = unsafe { Dma::zeroed()?.assume_init() };
        port.init(&mut clb, &mut ctbas, &mut fb)?;
        // IDENTIFY PACKET failure is tolerated; size falls back to 0.
        let size = unsafe { port.identify_packet(&mut clb, &mut ctbas).unwrap_or(0) };
        // READ CAPACITY returns 8 bytes: last LBA and block size, big-endian.
        let mut cmd = [0; 16];
        cmd[0] = SCSI_READ_CAPACITY;
        port.atapi_dma(&cmd, 8, &mut clb, &mut ctbas, &mut buf)?;
        // Instead of a count, contains number of last LBA, so add 1
        let blk_count = BigEndian::read_u32(&buf[0..4]) + 1;
        let blk_size = BigEndian::read_u32(&buf[4..8]);
        Ok(DiskATAPI {
            id,
            port,
            size,
            clb,
            ctbas,
            _fb: fb,
            buf,
            blk_count,
            blk_size,
        })
    }
}
impl Disk for DiskATAPI {
    /// Device block size from READ CAPACITY (e.g. 2048 for CD/DVD media).
    fn block_size(&self) -> u32 {
        self.blk_size
    }

    /// Capacity in bytes: block count times block size.
    fn size(&self) -> u64 {
        u64::from(self.blk_count) * u64::from(self.blk_size)
    }

    /// Read whole device blocks starting at `block` into `buffer`, chunked
    /// through the 128 KiB DMA bounce buffer. Returns the bytes read.
    async fn read(&mut self, block: u64, buffer: &mut [u8]) -> Result<usize> {
        // TODO: Handle audio CDs, which use special READ CD command
        let blk_len = self.blk_size;
        let sectors = buffer.len() as u32 / blk_len;

        // Build a SCSI READ(10) CDB: 32-bit LBA, 16-bit block count.
        fn read10_cmd(block: u32, count: u16) -> [u8; 16] {
            let mut cmd = [0; 16];
            cmd[0] = SCSI_READ10;
            BigEndian::write_u32(&mut cmd[2..6], block as u32);
            BigEndian::write_u16(&mut cmd[7..9], count as u16);
            cmd
        }

        let mut sector = 0;
        // Number of device blocks that fit in the bounce buffer, and the
        // byte size of that many blocks.
        let buf_len = (256 * 512) / blk_len;
        let buf_size = buf_len * blk_len;

        while sectors - sector >= buf_len {
            let cmd = read10_cmd(block as u32 + sector, buf_len as u16);
            self.port.atapi_dma(
                &cmd,
                buf_size,
                &mut self.clb,
                &mut self.ctbas,
                &mut self.buf,
            )?;
            unsafe {
                ptr::copy(
                    self.buf.as_ptr(),
                    buffer
                        .as_mut_ptr()
                        .offset(sector as isize * blk_len as isize),
                    buf_size as usize,
                );
            }
            // BUG FIX: `sector` counts device blocks, so advance by the
            // number of blocks just transferred (buf_len), not by the block
            // size in bytes (blk_len). The old `sector += blk_len` overshot
            // `sectors` and made the next `sectors - sector` underflow.
            sector += buf_len;
        }

        if sector < sectors {
            // Final partial chunk of (sectors - sector) blocks.
            let cmd = read10_cmd(block as u32 + sector, (sectors - sector) as u16);
            // NOTE(review): this still programs `buf_size` bytes into the
            // PRDT even though only (sectors - sector) * blk_len will arrive;
            // appears harmless but confirm against the AHCI/MMC specs.
            self.port.atapi_dma(
                &cmd,
                buf_size,
                &mut self.clb,
                &mut self.ctbas,
                &mut self.buf,
            )?;
            unsafe {
                ptr::copy(
                    self.buf.as_ptr(),
                    buffer
                        .as_mut_ptr()
                        .offset(sector as isize * blk_len as isize),
                    ((sectors - sector) * blk_len) as usize,
                );
            }
            sector += sectors - sector;
        }

        Ok((sector * blk_len) as usize)
    }

    /// Writing to ATAPI media is not implemented; always fails with EBADF.
    async fn write(&mut self, _block: u64, _buffer: &[u8]) -> Result<usize> {
        Err(Error::new(EBADF)) // TODO: Implement writing
    }
}
@@ -0,0 +1,157 @@
use common::io::Mmio;
/// SATA Frame Information Structure type codes (first byte of every FIS).
#[repr(u8)]
pub enum FisType {
    /// Register FIS - host to device
    RegH2D = 0x27,
    /// Register FIS - device to host
    RegD2H = 0x34,
    /// DMA activate FIS - device to host
    DmaAct = 0x39,
    /// DMA setup FIS - bidirectional
    DmaSetup = 0x41,
    /// Data FIS - bidirectional
    Data = 0x46,
    /// BIST activate FIS - bidirectional
    Bist = 0x58,
    /// PIO setup FIS - device to host
    PioSetup = 0x5F,
    /// Set device bits FIS - device to host
    DevBits = 0xA1,
}
/// Host-to-device Register FIS: carries an ATA command (LBA, count, etc.)
/// from the host to the device. Layout mandated by the SATA spec.
#[repr(C, packed)]
pub struct FisRegH2D {
    // DWORD 0
    pub fis_type: Mmio<u8>, // FIS_TYPE_REG_H2D
    pub pm: Mmio<u8>,       // Port multiplier, 1: Command, 0: Control
    pub command: Mmio<u8>,  // Command register
    pub featurel: Mmio<u8>, // Feature register, 7:0
    // DWORD 1
    pub lba0: Mmio<u8>,   // LBA low register, 7:0
    pub lba1: Mmio<u8>,   // LBA mid register, 15:8
    pub lba2: Mmio<u8>,   // LBA high register, 23:16
    pub device: Mmio<u8>, // Device register
    // DWORD 2
    pub lba3: Mmio<u8>,     // LBA register, 31:24
    pub lba4: Mmio<u8>,     // LBA register, 39:32
    pub lba5: Mmio<u8>,     // LBA register, 47:40
    pub featureh: Mmio<u8>, // Feature register, 15:8
    // DWORD 3
    pub countl: Mmio<u8>,  // Count register, 7:0
    pub counth: Mmio<u8>,  // Count register, 15:8
    pub icc: Mmio<u8>,     // Isochronous command completion
    pub control: Mmio<u8>, // Control register
    // DWORD 4
    pub rsv1: [Mmio<u8>; 4], // Reserved
}
/// Device-to-host Register FIS: status/error and updated task-file values
/// sent back by the device on command completion.
#[repr(C, packed)]
pub struct FisRegD2H {
    // DWORD 0
    pub fis_type: Mmio<u8>, // FIS_TYPE_REG_D2H
    pub pm: Mmio<u8>,       // Port multiplier, Interrupt bit: 2
    pub status: Mmio<u8>,   // Status register
    pub error: Mmio<u8>,    // Error register
    // DWORD 1
    pub lba0: Mmio<u8>,   // LBA low register, 7:0
    pub lba1: Mmio<u8>,   // LBA mid register, 15:8
    pub lba2: Mmio<u8>,   // LBA high register, 23:16
    pub device: Mmio<u8>, // Device register
    // DWORD 2
    pub lba3: Mmio<u8>, // LBA register, 31:24
    pub lba4: Mmio<u8>, // LBA register, 39:32
    pub lba5: Mmio<u8>, // LBA register, 47:40
    pub rsv2: Mmio<u8>, // Reserved
    // DWORD 3
    pub countl: Mmio<u8>,    // Count register, 7:0
    pub counth: Mmio<u8>,    // Count register, 15:8
    pub rsv3: [Mmio<u8>; 2], // Reserved
    // DWORD 4
    pub rsv4: [Mmio<u8>; 4], // Reserved
}
/// Bidirectional Data FIS header followed by the data payload.
#[repr(C, packed)]
pub struct FisData {
    // DWORD 0
    pub fis_type: Mmio<u8>, // FIS_TYPE_DATA
    pub pm: Mmio<u8>,       // Port multiplier
    pub rsv1: [Mmio<u8>; 2], // Reserved
    // DWORD 1 ~ N
    pub data: [Mmio<u8>; 252], // Payload
}
/// PIO Setup FIS (device to host): describes an upcoming PIO data phase,
/// including the transfer count and the ending status value.
#[repr(C, packed)]
pub struct FisPioSetup {
    // DWORD 0
    pub fis_type: Mmio<u8>, // FIS_TYPE_PIO_SETUP
    pub pm: Mmio<u8>,       // Port multiplier, direction: 4 - device to host, interrupt: 2
    pub status: Mmio<u8>,   // Status register
    pub error: Mmio<u8>,    // Error register
    // DWORD 1
    pub lba0: Mmio<u8>,   // LBA low register, 7:0
    pub lba1: Mmio<u8>,   // LBA mid register, 15:8
    pub lba2: Mmio<u8>,   // LBA high register, 23:16
    pub device: Mmio<u8>, // Device register
    // DWORD 2
    pub lba3: Mmio<u8>, // LBA register, 31:24
    pub lba4: Mmio<u8>, // LBA register, 39:32
    pub lba5: Mmio<u8>, // LBA register, 47:40
    pub rsv2: Mmio<u8>, // Reserved
    // DWORD 3
    pub countl: Mmio<u8>,   // Count register, 7:0
    pub counth: Mmio<u8>,   // Count register, 15:8
    pub rsv3: Mmio<u8>,     // Reserved
    pub e_status: Mmio<u8>, // New value of status register
    // DWORD 4
    pub tc: Mmio<u16>,       // Transfer count
    pub rsv4: [Mmio<u8>; 2], // Reserved
}
/// DMA Setup FIS (bidirectional): negotiates a first-party DMA transfer,
/// identifying the host buffer and the byte count.
#[repr(C, packed)]
pub struct FisDmaSetup {
    // DWORD 0
    pub fis_type: Mmio<u8>, // FIS_TYPE_DMA_SETUP
    pub pm: Mmio<u8>, // Port multiplier, direction: 4 - device to host, interrupt: 2, auto-activate: 1
    pub rsv1: [Mmio<u8>; 2], // Reserved
    // DWORD 1&2
    /* DMA Buffer Identifier. Used to Identify DMA buffer in host memory. SATA Spec says host specific and not in Spec. Trying AHCI spec might work. */
    pub dma_buffer_id_low: Mmio<u32>,
    pub dma_buffer_id_high: Mmio<u32>,
    // DWORD 3
    pub rsv3: Mmio<u32>, // More reserved
    // DWORD 4
    pub dma_buffer_offset: Mmio<u32>, // Byte offset into buffer. First 2 bits must be 0
    // DWORD 5
    pub transfer_count: Mmio<u32>, // Number of bytes to transfer. Bit 0 must be 0
    // DWORD 6
    pub rsv6: Mmio<u32>, // Reserved
}
@@ -0,0 +1,549 @@
use log::{debug, error, info, trace};
use std::mem::size_of;
use std::ops::DerefMut;
use std::time::Duration;
use std::{ptr, u32};
use common::dma::Dma;
use common::io::{Io, Mmio};
use common::timeout::Timeout;
use syscall::error::{Error, Result, EIO};
use super::fis::{FisRegH2D, FisType};
// ATA command opcodes issued through the H2D register FIS.
const ATA_CMD_READ_DMA_EXT: u8 = 0x25;
const ATA_CMD_WRITE_DMA_EXT: u8 = 0x35;
const ATA_CMD_IDENTIFY: u8 = 0xEC;
const ATA_CMD_IDENTIFY_PACKET: u8 = 0xA1;
const ATA_CMD_PACKET: u8 = 0xA0;
// Task-file status bits polled before issuing a command.
const ATA_DEV_BUSY: u8 = 0x80;
const ATA_DEV_DRQ: u8 = 0x08;
// Port CMD register bits: command-list running, FIS-receive running,
// FIS-receive enable, and start.
const HBA_PORT_CMD_CR: u32 = 1 << 15;
const HBA_PORT_CMD_FR: u32 = 1 << 14;
const HBA_PORT_CMD_FRE: u32 = 1 << 4;
const HBA_PORT_CMD_ST: u32 = 1;
// Interrupt-status bits treated as fatal errors for a running command.
const HBA_PORT_IS_ERR: u32 = 1 << 30 | 1 << 29 | 1 << 28 | 1 << 27;
const HBA_SSTS_PRESENT: u32 = 0x3;
// Device signatures reported in the port SIG register.
const HBA_SIG_ATA: u32 = 0x00000101;
const HBA_SIG_ATAPI: u32 = 0xEB140101;
const HBA_SIG_PM: u32 = 0x96690101;
const HBA_SIG_SEMB: u32 = 0xC33C0101;
// Upper bound for all polling loops in this module.
const TIMEOUT: Duration = Duration::new(5, 0);
/// Kind of device detected on an AHCI port (from the signature register),
/// or `None`/`Unknown` when absent or unrecognized.
#[derive(Debug)]
pub enum HbaPortType {
    None,
    Unknown(u32),
    SATA,
    SATAPI,
    PM,
    SEMB,
}
/// Memory-mapped register block for a single AHCI port (AHCI spec,
/// offsets relative to the port base). Accessed in place via `Mmio`.
#[repr(C, packed)]
pub struct HbaPort {
    pub clb: [Mmio<u32>; 2],    // 0x00, command list base address, 1K-byte aligned
    pub fb: [Mmio<u32>; 2],     // 0x08, FIS base address, 256-byte aligned
    pub is: Mmio<u32>,          // 0x10, interrupt status
    pub ie: Mmio<u32>,          // 0x14, interrupt enable
    pub cmd: Mmio<u32>,         // 0x18, command and status
    pub _rsv0: Mmio<u32>,       // 0x1C, Reserved
    pub tfd: Mmio<u32>,         // 0x20, task file data
    pub sig: Mmio<u32>,         // 0x24, signature
    pub ssts: Mmio<u32>,        // 0x28, SATA status (SCR0:SStatus)
    pub sctl: Mmio<u32>,        // 0x2C, SATA control (SCR2:SControl)
    pub serr: Mmio<u32>,        // 0x30, SATA error (SCR1:SError)
    pub sact: Mmio<u32>,        // 0x34, SATA active (SCR3:SActive)
    pub ci: Mmio<u32>,          // 0x38, command issue
    pub sntf: Mmio<u32>,        // 0x3C, SATA notification (SCR4:SNotification)
    pub fbs: Mmio<u32>,         // 0x40, FIS-based switch control
    pub _rsv1: [Mmio<u32>; 11], // 0x44 ~ 0x6F, Reserved
    pub vendor: [Mmio<u32>; 4], // 0x70 ~ 0x7F, vendor specific
}
impl HbaPort {
    /// Classify the attached device from the SATA-status presence bits and
    /// the signature register.
    pub fn probe(&self) -> HbaPortType {
        if self.ssts.readf(HBA_SSTS_PRESENT) {
            let sig = self.sig.read();
            match sig {
                HBA_SIG_ATA => HbaPortType::SATA,
                HBA_SIG_ATAPI => HbaPortType::SATAPI,
                HBA_SIG_PM => HbaPortType::PM,
                HBA_SIG_SEMB => HbaPortType::SEMB,
                _ => HbaPortType::Unknown(sig),
            }
        } else {
            HbaPortType::None
        }
    }

    /// Start command processing: wait for the command-list-running bit to
    /// clear, then set FRE and ST. Fails with EIO on timeout.
    pub fn start(&mut self) -> Result<()> {
        let timeout = Timeout::new(TIMEOUT);
        while self.cmd.readf(HBA_PORT_CMD_CR) {
            timeout.run().map_err(|()| {
                log::error!("HBA start timed out");
                Error::new(EIO)
            })?;
        }
        self.cmd.writef(HBA_PORT_CMD_FRE | HBA_PORT_CMD_ST, true);
        Ok(())
    }

    /// Stop command processing: clear ST, wait for FR and CR to settle,
    /// then clear FRE. Fails with EIO on timeout.
    pub fn stop(&mut self) -> Result<()> {
        self.cmd.writef(HBA_PORT_CMD_ST, false);
        let timeout = Timeout::new(TIMEOUT);
        while self.cmd.readf(HBA_PORT_CMD_FR | HBA_PORT_CMD_CR) {
            timeout.run().map_err(|()| {
                log::error!("HBA stop timed out");
                Error::new(EIO)
            })?;
        }
        self.cmd.writef(HBA_PORT_CMD_FRE, false);
        Ok(())
    }

    /// Find a free command slot: a bit clear in both SACT and CI.
    /// Returns `None` if all 32 slots are busy.
    pub fn slot(&self) -> Option<u32> {
        let slots = self.sact.read() | self.ci.read();
        for i in 0..32 {
            if slots & 1 << i == 0 {
                return Some(i);
            }
        }
        None
    }

    /// Program the port with the command list, command tables and
    /// received-FIS buffer, clear latched status, enable interrupts, and
    /// spin up the device. The port is stopped first.
    pub fn init(
        &mut self,
        clb: &mut Dma<[HbaCmdHeader; 32]>,
        ctbas: &mut [Dma<HbaCmdTable>; 32],
        fb: &mut Dma<[u8; 256]>,
    ) -> Result<()> {
        self.stop()?;
        // Point every command header at its command table (64-bit address
        // split into low/high halves), with zero PRDT entries for now.
        for i in 0..32 {
            let cmdheader = &mut clb[i];
            cmdheader.ctba_low.write(ctbas[i].physical() as u32);
            cmdheader
                .ctba_high
                .write((ctbas[i].physical() as u64 >> 32) as u32);
            cmdheader.prdtl.write(0);
        }
        self.clb[0].write(clb.physical() as u32);
        self.clb[1].write(((clb.physical() as u64) >> 32) as u32);
        self.fb[0].write(fb.physical() as u32);
        self.fb[1].write(((fb.physical() as u64) >> 32) as u32);
        // Clear any latched interrupt status (write-1-to-clear register).
        let is = self.is.read();
        self.is.write(is);
        self.ie.write(0b10111);
        // Clear latched SATA errors the same way.
        let serr = self.serr.read();
        self.serr.write(serr);
        // Disable power management
        let sctl = self.sctl.read();
        self.sctl.write(sctl | 7 << 8);
        // Power on and spin up device
        self.cmd.writef(1 << 2 | 1 << 1, true);
        debug!("AHCI init {:X}", self.cmd.read());
        Ok(())
    }

    /// IDENTIFY DEVICE (ATA disks); returns capacity in bytes.
    pub unsafe fn identify(
        &mut self,
        clb: &mut Dma<[HbaCmdHeader; 32]>,
        ctbas: &mut [Dma<HbaCmdTable>; 32],
    ) -> Result<u64> {
        self.identify_inner(ATA_CMD_IDENTIFY, clb, ctbas)
    }

    /// IDENTIFY PACKET DEVICE (ATAPI); same data layout as `identify`.
    pub unsafe fn identify_packet(
        &mut self,
        clb: &mut Dma<[HbaCmdHeader; 32]>,
        ctbas: &mut [Dma<HbaCmdTable>; 32],
    ) -> Result<u64> {
        self.identify_inner(ATA_CMD_IDENTIFY_PACKET, clb, ctbas)
    }

    // Shared between identify() and identify_packet()
    /// Issue the identify command, then parse serial/firmware/model strings
    /// and the sector count out of the 256-word identify data.
    unsafe fn identify_inner(
        &mut self,
        cmd: u8,
        clb: &mut Dma<[HbaCmdHeader; 32]>,
        ctbas: &mut [Dma<HbaCmdTable>; 32],
    ) -> Result<u64> {
        let dest: Dma<[u16; 256]> = Dma::new([0; 256]).unwrap();
        let slot = self
            .ata_start(clb, ctbas, |cmdheader, cmdfis, prdt_entries, _acmd| {
                // Single PRDT entry pointing at the 512-byte identify buffer.
                cmdheader.prdtl.write(1);
                let prdt_entry = &mut prdt_entries[0];
                prdt_entry.dba_low.write(dest.physical() as u32);
                prdt_entry
                    .dba_high
                    .write((dest.physical() as u64 >> 32) as u32);
                // NOTE(review): AHCI DBC is byte-count-minus-1 with bit 0
                // set; 512 | 1 encodes 514 bytes rather than 512 — confirm
                // intended (511 would encode exactly 512).
                prdt_entry.dbc.write(512 | 1);
                cmdfis.pm.write(1 << 7);
                cmdfis.command.write(cmd);
                cmdfis.device.write(0);
                cmdfis.countl.write(1);
                cmdfis.counth.write(0);
            })?
            .ok_or(Error::new(EIO))?;
        self.ata_stop(slot)?;
        // Identify strings are stored two ASCII bytes per 16-bit word,
        // high byte first; NULs are skipped.
        let mut serial = String::new();
        for word in 10..20 {
            let d = dest[word];
            let a = ((d >> 8) as u8) as char;
            if a != '\0' {
                serial.push(a);
            }
            let b = (d as u8) as char;
            if b != '\0' {
                serial.push(b);
            }
        }
        let mut firmware = String::new();
        for word in 23..27 {
            let d = dest[word];
            let a = ((d >> 8) as u8) as char;
            if a != '\0' {
                firmware.push(a);
            }
            let b = (d as u8) as char;
            if b != '\0' {
                firmware.push(b);
            }
        }
        let mut model = String::new();
        for word in 27..47 {
            let d = dest[word];
            let a = ((d >> 8) as u8) as char;
            if a != '\0' {
                model.push(a);
            }
            let b = (d as u8) as char;
            if b != '\0' {
                model.push(b);
            }
        }
        // Words 100-103 hold the 48-bit LBA sector count; if zero, fall
        // back to the 28-bit count in words 60-61.
        let mut sectors = (dest[100] as u64)
            | ((dest[101] as u64) << 16)
            | ((dest[102] as u64) << 32)
            | ((dest[103] as u64) << 48);
        let lba_bits = if sectors == 0 {
            sectors = (dest[60] as u64) | ((dest[61] as u64) << 16);
            28
        } else {
            48
        };
        info!(
            "Serial: {} Firmware: {} Model: {} {}-bit LBA Size: {} MB",
            serial.trim(),
            firmware.trim(),
            model.trim(),
            lba_bits,
            sectors / 2048
        );
        Ok(sectors * 512)
    }

    /// Issue a READ/WRITE DMA EXT command for `sectors` (1..=255) sectors
    /// at LBA `block`, using `buf` as the data buffer. Returns the slot
    /// used, or `None` if no slot was free.
    pub fn ata_dma(
        &mut self,
        block: u64,
        sectors: usize,
        write: bool,
        clb: &mut Dma<[HbaCmdHeader; 32]>,
        ctbas: &mut [Dma<HbaCmdTable>; 32],
        buf: &mut Dma<[u8; 256 * 512]>,
    ) -> Result<Option<u32>> {
        trace!(
            "AHCI {:X} DMA BLOCK: {:X} SECTORS: {} WRITE: {}",
            (self as *mut HbaPort) as usize,
            block,
            sectors,
            write
        );
        assert!(sectors > 0 && sectors < 256);
        self.ata_start(clb, ctbas, |cmdheader, cmdfis, prdt_entries, _acmd| {
            // For writes, set the write and prefetchable bits in the header.
            if write {
                let cfl = cmdheader.cfl.read();
                cmdheader.cfl.write(cfl | 1 << 7 | 1 << 6)
            }
            cmdheader.prdtl.write(1);
            let prdt_entry = &mut prdt_entries[0];
            prdt_entry.dba_low.write(buf.physical() as u32);
            prdt_entry
                .dba_high
                .write((buf.physical() as u64 >> 32) as u32);
            prdt_entry.dbc.write(((sectors * 512) as u32) | 1);
            cmdfis.pm.write(1 << 7);
            if write {
                cmdfis.command.write(ATA_CMD_WRITE_DMA_EXT);
            } else {
                cmdfis.command.write(ATA_CMD_READ_DMA_EXT);
            }
            // 48-bit LBA split across lba0..lba5; device bit 6 selects LBA
            // addressing mode.
            cmdfis.lba0.write(block as u8);
            cmdfis.lba1.write((block >> 8) as u8);
            cmdfis.lba2.write((block >> 16) as u8);
            cmdfis.device.write(1 << 6);
            cmdfis.lba3.write((block >> 24) as u8);
            cmdfis.lba4.write((block >> 32) as u8);
            cmdfis.lba5.write((block >> 40) as u8);
            cmdfis.countl.write(sectors as u8);
            cmdfis.counth.write((sectors >> 8) as u8);
        })
    }

    /// Send ATAPI packet
    ///
    /// Issues an ATA PACKET command whose 16-byte CDB is `cmd`, expecting
    /// up to `size` bytes of data into `buf`; waits for completion.
    pub fn atapi_dma(
        &mut self,
        cmd: &[u8; 16],
        size: u32,
        clb: &mut Dma<[HbaCmdHeader; 32]>,
        ctbas: &mut [Dma<HbaCmdTable>; 32],
        buf: &mut Dma<[u8; 256 * 512]>,
    ) -> Result<()> {
        let slot = self
            .ata_start(clb, ctbas, |cmdheader, cmdfis, prdt_entries, acmd| {
                // Mark this as an ATAPI command in the header.
                let cfl = cmdheader.cfl.read();
                cmdheader.cfl.write(cfl | 1 << 5);
                cmdheader.prdtl.write(1);
                let prdt_entry = &mut prdt_entries[0];
                prdt_entry.dba_low.write(buf.physical() as u32);
                prdt_entry
                    .dba_high
                    .write((buf.physical() as u64 >> 32) as u32);
                prdt_entry.dbc.write(size - 1);
                cmdfis.pm.write(1 << 7);
                cmdfis.command.write(ATA_CMD_PACKET);
                cmdfis.device.write(0);
                cmdfis.lba1.write(0);
                cmdfis.lba2.write(0);
                // Feature bit 0 = DMA transfer for the data phase.
                cmdfis.featurel.write(1);
                cmdfis.featureh.write(0);
                // Copy the CDB into the command table's ACMD area.
                unsafe { ptr::write_volatile(acmd.as_mut_ptr() as *mut [u8; 16], *cmd) };
            })?
            .ok_or(Error::new(EIO))?;
        self.ata_stop(slot)
    }

    /// Common command-issue path: pick a free slot, zero its command table,
    /// let `callback` fill in the header/FIS/PRDT/ACMD, wait for the task
    /// file to go idle, then issue the command and start the port.
    /// Returns the slot used, or `None` when no slot was free.
    pub fn ata_start<F>(
        &mut self,
        clb: &mut Dma<[HbaCmdHeader; 32]>,
        ctbas: &mut [Dma<HbaCmdTable>; 32],
        callback: F,
    ) -> Result<Option<u32>>
    where
        F: FnOnce(
            &mut HbaCmdHeader,
            &mut FisRegH2D,
            &mut [HbaPrdtEntry; PRDT_ENTRIES],
            &mut [Mmio<u8>; 16],
        ),
    {
        //TODO: Should probably remove
        self.is.write(u32::MAX);
        let Some(slot) = self.slot() else {
            return Ok(None);
        };
        {
            let cmdheader = &mut clb[slot as usize];
            // Command FIS length is stored in DWORDs.
            cmdheader
                .cfl
                .write((size_of::<FisRegH2D>() / size_of::<u32>()) as u8);
            let cmdtbl = &mut ctbas[slot as usize];
            unsafe {
                ptr::write_bytes(
                    cmdtbl.deref_mut() as *mut HbaCmdTable as *mut u8,
                    0,
                    size_of::<HbaCmdTable>(),
                );
            }
            let cmdfis = unsafe { &mut *(cmdtbl.cfis.as_mut_ptr() as *mut FisRegH2D) };
            cmdfis.fis_type.write(FisType::RegH2D as u8);
            let prdt_entry = unsafe { &mut *(&mut cmdtbl.prdt_entry as *mut _) };
            let acmd = unsafe { &mut *(&mut cmdtbl.acmd as *mut _) };
            callback(cmdheader, cmdfis, prdt_entry, acmd)
        }
        // Wait for the device to be neither busy nor requesting data.
        let timeout = Timeout::new(TIMEOUT);
        while self.tfd.readf((ATA_DEV_BUSY | ATA_DEV_DRQ) as u32) {
            timeout.run().map_err(|()| {
                log::error!("HBA ata_start timeout");
                Error::new(EIO)
            })?;
        }
        self.ci.writef(1 << slot, true);
        //TODO: Should probably remove
        self.start()?;
        Ok(Some(slot))
    }

    /// True while the command in `slot` is still in flight (issued or busy)
    /// and no error interrupt bit has latched.
    pub fn ata_running(&self, slot: u32) -> bool {
        (self.ci.readf(1 << slot) || self.tfd.readf(0x80)) && self.is.read() & HBA_PORT_IS_ERR == 0
    }

    /// Wait for the command in `slot` to finish, stop the port, and check
    /// for latched error bits. On error, dump all port registers, clear the
    /// interrupt status, and return EIO.
    pub fn ata_stop(&mut self, slot: u32) -> Result<()> {
        let timeout = Timeout::new(TIMEOUT);
        while self.ata_running(slot) {
            timeout.run().map_err(|()| {
                log::error!("HBA ata_stop timeout");
                Error::new(EIO)
            })?;
        }
        self.stop()?;
        if self.is.read() & HBA_PORT_IS_ERR != 0 {
            let (is, ie, cmd, tfd, ssts, sctl, serr, sact, ci, sntf, fbs) = (
                self.is.read(),
                self.ie.read(),
                self.cmd.read(),
                self.tfd.read(),
                self.ssts.read(),
                self.sctl.read(),
                self.serr.read(),
                self.sact.read(),
                self.ci.read(),
                self.sntf.read(),
                self.fbs.read(),
            );
            error!("IS {:X} IE {:X} CMD {:X} TFD {:X}", is, ie, cmd, tfd);
            error!(
                "SSTS {:X} SCTL {:X} SERR {:X} SACT {:X}",
                ssts, sctl, serr, sact
            );
            error!("CI {:X} SNTF {:X} FBS {:X}", ci, sntf, fbs);
            self.is.write(u32::MAX);
            Err(Error::new(EIO))
        } else {
            Ok(())
        }
    }
}
/// Memory-mapped AHCI HBA register file (generic host control followed by
/// the 32 per-port register blocks), as mapped from PCI BAR 5.
#[repr(C, packed)]
pub struct HbaMem {
    pub cap: Mmio<u32>,         // 0x00, Host capability
    pub ghc: Mmio<u32>,         // 0x04, Global host control
    pub is: Mmio<u32>,          // 0x08, Interrupt status
    pub pi: Mmio<u32>,          // 0x0C, Port implemented
    pub vs: Mmio<u32>,          // 0x10, Version
    pub ccc_ctl: Mmio<u32>,     // 0x14, Command completion coalescing control
    pub ccc_pts: Mmio<u32>,     // 0x18, Command completion coalescing ports
    pub em_loc: Mmio<u32>,      // 0x1C, Enclosure management location
    pub em_ctl: Mmio<u32>,      // 0x20, Enclosure management control
    pub cap2: Mmio<u32>,        // 0x24, Host capabilities extended
    pub bohc: Mmio<u32>,        // 0x28, BIOS/OS handoff control and status
    pub _rsv: [Mmio<u8>; 116],  // 0x2C - 0x9F, Reserved
    pub vendor: [Mmio<u8>; 96], // 0xA0 - 0xFF, Vendor specific registers
    pub ports: [HbaPort; 32],   // 0x100 - 0x10FF, Port control registers
}
impl HbaMem {
    /// Global HBA setup: enable AHCI mode and global interrupts, then log
    /// the capability/status registers for diagnostics.
    pub fn init(&mut self) {
        /*
        self.ghc.writef(1, true);
        while self.ghc.readf(1) {
            pause();
        }
        */
        // GHC: bit 31 = AHCI enable, bit 1 = interrupt enable.
        self.ghc.write(1 << 31 | 1 << 1);
        debug!(
            "AHCI CAP {:X} GHC {:X} IS {:X} PI {:X} VS {:X} CAP2 {:X} BOHC {:X}",
            self.cap.read(),
            self.ghc.read(),
            self.is.read(),
            self.pi.read(),
            self.vs.read(),
            self.cap2.read(),
            self.bohc.read()
        );
    }
}
/// One Physical Region Descriptor Table entry: a physical buffer address
/// plus a byte count (with interrupt-on-completion in bit 0 of `dbc`).
#[repr(C, packed)]
pub struct HbaPrdtEntry {
    dba_low: Mmio<u32>,  // Data base address (low)
    dba_high: Mmio<u32>, // Data base address (high)
    _rsv0: Mmio<u32>,    // Reserved
    dbc: Mmio<u32>,      // Byte count, 4M max, interrupt = 1
}
/// Per-slot command table: command FIS, ATAPI command area, and the PRDT.
#[repr(C, packed)]
pub struct HbaCmdTable {
    // 0x00
    cfis: [Mmio<u8>; 64], // Command FIS
    // 0x40
    acmd: [Mmio<u8>; 16], // ATAPI command, 12 or 16 bytes
    // 0x50
    _rsv: [Mmio<u8>; 48], // Reserved
    // 0x80
    prdt_entry: [HbaPrdtEntry; PRDT_ENTRIES], // Physical region descriptor table entries, 0 ~ 65535
}
// Byte budget for one command table; PRDT entries fill everything after
// the 128-byte fixed header (CFIS + ACMD + reserved).
// NOTE(review): 256 * 4096 = 1 MiB per table (x32 slots) looks large for a
// driver that only ever writes one PRDT entry — confirm the intended size.
const CMD_TBL_SIZE: usize = 256 * 4096;
const PRDT_ENTRIES: usize = (CMD_TBL_SIZE - 128) / size_of::<HbaPrdtEntry>();
/// One entry of the 32-slot command list: describes a command table and
/// how the HBA should execute it.
#[repr(C, packed)]
pub struct HbaCmdHeader {
    // DW0
    cfl: Mmio<u8>, /* Command FIS length in DWORDS, 2 ~ 16, atapi: 4, write - host to device: 2, prefetchable: 1 */
    _pm: Mmio<u8>, // Reset - 0x80, bist: 0x40, clear busy on ok: 0x20, port multiplier
    prdtl: Mmio<u16>, // Physical region descriptor table length in entries
    // DW1
    _prdbc: Mmio<u32>, // Physical region descriptor byte count transferred
    // DW2, 3
    ctba_low: Mmio<u32>,  // Command table descriptor base address (low)
    ctba_high: Mmio<u32>, // Command table descriptor base address (high)
    // DW4 - 7
    _rsv1: [Mmio<u32>; 4], // Reserved
}
@@ -0,0 +1,79 @@
use common::io::Io;
use driver_block::Disk;
use log::{error, info};
use self::disk_ata::DiskATA;
use self::disk_atapi::DiskATAPI;
use self::hba::{HbaMem, HbaPortType};
pub mod disk_ata;
pub mod disk_atapi;
pub mod fis;
pub mod hba;
/// A disk found on an AHCI port: either an ATA (SATA) disk or an ATAPI
/// (e.g. optical) device. Dispatches the `Disk` trait to the inner driver.
pub enum AnyDisk {
    Ata(DiskATA),
    Atapi(DiskATAPI),
}
impl Disk for AnyDisk {
fn block_size(&self) -> u32 {
match self {
Self::Ata(a) => a.block_size(),
Self::Atapi(a) => a.block_size(),
}
}
fn size(&self) -> u64 {
match self {
Self::Ata(a) => a.size(),
Self::Atapi(a) => a.size(),
}
}
async fn read(&mut self, base: u64, buffer: &mut [u8]) -> syscall::Result<usize> {
match self {
Self::Ata(a) => a.read(base, buffer).await,
Self::Atapi(a) => a.read(base, buffer).await,
}
}
async fn write(&mut self, base: u64, buffer: &[u8]) -> syscall::Result<usize> {
match self {
Self::Ata(a) => a.write(base, buffer).await,
Self::Atapi(a) => a.write(base, buffer).await,
}
}
}
/// Map the HBA register file at `base`, initialize it, and probe every
/// implemented port, constructing a driver for each usable SATA/SATAPI
/// device. Ports whose driver fails to initialize are logged and skipped.
///
/// Safety relies on `base` being a valid, exclusive mapping of the AHCI
/// BAR for the `'static` lifetime (guaranteed by the caller in main).
pub fn disks(base: usize, name: &str) -> (&'static mut HbaMem, Vec<AnyDisk>) {
    let hba_mem = unsafe { &mut *(base as *mut HbaMem) };
    hba_mem.init();
    let pi = hba_mem.pi.read();
    let disks: Vec<AnyDisk> = (0..hba_mem.ports.len())
        // Only visit ports marked implemented in PI (simpler bit test than
        // the original `pi & 1 << i as i32 == 1 << i as i32`).
        .filter(|&i| pi & (1 << i) != 0)
        .filter_map(|i| {
            // Each selected port is borrowed exactly once, so the raw
            // pointer dance does not alias.
            let port = unsafe { &mut *hba_mem.ports.as_mut_ptr().add(i) };
            let port_type = port.probe();
            info!("{}-{}: {:?}", name, i, port_type);
            match port_type {
                HbaPortType::SATA => match DiskATA::new(i, port) {
                    Ok(disk) => Some(AnyDisk::Ata(disk)),
                    Err(err) => {
                        error!("{}: {}", i, err);
                        None
                    }
                },
                HbaPortType::SATAPI => match DiskATAPI::new(i, port) {
                    Ok(disk) => Some(AnyDisk::Atapi(disk)),
                    Err(err) => {
                        error!("{}: {}", i, err);
                        None
                    }
                },
                // Port multipliers, SEMB, and unknown signatures are ignored.
                _ => None,
            }
        })
        .collect();
    (hba_mem, disks)
}
@@ -0,0 +1,109 @@
// #![cfg_attr(target_arch = "aarch64", feature(stdsimd))] // Required for yield instruction
use std::io::{Read, Write};
use std::os::fd::AsRawFd;
use std::usize;
use common::io::Io;
use driver_block::{DiskScheme, ExecutorTrait, FuturesExecutor};
use event::{EventFlags, RawEventQueue};
use pcid_interface::PciFunctionHandle;
use log::{error, info};
pub mod ahci;
/// Entry point: register `daemon` as a pcid-managed PCI driver daemon.
fn main() {
    pcid_interface::pci_daemon(daemon);
}
/// AHCI daemon body: maps the HBA from PCI BAR 5, enumerates disks,
/// exposes them through a `disk.<name>` scheme, and services scheme and
/// legacy-IRQ events until the event queue ends.
fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
    let pci_config = pcid_handle.config();
    let mut name = pci_config.func.name();
    name.push_str("_ahci");
    let irq = pci_config
        .func
        .legacy_interrupt_line
        .expect("ahcid: no legacy interrupts supported");
    common::setup_logging(
        "disk",
        "pci",
        &name,
        common::output_level(),
        common::file_level(),
    );
    info!("AHCI {}", pci_config.func.display());
    // BAR 5 is the AHCI ABAR (HBA register file).
    let address = unsafe { pcid_handle.map_bar(5) }.ptr.as_ptr() as usize;
    {
        let (hba_mem, disks) = ahci::disks(address as usize, &name);
        let scheme_name = format!("disk.{}", name);
        let mut scheme = DiskScheme::new(
            Some(daemon),
            scheme_name,
            disks
                .into_iter()
                .enumerate()
                .map(|(i, disk)| (i as u32, disk))
                .collect(),
            &FuturesExecutor,
        );
        let mut irq_file = irq.irq_handle("ahcid");
        let irq_fd = irq_file.as_raw_fd() as usize;
        let event_queue = RawEventQueue::new().expect("ahcid: failed to create event queue");
        // Drop into the null namespace once all capabilities are acquired.
        libredox::call::setrens(0, 0).expect("ahcid: failed to enter null namespace");
        event_queue
            .subscribe(scheme.event_handle().raw(), 1, EventFlags::READ)
            .expect("ahcid: failed to event scheme socket");
        event_queue
            .subscribe(irq_fd, 1, EventFlags::READ)
            .expect("ahcid: failed to event irq scheme");
        for event in event_queue {
            let event = event.unwrap();
            if event.fd == scheme.event_handle().raw() {
                // Scheme request pending: process it.
                FuturesExecutor.block_on(scheme.tick()).unwrap();
            } else if event.fd == irq_fd {
                let mut irq = [0; 8];
                if irq_file
                    .read(&mut irq)
                    .expect("ahcid: failed to read irq file")
                    >= irq.len()
                {
                    let is = hba_mem.is.read();
                    if is > 0 {
                        let pi = hba_mem.pi.read();
                        let pi_is = pi & is;
                        // Acknowledge every implemented port that raised
                        // the interrupt, then the HBA-level status.
                        for i in 0..hba_mem.ports.len() {
                            if pi_is & 1 << i > 0 {
                                let port = &mut hba_mem.ports[i];
                                let is = port.is.read();
                                port.is.write(is);
                            }
                        }
                        hba_mem.is.write(is);
                        // Writing back acknowledges the IRQ to the kernel.
                        irq_file
                            .write(&irq)
                            .expect("ahcid: failed to write irq file");
                        FuturesExecutor.block_on(scheme.tick()).unwrap();
                    }
                }
            } else {
                error!("Unknown event {}", event.fd);
            }
        }
    }
    std::process::exit(0);
}
@@ -0,0 +1,20 @@
[package]
name = "bcm2835-sdhcid"
description = "BCM2835 storage controller driver"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fdt = { git = "https://github.com/repnop/fdt.git", rev = "059bb23" }
common = { path = "../../common" }
daemon = { path = "../../../daemon" }
driver-block = { path = "../driver-block" }
libredox.workspace = true
redox_syscall = { workspace = true, features = ["std"] }
redox_event.workspace = true
[lints]
workspace = true
@@ -0,0 +1,128 @@
use std::process;
use driver_block::{DiskScheme, ExecutorTrait, TrivialExecutor};
use event::{EventFlags, RawEventQueue};
use fdt::Fdt;
mod sd;
/// Fetch the device tree blob. On Redox the kernel exposes it via the
/// `kernel.dtb:` scheme; a read failure is fatal for this driver.
#[cfg(target_os = "redox")]
fn get_dtb() -> Vec<u8> {
    std::fs::read("kernel.dtb:").unwrap()
}
/// Fetch the device tree blob. On Linux the DTB path comes from argv[1];
/// with no argument an empty blob is returned and the caller exits.
#[cfg(target_os = "linux")]
fn get_dtb() -> Vec<u8> {
    match std::env::args().nth(1) {
        Some(path) => std::fs::read(path).unwrap(),
        None => Vec::new(),
    }
}
/// Entry point: daemonize and hand control to `daemon`.
fn main() {
    daemon::Daemon::new(daemon);
}
/// BCM2835 SD daemon body: locates the SDHCI controller via the device
/// tree, maps its registers, initializes the card, and serves it through
/// the `disk.mmc` scheme until the event queue ends.
fn daemon(daemon: daemon::Daemon) -> ! {
    let dtb_data = get_dtb();
    println!("read from OS, len = {}", dtb_data.len());
    // No DTB means nothing to drive (Linux host run without an argument).
    if dtb_data.len() == 0 {
        process::exit(0);
    }
    let fdt = Fdt::new(&dtb_data).unwrap();
    println!("DTB model = {}", fdt.root().model());
    let with = ["brcm,bcm2835-sdhci"];
    let compat_node = fdt.find_compatible(&with).unwrap();
    let reg = compat_node.reg().unwrap().next().unwrap();
    let reg_size = reg.size.unwrap();
    let mut reg_addr = reg.starting_address as usize;
    println!(
        "DeviceMemory start = 0x{:08x}, size = 0x{:08x}",
        reg_addr, reg_size
    );
    // Translate the bus address through the /soc ranges property, if any,
    // to obtain the CPU-visible physical address.
    if let Some(mut ranges) = fdt.find_node("/soc").and_then(|f| f.ranges()) {
        let range = ranges
            .find(|f| f.child_bus_address <= reg_addr && reg_addr - f.child_bus_address < f.size)
            .expect("Couldn't find device range in /soc/@ranges");
        reg_addr = range.parent_bus_address + (reg_addr - range.child_bus_address);
        println!(
            "DeviceMemory remapped onto CPU address space: start = 0x{:08x}, size = 0x{:08x}",
            reg_addr, reg_size
        );
    }
    let addr = unsafe {
        common::physmap(
            reg_addr,
            reg_size,
            common::Prot::RW,
            common::MemoryType::DeviceMemory,
        )
        .expect("bcm2835-sdhcid: failed to map address") as usize
    };
    println!(
        "ioremap 0x{:08x} to 0x{:08x} 2222",
        reg.starting_address as usize, addr
    );
    let mut sdhci = sd::SdHostCtrl::new(addr);
    unsafe {
        sdhci.init();
        /*
        let mut buf1 = [0u32; 512];
        sdhci.sd_readblock(1, &mut buf1, 1);
        println!("readblock {:?}", buf1);
        buf1[0] = 0xdead_0000;
        buf1[1] = 0xdead_0000;
        buf1[2] = 0x0000_dead;
        buf1[3] = 0x0000_dead;
        sdhci.sd_writeblock(1, &buf1, 1);
        sdhci.sd_readblock(1, &mut buf1, 1);
        println!("readblock {:?}", buf1);
        */
        /*
        let mut buf1 = [0u8; 512];
        sdhci.read(1, &mut buf1);
        println!("readblock {:?}", buf1);
        buf1[0] = 0xde;
        buf1[1] = 0xad;
        buf1[2] = 0xde;
        buf1[3] = 0xad;
        sdhci.write(1, &buf1);
        sdhci.read(1, &mut buf1);
        println!("readblock {:?}", buf1);
        */
    }
    let mut disks = Vec::new();
    disks.push(sdhci);
    let mut scheme = DiskScheme::new(
        Some(daemon),
        "disk.mmc".to_string(),
        disks
            .into_iter()
            .enumerate()
            .map(|(i, disk)| (i as u32, disk))
            .collect(),
        &TrivialExecutor, // TODO: real executor
    );
    let event_queue = RawEventQueue::new().expect("mmcd: failed to open event file");
    event_queue
        .subscribe(scheme.event_handle().raw(), 0, EventFlags::READ)
        .expect("mmcd: failed to event disk scheme");
    // Drop into the null namespace once all capabilities are acquired.
    libredox::call::setrens(0, 0).expect("mmcd: failed to enter null namespace");
    for event in event_queue {
        let event = event.unwrap();
        if event.fd == scheme.event_handle().raw() {
            TrivialExecutor.block_on(scheme.tick()).unwrap();
        } else {
            println!("Unknown event {}", event.fd);
        }
    }
    process::exit(0);
}
@@ -0,0 +1,785 @@
use common::io::{Io, Mmio};
use driver_block::Disk;
use std::{sync::RwLock, thread, time::Duration};
use syscall::{Error, Result, EINVAL};
/// Spin for approximately `n` CPU cycles by issuing `n` `nop` instructions.
///
/// Only meaningful on bare-metal-ish aarch64 targets where a busy loop is the
/// cheapest short delay available.
#[cfg(target_arch = "aarch64")]
#[inline(always)]
pub(crate) unsafe fn wait_cycles(n: usize) {
    use core::arch::asm;
    for _ in 0..n {
        asm!("nop");
    }
}
/// Delay stub for riscv64; not implemented yet.
///
/// The parameter is underscored so riscv64 builds do not emit an
/// unused-variable / unused-mut warning for an intentional `todo!()` stub.
#[cfg(target_arch = "riscv64")]
#[inline(always)]
pub(crate) unsafe fn wait_msec(_n: usize) {
    todo!()
}
/// Busy-wait using the aarch64 generic timer (CNTFRQ_EL0 / CNTPCT_EL0).
///
/// NOTE(review): the target tick is computed as `((f / 1000) * n) / 1000`,
/// i.e. `f * n / 1_000_000` — that is `n` *microseconds* at counter frequency
/// `f`, not milliseconds, despite the function name (this quirk is inherited
/// from the bzt bare-metal EMMC tutorial code this driver follows). The
/// x86/x86_64 variant below sleeps `n` milliseconds — confirm which unit the
/// callers actually expect before "fixing" either side.
#[cfg(target_arch = "aarch64")]
#[inline(always)]
pub(crate) unsafe fn wait_msec(mut n: usize) {
    use core::arch::asm;
    let mut f: usize;
    let mut t: usize;
    let mut r: usize;
    // f = counter frequency in Hz, t = current counter value.
    asm!("mrs {0}, cntfrq_el0", out(reg) f);
    asm!("mrs {0}, cntpct_el0", out(reg) t);
    // Compute the target counter value (see unit caveat above).
    t += ((f / 1000) * n) / 1000;
    // Spin until the counter passes the target.
    loop {
        asm!("mrs {0}, cntpct_el0", out(reg) r);
        if r >= t {
            break;
        }
    }
}
/// Sleep for at least `n` milliseconds.
///
/// On hosted x86 targets there is no need to spin on a hardware counter —
/// the OS scheduler provides the delay.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[inline(always)]
pub(crate) unsafe fn wait_msec(n: usize) {
    let delay = Duration::from_millis(n as u64);
    thread::sleep(delay);
}
// Software flags OR-ed into the pre-encoded CMDTM values below.
// CMD_NEED_APP (bit 31) is a driver-only marker meaning "prefix with CMD55
// (APP_CMD)"; `sd_cmd` strips it before writing the register.
const CMD_NEED_APP: u32 = 0x8000_0000;
const CMD_RSPNS_48: u32 = 0x0002_0000;
const CMD_ERRORS_MASK: u32 = 0xfff9_c004;
const CMD_RCA_MASK: u32 = 0xffff_0000;
// Pre-encoded CMDTM register values: the SD command index sits in bits
// 24..30; the low bits select the response type and data-transfer direction.
// E.g. CMD_READ_SINGLE is CMD17 with a 48-bit response and a card-to-host
// data transfer.
const CMD_GO_IDLE: u32 = 0x0000_0000;
const CMD_ALL_SEND_CID: u32 = 0x0201_0000;
const CMD_SEND_CSD: u32 = 0x0901_0000;
const CMD_SEND_REL_ADDR: u32 = 0x0302_0000;
const CMD_CARD_SELECT: u32 = 0x0703_0000;
const CMD_SEND_IF_COND: u32 = 0x0802_0000;
const CMD_STOP_TRANS: u32 = 0x0c03_0000;
const CMD_READ_SINGLE: u32 = 0x1122_0010;
const CMD_READ_MULTI: u32 = 0x1222_0032;
const CMD_SET_BLOCKCNT: u32 = 0x1702_0000;
const CMD_WRITE_SINGLE: u32 = 0x1822_0000;
const CMD_WRITE_MULTI: u32 = 0x1922_0022;
const CMD_APP_CMD: u32 = 0x3700_0000;
// Application commands (require a preceding CMD55).
const CMD_SET_BUS_WIDTH: u32 = 0x0602_0000 | CMD_NEED_APP;
const CMD_SEND_OP_COND: u32 = 0x2902_0000 | CMD_NEED_APP;
const CMD_SEND_SCR: u32 = 0x3322_0010 | CMD_NEED_APP;
// STATUS register bits.
const SR_READ_AVAILABLE: u32 = 0x0000_0800;
const SR_WRITE_AVAILABLE: u32 = 0x0000_0400;
const SR_DAT_INHIBIT: u32 = 0x0000_0002;
const SR_CMD_INHIBIT: u32 = 0x0000_0001;
const SR_APP_CMD: u32 = 0x0000_0020;
// CONTROL0 / CONTROL1 register bits.
const C0_SPI_MODE_EN: u32 = 0x0010_0000;
const C0_HCTL_HS_EN: u32 = 0x0000_0004;
const C0_HCTL_DWITDH: u32 = 0x0000_0002;
const C1_SRST_DATA: u32 = 0x0400_0000;
const C1_SRST_CMD: u32 = 0x0200_0000;
const C1_SRST_HC: u32 = 0x0100_0000;
const C1_TOUNIT_DIS: u32 = 0x000f_0000;
const C1_TOUNIT_MAX: u32 = 0x000e_0000;
const C1_CLK_GENSEL: u32 = 0x0000_0020;
const C1_CLK_EN: u32 = 0x0000_0004;
const C1_CLK_STABLE: u32 = 0x0000_0002;
const C1_CLK_INTLEN: u32 = 0x0000_0001;
// INTERRUPT register bits.
const INT_DATA_TIMEOUT: u32 = 0x0010_0000;
const INT_CMD_TIMEOUT: u32 = 0x0001_0000;
const INT_READ_RDY: u32 = 0x0000_0020;
const INT_WRITE_RDY: u32 = 0x0000_0010;
const INT_DATA_DONE: u32 = 0x0000_0002;
const INT_CMD_DONE: u32 = 0x0000_0001;
const INT_ERROR_MASK: u32 = 0x017e_8000;
// SLOTISR_VER: host controller spec version field (register encoding:
// 0 -> v1.00, 1 -> v2.00, 2 -> v3.00).
const HOST_SPEC_VERSION_OFFSET: u32 = 16;
const HOST_SPEC_VERSION_MASK: u32 = 0x00ff_0000;
const HOST_SPEC_V3: u32 = 2;
const HOST_SPEC_V2: u32 = 1;
const HOST_SPEC_V1: u32 = 0;
// ACMD41 (SEND_OP_COND) argument/response bits: voltage window, busy/done
// flag (bit 31) and card-capacity-status (CCS, bit 30).
const ACMD41_VOLTAGE: u32 = 0x00ff_8000;
const ACMD41_CMD_COMPLETE: u32 = 0x8000_0000;
const ACMD41_CMD_CCS: u32 = 0x4000_0000;
const ACMD41_ARG_HC: u32 = 0x51ff_8000;
// SCR register bits.
const SCR_SD_BUS_WIDTH_4: u32 = 0x0000_0400;
const SCR_SUPP_SET_BLKCNT: u32 = 0x0200_0000;
// Driver-internal SCR bit (added by the bztsrc driver this code follows):
// records whether the card is block-addressed (SDHC/SDXC).
const SCR_SUPP_CCS: u32 = 0x0000_0001;
/// Memory-mapped SDHCI/EMMC register block.
///
/// Each field is one 32-bit MMIO register; with `repr(C, packed)` the byte
/// offsets follow directly from the declaration order (shown per field).
#[repr(C, packed)]
pub struct SdHostCtrlRegs {
    // 0x00: ACMD23 argument (unused by this driver).
    _arg2: Mmio<u32>,
    // 0x04: block size (low half) and block count (high half).
    blksizecnt: Mmio<u32>,
    // 0x08: command argument.
    arg1: Mmio<u32>,
    // 0x0c: command index and transfer mode.
    cmdtm: Mmio<u32>,
    // 0x10-0x1c: response bits 0-127.
    resp0: Mmio<u32>,
    resp1: Mmio<u32>,
    resp2: Mmio<u32>,
    resp3: Mmio<u32>,
    // 0x20: data FIFO.
    data: Mmio<u32>,
    // 0x24: controller status.
    status: Mmio<u32>,
    // 0x28: host configuration (bus width, high speed, ...).
    control0: Mmio<u32>,
    // 0x2c: clock and reset control.
    control1: Mmio<u32>,
    // 0x30: interrupt flags (write 1 to acknowledge).
    irpt_mask: Mmio<u32>, // placeholder comment removed below — see fields
    interrupt_is_above: (),
    // NOTE: field list continues as in the hardware layout.
    slotisr_ver: Mmio<u32>,
}
//TODO: refactor, sd/sdhci/bcmh2835-sdhci three different modules.
/// Driver state for one SDHCI-style host controller and the card behind it.
pub struct SdHostCtrl {
    // Memory-mapped register block; the lock serializes register access.
    regs: RwLock<&'static mut SdHostCtrlRegs>,
    // Host controller spec version, read from SLOTISR_VER during `init`.
    host_spec_ver: u32,
    // Card identification register (CMD2).
    cid: [u32; 4],
    // Card-specific data register (CMD9); used to compute `size`.
    csd: [u32; 4],
    rca: u32, //relative card address (CMD3)
    // SD configuration register; scr[0] also caches the CCS capability bit.
    scr: [u32; 2],
    // Operating conditions register from ACMD41.
    ocr: u32,
    // Card capacity in bytes, derived from the CSD.
    size: u64,
}
impl SdHostCtrl {
pub fn new(address: usize) -> Self {
SdHostCtrl {
regs: RwLock::new(unsafe { &mut *(address as *mut SdHostCtrlRegs) }),
host_spec_ver: 0,
cid: [0; 4],
csd: [0; 4],
rca: 0,
scr: [0; 2],
ocr: 0,
size: 0,
}
}
    /// Bring the host controller and the inserted card up to data-transfer
    /// state (bzt-style bare-metal EMMC bring-up):
    ///
    /// 1. soft-reset the host, enable the internal clock at 400 kHz;
    /// 2. identify the card: CMD0 (idle), CMD8 (voltage check), ACMD41 loop
    ///    (power up, learn CCS), CMD2 (CID), CMD3 (RCA);
    /// 3. read the CSD and compute the capacity in bytes;
    /// 4. switch to 25 MHz, select the card (CMD7), read the SCR (ACMD51);
    /// 5. enable 4-bit bus width if the SCR advertises it.
    ///
    /// Failures are reported via `println!` and cause an early return,
    /// leaving the driver only partially initialized (`size` may stay 0).
    pub unsafe fn init(&mut self) {
        let regs = self.regs.get_mut().unwrap();
        // Read the host spec version out of SLOTISR_VER.
        let mut reg_val = regs.slotisr_ver.read();
        self.host_spec_ver = (reg_val & HOST_SPEC_VERSION_MASK) >> HOST_SPEC_VERSION_OFFSET;
        regs.control0.write(0x0);
        // Soft-reset the whole host controller and wait for SRST_HC to clear.
        reg_val = regs.control1.read();
        regs.control1.write(reg_val | C1_SRST_HC);
        let mut cnt = 1000;
        while (cnt >= 0) && ((regs.control1.read() & C1_SRST_HC) == C1_SRST_HC) {
            cnt -= 1;
            wait_msec(10);
        }
        if cnt < 0 {
            println!("ERROR: failed to reset EMMC");
            return;
        }
        println!("EMMC: reset OK");
        // Enable the internal clock and the maximum data timeout unit.
        reg_val = regs.control1.read();
        regs.control1.write(reg_val | C1_CLK_INTLEN | C1_TOUNIT_MAX);
        wait_msec(10);
        {
            // Identification phase runs at 400 kHz (40_0000 Hz).
            if let Err(_) = self.set_clock(40_0000) {
                println!("ERROR: failed to set clock {}", 40_0000);
                return;
            }
        }
        let regs = self.regs.get_mut().unwrap();
        // Enable/unmask all interrupt flags; completion is detected by
        // polling the INTERRUPT register, not by a wired IRQ.
        regs.irpt_en.write(0xffff_ffff);
        regs.irpt_mask.write(0xffff_ffff);
        // CMD0: reset the card to idle state.
        if let Err(_) = self.sd_cmd(CMD_GO_IDLE, 0) {
            println!("failed to go idle");
            return;
        }
        // CMD8: check 2.7-3.6V support; 0x1aa = voltage pattern + check byte.
        if let Err(_) = self.sd_cmd(CMD_SEND_IF_COND, 0x0000_01aa) {
            println!("failed to send if cond");
            return;
        }
        // ACMD41 loop: repeat until the card reports power-up complete
        // (bit 31 of the OCR) or we run out of retries.
        cnt = 6;
        reg_val = 0;
        while ((reg_val & ACMD41_CMD_COMPLETE) == 0) && cnt > 0 {
            wait_msec(10);
            cnt -= 1;
            if let Ok(val) = self.sd_cmd(CMD_SEND_OP_COND, ACMD41_ARG_HC) {
                reg_val = val;
                self.ocr = reg_val;
                print!("EMMC: CMD_SEND_OP_COND returned 0x{:08x} = ", reg_val);
                if (reg_val & ACMD41_CMD_COMPLETE) != 0 {
                    print!("COMPLETE ");
                }
                if (reg_val & ACMD41_VOLTAGE) != 0 {
                    print!("VOLTAGE ");
                }
                if (reg_val & ACMD41_CMD_CCS) != 0 {
                    print!("CCS ");
                }
                print!("\n");
            } else {
                println!("ERROR: EMMC ACMD41 returned error");
                return;
            }
        }
        if (reg_val & ACMD41_CMD_COMPLETE) == 0 || cnt <= 0 {
            println!("ACMD41 TIMEOUT");
            return;
        }
        if (reg_val & ACMD41_VOLTAGE) == 0 {
            println!("ACMD41 VOLTAGE NOT FOUND!");
            return;
        }
        // CCS bit from the OCR: set for block-addressed (SDHC/SDXC) cards.
        let ccs = if (reg_val & ACMD41_CMD_CCS) != 0 {
            SCR_SUPP_CCS
        } else {
            0
        };
        // CMD2: ask for the card identification register.
        if let Err(_) = self.sd_cmd(CMD_ALL_SEND_CID, 0) {
            println!("CMD_ALL_SEND_CID ERROR, IGNORE!");
        }
        // CMD3: get the relative card address used for later commands.
        let sd_rca = self.sd_cmd(CMD_SEND_REL_ADDR, 0x0).unwrap();
        println!("CMD_SEND_REL_ADDR = 0x{:08x}", sd_rca);
        self.rca = sd_rca;
        // CMD9: read the CSD (stored into self.csd by sd_cmd).
        if let Err(_) = self.sd_cmd(CMD_SEND_CSD, sd_rca) {
            println!("failed to get csd");
            return;
        }
        // Compute capacity: CSD v2 layout for high-capacity cards (CCS set),
        // CSD v1 C_SIZE/C_SIZE_MULT layout otherwise.
        let (csize, cmult) = if (self.ocr & ACMD41_CMD_CCS) != 0 {
            let csize = (self.csd[1] & 0x3f) << 16 | (self.csd[2] & 0xffff_0000) >> 16;
            let cmult = 8;
            (csize as u64, cmult as u64)
        } else {
            let csize = (self.csd[1] & 0x3ff) << 2 | (self.csd[2] & 0xc000_0000) >> 30;
            let cmult = (self.csd[2] & 0x0003_8000) >> 15;
            (csize as u64, cmult as u64)
        };
        self.size = ((csize + 1) << (cmult + 2)) * 512;
        println!("mmc size = 0x{:08x}", self.size);
        // Data-transfer phase runs at 25 MHz (2500_0000 Hz).
        if let Err(_) = self.set_clock(2500_0000) {
            println!("failed to set clock 2500_0000 Hz");
            return;
        }
        // CMD7: select the card (moves it to transfer state).
        if let Err(_) = self.sd_cmd(CMD_CARD_SELECT, sd_rca) {
            println!("failed to CMD_CARD_SELECT 0x{:08x}", sd_rca);
            return;
        }
        if let Err(_) = self.sd_status(SR_DAT_INHIBIT) {
            println!("SR_DAT_INHIBIT return");
            return;
        }
        // ACMD51: read the 8-byte SCR (one block of 8 bytes).
        let regs = self.regs.get_mut().unwrap();
        regs.blksizecnt.write(1 << 16 | 8);
        if let Err(_) = self.sd_cmd(CMD_SEND_SCR, 0) {
            println!("failed to CMD_SEND_SCR");
            return;
        }
        if let Err(_) = self.sd_int(INT_READ_RDY) {
            println!("failed to INT_READ_RDY");
            return;
        }
        // Drain the two SCR words from the data FIFO, polling STATUS for
        // read-available. (Note: cnt is decremented on every pass and again
        // when data is not yet available.)
        cnt = 10000;
        let mut i = 0;
        let regs = self.regs.get_mut().unwrap();
        while i < 2 && cnt > 0 {
            reg_val = regs.status.read();
            cnt -= 1;
            if (reg_val & SR_READ_AVAILABLE) != 0 {
                self.scr[i] = regs.data.read();
                i += 1;
            } else {
                wait_msec(10);
                cnt -= 1;
            }
        }
        if i != 2 {
            println!("SD TIMEOUT FOR SCR[; 2]");
            return;
        }
        // Switch to 4-bit bus width when the card supports it (ACMD6).
        if (self.scr[0] & SCR_SD_BUS_WIDTH_4) != 0 {
            if let Err(_) = self.sd_cmd(CMD_SET_BUS_WIDTH, sd_rca | 2) {
                println!("failed to set bus width, {}", sd_rca | 2);
                return;
            }
            let regs = self.regs.get_mut().unwrap();
            regs.control0.write(C0_HCTL_DWITDH);
        }
        print!("EMMC: supports ");
        if (self.scr[0] & SCR_SUPP_SET_BLKCNT) != 0 {
            print!("SET_BLKCNT ");
        }
        if ccs != 0 {
            print!("CCS ");
        }
        print!("\n");
        // Cache the CCS capability in the driver-internal SCR bit so the
        // read/write paths can pick block vs byte addressing.
        self.scr[0] &= !SCR_SUPP_CCS;
        self.scr[0] |= ccs;
    }
    /// Program the SD clock to approximately `freq` Hz.
    ///
    /// Waits for the command/data lines to go idle, stops the clock, computes
    /// a divisor from an assumed ~41.67 MHz core clock (the `4166_6666`
    /// constant — TODO confirm against the actual EMMC base clock), writes it,
    /// re-enables the clock and waits for it to stabilize.
    ///
    /// Host spec v3 controllers take a 10-bit linear divisor; older ones take
    /// a power-of-two divider, which is what the bit-scan below derives.
    pub unsafe fn set_clock(&mut self, freq: u32) -> Result<()> {
        let regs = self.regs.get_mut().unwrap();
        // Wait until neither the command nor the data line is busy.
        let mut reg_val = regs.status.read() & (SR_CMD_INHIBIT | SR_DAT_INHIBIT);
        let mut cnt = 10_0000;
        while (cnt > 0) && reg_val != 0 {
            wait_msec(1);
            cnt -= 1;
            reg_val = regs.status.read() & (SR_CMD_INHIBIT | SR_DAT_INHIBIT);
        }
        if cnt <= 0 {
            println!("ERROR: TIMEOUT WAITING FOR INHIBIT FLAG");
            return Err(Error::new(EINVAL));
        }
        // Gate the SD clock off before changing the divisor.
        reg_val = regs.control1.read();
        reg_val &= !C1_CLK_EN;
        regs.control1.write(reg_val);
        wait_msec(10);
        // Linear divisor: base clock / target frequency.
        let c = 4166_6666 / freq;
        // Bit-scan: find s such that 2^s is the nearest power-of-two divider
        // (clamped to 7, i.e. divide-by-128) for pre-v3 controllers.
        let mut x: u32 = c - 1;
        let mut s: u32 = 32;
        if x == 0 {
            s = 0;
        } else {
            if (x & 0xffff_0000) == 0 {
                x <<= 16;
                s -= 16;
            }
            if (x & 0xff00_0000) == 0 {
                x <<= 8;
                s -= 8;
            }
            if (x & 0xf000_0000) == 0 {
                x <<= 4;
                s -= 4;
            }
            if (x & 0xc000_0000) == 0 {
                x <<= 2;
                s -= 2;
            }
            if (x & 0x8000_0000) == 0 {
                x <<= 1;
                s -= 1;
            }
            if s > 0 {
                s -= 1;
            }
            if s > 7 {
                s = 7;
            }
        }
        // v3+: use the full linear divisor; older: use the power of two.
        let mut d;
        if self.host_spec_ver > HOST_SPEC_V2 {
            d = c;
        } else {
            d = 1 << s;
        }
        if d <= 2 {
            d = 2;
            s = 0;
        }
        println!("sd clk divisor: 0x{:08x}, shift: 0x{:08x}", d, s);
        // v3 divisors are 10 bits wide: bits 9:8 go into a separate field.
        let mut h = 0;
        if self.host_spec_ver > HOST_SPEC_V2 {
            h = (d & 0x300) >> 2;
        }
        d = ((d & 0xff) << 8) | h;
        // Merge the divisor into CONTROL1, preserving the other bits.
        reg_val = regs.control1.read() & 0xffff_003f;
        regs.control1.write(reg_val | d);
        wait_msec(10);
        // Re-enable the clock and poll for the "stable" flag.
        reg_val = regs.control1.read();
        regs.control1.write(reg_val | C1_CLK_EN);
        wait_msec(10);
        reg_val = regs.control1.read() & C1_CLK_STABLE;
        cnt = 10000;
        while cnt > 0 && reg_val == 0 {
            wait_msec(10);
            cnt -= 1;
            reg_val = regs.control1.read() & C1_CLK_STABLE;
        }
        if cnt <= 0 {
            println!("ERROR: failed to get stable clock");
            return Err(Error::new(EINVAL));
        }
        Ok(())
    }
    /// Issue one SD command (`code` is a pre-encoded CMDTM value) with the
    /// given argument and decode the response.
    ///
    /// Application commands (marked with the driver-internal `CMD_NEED_APP`
    /// bit) are transparently prefixed with CMD55. The meaning of the return
    /// value depends on the command: some commands return the raw response,
    /// CMD3 returns the RCA, CID/CSD responses are stored in `self` as a side
    /// effect. Errors are mapped to `EINVAL`.
    pub unsafe fn sd_cmd(&mut self, mut code: u32, arg: u32) -> Result<u32> {
        // ACMDs must be preceded by CMD55; once the RCA is known, CMD55
        // expects a 48-bit response.
        if (code & CMD_NEED_APP) != 0 {
            let pre_cmd = CMD_APP_CMD | if self.rca != 0 { CMD_RSPNS_48 } else { 0 };
            match self.sd_cmd(pre_cmd, self.rca) {
                Err(_) => {
                    println!("ERROR: failed to send SD APP command");
                    return Err(Error::new(EINVAL));
                }
                Ok(_) => {
                    code &= !CMD_NEED_APP;
                }
            }
        }
        // Wait for the command line to be free.
        if let Err(_) = self.sd_status(SR_CMD_INHIBIT) {
            println!("ERROR: Emmc busy");
            return Err(Error::new(EINVAL));
        }
        //println!("EMMC: Sending command 0x{:08x}, arg 0x{:08x}", code, arg);
        let regs = self.regs.get_mut().unwrap();
        // Acknowledge any stale interrupt flags (write-1-to-clear), then
        // write the argument and fire the command.
        let mut reg_val = regs.interrupt.read();
        regs.interrupt.write(reg_val);
        regs.arg1.write(arg);
        regs.cmdtm.write(code);
        // Fixed settle delays for commands known to need them.
        if code == CMD_SEND_OP_COND {
            wait_msec(1000);
        } else if code == CMD_SEND_IF_COND || code == CMD_APP_CMD {
            wait_msec(200);
        }
        // Wait for command completion (or an error interrupt).
        if let Err(_) = self.sd_int(INT_CMD_DONE) {
            println!("ERROR: failed to send EMMC command");
            return Err(Error::new(EINVAL));
        }
        let regs = self.regs.get_mut().unwrap();
        reg_val = regs.resp0.read();
        // Per-command response decoding.
        if code == CMD_GO_IDLE || code == CMD_APP_CMD {
            return Ok(0);
        } else if code == (CMD_APP_CMD | CMD_RSPNS_48) {
            // CMD55 response: report whether the APP_CMD bit was accepted.
            return Ok(reg_val & SR_APP_CMD);
        } else if code == CMD_SEND_OP_COND {
            return Ok(reg_val);
        } else if code == CMD_SEND_IF_COND {
            // CMD8 echoes its argument on success.
            if reg_val == arg {
                return Ok(0);
            } else {
                return Err(Error::new(EINVAL));
            }
        } else if code == CMD_ALL_SEND_CID {
            self.cid[0] = reg_val;
            self.cid[1] = regs.resp1.read();
            self.cid[2] = regs.resp2.read();
            self.cid[3] = regs.resp3.read();
            //FIXME: wrong implement, see CMD_SEND_CSD for detail
            return Ok(reg_val);
        } else if code == CMD_SEND_CSD {
            // 136-bit response: re-align the four words by 8 bits, since the
            // controller strips the CRC byte.
            let tmp0 = reg_val;
            let tmp1 = regs.resp1.read();
            let tmp2 = regs.resp2.read();
            let tmp3 = regs.resp3.read();
            self.csd[0] = tmp3 << 8 | tmp2 >> 24;
            self.csd[1] = tmp2 << 8 | tmp1 >> 24;
            self.csd[2] = tmp1 << 8 | tmp0 >> 24;
            self.csd[3] = tmp0 << 8;
            //FIXME: support variable length of result.
            return Ok(reg_val);
        } else if code == CMD_SEND_REL_ADDR {
            // CMD3: low 16 bits carry status, with three error flags packed
            // into bits 13-15; expand them into card-status positions.
            let mut err = reg_val & 0x1fff;
            err |= (reg_val & 0x2000) << 6;
            err |= (reg_val & 0x4000) << 8;
            err |= (reg_val & 0x8000) << 8;
            err &= CMD_ERRORS_MASK;
            if err != 0 {
                return Err(Error::new(EINVAL));
            } else {
                return Ok(reg_val & CMD_RCA_MASK);
            }
        } else {
            // Default: return any error bits of the card status.
            return Ok(reg_val & CMD_ERRORS_MASK);
        }
    }
pub unsafe fn sd_status(&mut self, mask: u32) -> Result<()> {
let regs = self.regs.get_mut().unwrap();
let mut cnt = 500000;
let mut reg_val = regs.status.read() & mask;
let mut reg_val1 = regs.interrupt.read() & INT_ERROR_MASK;
while cnt > 0 && reg_val != 0 && reg_val1 == 0 {
wait_msec(1);
cnt -= 1;
reg_val = regs.status.read() & mask;
reg_val1 = regs.interrupt.read() & INT_ERROR_MASK;
}
reg_val1 = regs.interrupt.read() & INT_ERROR_MASK;
if cnt <= 0 || reg_val1 != 0 {
return Err(Error::new(EINVAL));
} else {
return Ok(());
}
}
pub unsafe fn sd_int(&mut self, mask: u32) -> Result<()> {
let regs = self.regs.get_mut().unwrap();
let mut cnt = 100_0000;
let m = mask | INT_ERROR_MASK;
let mut reg_val = regs.interrupt.read() & m;
while cnt > 0 && reg_val == 0 {
wait_msec(1);
cnt -= 1;
reg_val = regs.interrupt.read() & m;
}
reg_val = regs.interrupt.read();
let err = reg_val & (INT_CMD_TIMEOUT | INT_DATA_TIMEOUT | INT_ERROR_MASK);
if cnt <= 0 || err != 0 {
regs.interrupt.write(reg_val);
return Err(Error::new(EINVAL));
} else {
regs.interrupt.write(mask);
return Ok(());
}
}
pub unsafe fn sd_readblock(&mut self, lba: u32, buf: &mut [u32], num: u32) -> Result<usize> {
let num = if num < 1 { 1 } else { num };
//println!("sd_readblock lba 0x{:x}, num 0x{:x}", lba, num);
if let Err(_) = self.sd_status(SR_DAT_INHIBIT) {
println!("SR_DAT_INHIBIT TIMEOUT");
return Err(Error::new(EINVAL));
}
if (self.scr[0] & SCR_SUPP_CCS) != 0 {
if num > 1 && ((self.scr[0] & SCR_SUPP_SET_BLKCNT) != 0) {
if let Err(_) = self.sd_cmd(CMD_SET_BLOCKCNT, num) {
println!("CMD_SET_BLOCKCNT ERROR");
return Err(Error::new(EINVAL));
}
}
let regs = self.regs.get_mut().unwrap();
regs.blksizecnt.write((num) << 16 | 512);
if num == 1 {
self.sd_cmd(CMD_READ_SINGLE, lba).unwrap();
} else {
self.sd_cmd(CMD_READ_MULTI, lba).unwrap();
}
} else {
let regs = self.regs.get_mut().unwrap();
regs.blksizecnt.write(1 << 16 | 512);
}
let mut cnt = 0;
while cnt < num {
if (self.scr[0] & SCR_SUPP_CCS) == 0 {
self.sd_cmd(CMD_READ_SINGLE, (lba + cnt) * 512).unwrap();
}
if let Err(_) = self.sd_int(INT_READ_RDY) {
println!("ERROR: Timeout waiting for ready to read");
return Err(Error::new(EINVAL));
}
let regs = self.regs.get_mut().unwrap();
regs.blksizecnt.write(1 << 16 | 512);
for d in 0..128 {
buf[(128 * cnt + d) as usize] = regs.data.read();
}
cnt += 1;
}
if num > 1 && (self.scr[0] & SCR_SUPP_SET_BLKCNT) == 0 && (self.scr[0] & SCR_SUPP_CCS) != 0
{
self.sd_cmd(CMD_STOP_TRANS, 0).unwrap();
}
Ok((num * 512) as usize)
}
pub unsafe fn sd_writeblock(&mut self, lba: u32, buf: &[u32], num: u32) -> Result<usize> {
let num = if num < 1 { 1 } else { num };
//println!("sd_writelock lba 0x{:x}, num 0x{:x}", lba, num);
if let Err(_) = self.sd_status(SR_DAT_INHIBIT | SR_WRITE_AVAILABLE) {
println!("SR_DAT_INHIBIT TIMEOUT");
return Err(Error::new(EINVAL));
}
if (self.scr[0] & SCR_SUPP_CCS) != 0 {
if num > 1 && ((self.scr[0] & SCR_SUPP_SET_BLKCNT) != 0) {
if let Err(_) = self.sd_cmd(CMD_SET_BLOCKCNT, num) {
println!("CMD_SET_BLOCKCNT ERROR");
return Err(Error::new(EINVAL));
}
}
let regs = self.regs.get_mut().unwrap();
regs.blksizecnt.write((num) << 16 | 512);
if num == 1 {
self.sd_cmd(CMD_WRITE_SINGLE, lba).unwrap();
} else {
self.sd_cmd(CMD_WRITE_MULTI, lba).unwrap();
}
} else {
let regs = self.regs.get_mut().unwrap();
regs.blksizecnt.write(1 << 16 | 512);
}
let mut cnt = 0;
while cnt < num {
if (self.scr[0] & SCR_SUPP_CCS) == 0 {
self.sd_cmd(CMD_WRITE_SINGLE, (lba + cnt) * 512).unwrap();
}
if let Err(_) = self.sd_int(INT_WRITE_RDY) {
println!("ERROR: Timeout waiting for ready to write");
return Err(Error::new(EINVAL));
}
let regs = self.regs.get_mut().unwrap();
regs.blksizecnt.write(1 << 16 | 512);
for d in 0..128 {
regs.data.write(buf[(128 * cnt + d) as usize]);
}
cnt += 1;
}
if let Err(_) = self.sd_int(INT_DATA_DONE) {
println!("ERROR: Timeout waiting for data done");
return Err(Error::new(EINVAL));
}
if num > 1 && (self.scr[0] & SCR_SUPP_SET_BLKCNT) == 0 && (self.scr[0] & SCR_SUPP_CCS) != 0
{
self.sd_cmd(CMD_STOP_TRANS, 0).unwrap();
}
Ok((num * 512) as usize)
}
}
impl Disk for SdHostCtrl {
    fn block_size(&self) -> u32 {
        512
    }

    fn size(&self) -> u64 {
        // Capacity in bytes, computed from the CSD during `init`.
        self.size
    }

    // TODO: real async?
    /// Read whole 512-byte blocks starting at `block` into `buffer`.
    ///
    /// The previous implementation unconditionally reinterpreted `buffer` as
    /// `&mut [u32]`, which is undefined behavior when the byte buffer is not
    /// 4-byte aligned; we now check the alignment and fall back to an aligned
    /// bounce buffer.
    async fn read(&mut self, block: u64, buffer: &mut [u8]) -> Result<usize> {
        if (buffer.len() % 512) != 0 {
            println!("buffer.len {} should be aligned to {}", buffer.len(), 512);
            return Err(Error::new(EINVAL));
        }
        let u32_len = buffer.len() / core::mem::size_of::<u32>();
        let num = buffer.len() / 512;
        if buffer.as_ptr().align_offset(core::mem::align_of::<u32>()) == 0 {
            // Fast path: the caller's buffer is word-aligned.
            // SAFETY: length is a multiple of 4 and alignment was verified.
            unsafe {
                let words =
                    core::slice::from_raw_parts_mut(buffer.as_mut_ptr() as *mut u32, u32_len);
                self.sd_readblock(block as u32, words, num as u32)
            }
        } else {
            // Slow path: bounce through an aligned temporary buffer.
            let mut words = vec![0u32; u32_len];
            let cnt = unsafe { self.sd_readblock(block as u32, &mut words, num as u32) }?;
            // SAFETY: `words` owns exactly `buffer.len()` initialized bytes.
            let bytes =
                unsafe { core::slice::from_raw_parts(words.as_ptr() as *const u8, buffer.len()) };
            buffer.copy_from_slice(bytes);
            Ok(cnt)
        }
    }

    // TODO: real async?
    /// Write whole 512-byte blocks starting at `block` from `buffer`.
    ///
    /// Same alignment handling as `read`.
    async fn write(&mut self, block: u64, buffer: &[u8]) -> Result<usize> {
        if (buffer.len() % 512) != 0 {
            println!("buffer.len {} should be aligned to {}", buffer.len(), 512);
            return Err(Error::new(EINVAL));
        }
        let u32_len = buffer.len() / core::mem::size_of::<u32>();
        let num = buffer.len() / 512;
        if buffer.as_ptr().align_offset(core::mem::align_of::<u32>()) == 0 {
            // Fast path: the caller's buffer is word-aligned.
            // SAFETY: length is a multiple of 4 and alignment was verified.
            unsafe {
                let words = core::slice::from_raw_parts(buffer.as_ptr() as *const u32, u32_len);
                self.sd_writeblock(block as u32, words, num as u32)
            }
        } else {
            // Slow path: copy into an aligned temporary buffer first.
            let mut words = vec![0u32; u32_len];
            // SAFETY: `words` provides exactly `buffer.len()` bytes of storage.
            unsafe {
                core::ptr::copy_nonoverlapping(
                    buffer.as_ptr(),
                    words.as_mut_ptr() as *mut u8,
                    buffer.len(),
                )
            };
            unsafe { self.sd_writeblock(block as u32, &words, num as u32) }
        }
    }
}
@@ -0,0 +1,23 @@
[package]
name = "driver-block"
description = "Shared storage driver code"
version = "0.1.0"
edition = "2021"
[dependencies]
daemon = { path = "../../../daemon" }
executor = { path = "../../executor" }
partitionlib = { path = "../partitionlib" }
libredox.workspace = true
log.workspace = true
# TODO: migrate virtio to our executor
futures = { version = "0.3.28", features = ["executor"] }
redox_syscall = { workspace = true, features = ["std"] }
redox-scheme.workspace = true
scheme-utils = { path = "../../../scheme-utils" }
[lints]
workspace = true
@@ -0,0 +1,661 @@
use std::cmp;
use std::future::{Future, IntoFuture};
use std::io::{self, Read, Seek, SeekFrom};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::fmt::Write;
use std::str;
use std::task::Poll;
use executor::LocalExecutor;
use libredox::Fd;
use partitionlib::{LogicalBlockSize, PartitionTable};
use redox_scheme::scheme::{register_scheme_inner, SchemeAsync, SchemeState};
use redox_scheme::{CallerCtx, OpenResult, RequestKind, Response, SignalBehavior, Socket};
use scheme_utils::{FpathWriter, HandleMap};
use syscall::dirent::DirentBuf;
use syscall::schemev2::NewFdFlags;
use syscall::{
Error, Result, Stat, EACCES, EAGAIN, EBADF, EINTR, EINVAL, EISDIR, ENOENT, ENOLCK, EOPNOTSUPP,
EOVERFLOW, EWOULDBLOCK, MODE_DIR, MODE_FILE, O_DIRECTORY, O_STAT,
};
/// Perform a byte-granular read on top of a block-granular device.
///
/// `read_fn(block, buf)` is called once per touched block and must fill `buf`
/// with that block's full contents. Returns the number of bytes copied into
/// `buf`. Blocks larger than 4096 bytes are not supported (fixed scratch
/// buffer).
fn block_read(
    offset: u64,
    blksize: u32,
    buf: &mut [u8],
    mut read_fn: impl FnMut(u64, &mut [u8]) -> io::Result<()>,
) -> io::Result<usize> {
    // TODO: Yield sometimes, perhaps after a few blocks or something.
    if buf.is_empty() {
        return Ok(0);
    }
    let len64 = u64::try_from(buf.len()).expect("buf.len() larger than u64");
    // Clamp the span so `offset + len` cannot overflow u64.
    let span = usize::try_from(offset.saturating_add(len64) - offset)
        .expect("bytes to copy larger than usize");
    let block_len = usize::try_from(blksize).expect("blksize larger than usize");
    // One reusable scratch block; sliced down to the device's block size.
    let mut scratch = [0u8; 4096];
    let scratch = &mut scratch[..block_len];
    let mut remaining = &mut buf[..span];
    let mut pos = offset;
    let mut copied = 0;
    while !remaining.is_empty() {
        // TODO: Async/await? I mean, shouldn't AHCI be async?
        // Offset of `pos` within its containing block.
        let within =
            usize::try_from(pos % u64::from(blksize)).expect("usize smaller than blksize");
        let chunk = cmp::min(remaining.len(), block_len - within);
        assert!(within + chunk <= block_len);
        read_fn(pos / u64::from(blksize), scratch)?;
        remaining[..chunk].copy_from_slice(&scratch[within..within + chunk]);
        remaining = &mut remaining[chunk..];
        pos += u64::try_from(chunk).expect("bytes to copy larger than u64");
        copied += chunk;
    }
    Ok(copied)
}
/// Minimal block-device abstraction implemented by each storage backend.
pub trait Disk {
    /// Size in bytes of one device block (e.g. 512 or 4096).
    fn block_size(&self) -> u32;
    /// Total device capacity in bytes.
    fn size(&self) -> u64;

    // These operate on a whole multiple of the block size
    // FIXME maybe only operate on a single block worth of data?
    /// Read whole blocks starting at `block` into `buffer`; returns the
    /// number of bytes read.
    async fn read(&mut self, block: u64, buffer: &mut [u8]) -> syscall::Result<usize>;
    /// Write whole blocks starting at `block` from `buffer`; returns the
    /// number of bytes written.
    async fn write(&mut self, block: u64, buffer: &[u8]) -> syscall::Result<usize>;
}
/// Forwarding impl so boxed disks (including trait objects) are `Disk` too.
impl<T: Disk + ?Sized> Disk for Box<T> {
    fn block_size(&self) -> u32 {
        (**self).block_size()
    }

    fn size(&self) -> u64 {
        (**self).size()
    }

    async fn read(&mut self, block: u64, buffer: &mut [u8]) -> syscall::Result<usize> {
        (**self).read(block, buffer).await
    }

    async fn write(&mut self, block: u64, buffer: &[u8]) -> syscall::Result<usize> {
        (**self).write(block, buffer).await
    }
}
/// A disk plus its parsed partition table (if one was found at probe time).
pub struct DiskWrapper<T> {
    pub disk: T,
    /// `None` when no table was found or the block size is unsupported.
    pub pt: Option<PartitionTable>,
}
impl<T: Disk> DiskWrapper<T> {
    /// Probe `disk` for a partition table.
    ///
    /// Only 512- and 4096-byte block sizes are supported. Internally adapts
    /// the async `Disk` API to the blocking `Read + Seek` interface that
    /// `partitionlib` expects, driving each read with `executor`. Probe
    /// failures are swallowed and reported as `None`.
    pub fn pt(disk: &mut T, executor: &impl ExecutorTrait) -> Option<PartitionTable> {
        let bs = match disk.block_size() {
            512 => LogicalBlockSize::Lb512,
            4096 => LogicalBlockSize::Lb4096,
            _ => return None,
        };
        // Blocking adapter over the async disk, with a byte-level cursor.
        struct Device<'a, D: Disk, E: ExecutorTrait> {
            disk: &'a mut D,
            executor: &'a E,
            offset: u64,
        }
        impl<'a, D: Disk, E: ExecutorTrait> Seek for Device<'a, D, E> {
            fn seek(&mut self, from: SeekFrom) -> io::Result<u64> {
                let size = i64::try_from(self.disk.size()).or(Err(io::Error::new(
                    io::ErrorKind::Other,
                    "Disk larger than 2^63 - 1 bytes",
                )))?;
                // Clamp every seek into [0, size].
                self.offset = match from {
                    SeekFrom::Start(new_pos) => cmp::min(self.disk.size(), new_pos),
                    SeekFrom::Current(new_pos) => {
                        cmp::max(0, cmp::min(size, self.offset as i64 + new_pos)) as u64
                    }
                    SeekFrom::End(new_pos) => cmp::max(0, cmp::min(size + new_pos, size)) as u64,
                };
                Ok(self.offset)
            }
        }
        // TODO: Perhaps this impl should be used in the rest of the scheme.
        impl<'a, D: Disk, E: ExecutorTrait> Read for Device<'a, D, E> {
            fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
                let blksize = self.disk.block_size();
                let size_in_blocks = self.disk.size() / u64::from(blksize);
                let disk = &mut self.disk;
                // Per-block callback for `block_read`: reject out-of-range
                // blocks and block on the async read.
                let read_block = |block: u64, block_bytes: &mut [u8]| {
                    if block >= size_in_blocks {
                        return Err(io::Error::from_raw_os_error(syscall::EOVERFLOW));
                    }
                    let bytes = self.executor.block_on(disk.read(block, block_bytes))?;
                    assert_eq!(bytes, block_bytes.len());
                    Ok(())
                };
                let bytes_read = block_read(self.offset, blksize, buf, read_block)?;
                self.offset += bytes_read as u64;
                Ok(bytes_read)
            }
        }
        partitionlib::get_partitions(
            &mut Device {
                disk,
                offset: 0,
                executor,
            },
            bs,
        )
        .ok()
        .flatten()
    }
pub fn new(mut disk: T, executor: &impl ExecutorTrait) -> Self {
Self {
pt: Self::pt(&mut disk, executor),
disk,
}
}
    /// Borrow the underlying disk.
    pub fn disk(&self) -> &T {
        &self.disk
    }

    /// Mutably borrow the underlying disk.
    pub fn disk_mut(&mut self) -> &mut T {
        &mut self.disk
    }

    /// Block size of the underlying disk, in bytes.
    pub fn block_size(&self) -> u32 {
        self.disk.block_size()
    }

    /// Capacity of the underlying disk, in bytes.
    pub fn size(&self) -> u64 {
        self.disk.size()
    }
pub async fn read(
&mut self,
part_num: Option<usize>,
block: u64,
buf: &mut [u8],
) -> syscall::Result<usize> {
if buf.len() as u64 % u64::from(self.disk.block_size()) != 0 {
return Err(Error::new(EINVAL));
}
if let Some(part_num) = part_num {
let part = self
.pt
.as_ref()
.ok_or(syscall::Error::new(EBADF))?
.partitions
.get(part_num)
.ok_or(syscall::Error::new(EBADF))?;
if block >= part.size {
return Err(syscall::Error::new(EOVERFLOW));
}
let abs_block = part.start_lba + block;
self.disk.read(abs_block, buf).await
} else {
self.disk.read(block, buf).await
}
}
pub async fn write(
&mut self,
part_num: Option<usize>,
block: u64,
buf: &[u8],
) -> syscall::Result<usize> {
if buf.len() as u64 % u64::from(self.disk.block_size()) != 0 {
return Err(Error::new(EINVAL));
}
if let Some(part_num) = part_num {
let part = self
.pt
.as_ref()
.ok_or(syscall::Error::new(EBADF))?
.partitions
.get(part_num)
.ok_or(syscall::Error::new(EBADF))?;
if block >= part.size {
return Err(syscall::Error::new(EOVERFLOW));
}
let abs_block = part.start_lba + block;
self.disk.write(abs_block, buf).await
} else {
self.disk.write(block, buf).await
}
}
}
/// The `disk.*` scheme: serves raw disks and their partitions as files.
pub struct DiskScheme<T> {
    inner: DiskSchemeInner<T>,
    // Per-socket request-dispatch state used by `handle_async`.
    state: SchemeState,
}
impl<T: Disk> DiskScheme<T> {
pub fn new(
daemon: Option<daemon::Daemon>,
scheme_name: String,
disks: BTreeMap<u32, T>,
executor: &impl ExecutorTrait,
) -> Self {
assert!(scheme_name.starts_with("disk"));
let socket = Socket::nonblock().expect("failed to create disk scheme");
let mut inner = DiskSchemeInner {
scheme_name: scheme_name,
socket,
disks: disks
.into_iter()
.map(|(k, disk)| (k, DiskWrapper::new(disk, executor)))
.collect(),
handles: HandleMap::new(),
};
let cap_id = inner.scheme_root().expect("failed to get this scheme root");
register_scheme_inner(&inner.socket, &inner.scheme_name, cap_id)
.expect("failed to register disk scheme root");
if let Some(daemon) = daemon {
daemon.ready();
}
Self {
inner,
state: SchemeState::new(),
}
}
    /// File descriptor of the scheme socket, for event-queue subscription.
    pub fn event_handle(&self) -> &Fd {
        self.inner.socket.inner()
    }
    /// Process pending and new requests.
    ///
    /// This needs to be called each time there is a new event on the scheme.
    /// Drains the nonblocking socket until it would block; exits the process
    /// when the scheme is unmounted (socket returns `None`).
    pub async fn tick(&mut self) -> io::Result<()> {
        // Handle new scheme requests
        loop {
            let request = match self.inner.socket.next_request(SignalBehavior::Interrupt) {
                Ok(Some(request)) => request,
                Ok(None) => {
                    // Scheme likely got unmounted
                    // TODO: return this to caller instead
                    std::process::exit(0);
                }
                // No more queued requests: done for this tick.
                Err(error) if error.errno == EWOULDBLOCK || error.errno == EAGAIN => break,
                // Interrupted by a signal: just retry.
                Err(err) if err.errno == EINTR => continue,
                Err(err) => return Err(err.into()),
            };
            let response = match request.kind() {
                RequestKind::Call(call_request) => {
                    // TODO: Spawn a separate task for each scheme call. This would however require the
                    // use of a smarter buffer pool (or direct IO, or a buffer per fd) in order to do
                    // parallel IO. It might also require async-aware locks so that a close() is
                    // correctly ordered wrt IO on the same fd.
                    call_request
                        .handle_async(&mut self.inner, &mut self.state)
                        .await
                }
                RequestKind::SendFd(request) => Response::err(EOPNOTSUPP, request),
                RequestKind::RecvFd(request) => Response::err(EOPNOTSUPP, request),
                RequestKind::Cancellation(_cancellation_request) => {
                    // FIXME implement cancellation
                    continue;
                }
                // Mmap-style messages are never requested by this scheme.
                RequestKind::MsyncMsg | RequestKind::MunmapMsg | RequestKind::MmapMsg => {
                    unreachable!()
                }
                RequestKind::OnClose { id } => {
                    self.inner.on_close(id);
                    continue;
                }
                RequestKind::OnDetach { .. } => continue,
            };
            self.inner
                .socket
                .write_response(response, SignalBehavior::Restart)?;
        }
        Ok(())
    }
}
/// What an open file descriptor on this scheme refers to.
enum Handle {
    List(Vec<u8>),       // entries: newline-separated directory listing bytes
    Disk(u32),           // disk num
    Partition(u32, u32), // disk num, part num
    SchemeRoot,
}
/// Scheme state shared by all request handlers.
struct DiskSchemeInner<T> {
    // Name the scheme was registered under (always starts with "disk").
    scheme_name: String,
    socket: Socket,
    // Disks keyed by disk number (the number used in paths).
    disks: BTreeMap<u32, DiskWrapper<T>>,
    // Table of open file handles.
    handles: HandleMap<Handle>,
}
/// Minimal executor abstraction: drive a future to completion on the
/// current thread and return its output.
pub trait ExecutorTrait {
    fn block_on<'a, O: 'a>(&self, fut: impl IntoFuture<Output = O> + 'a) -> O;
}
/// The project's own executor simply forwards to its inherent `block_on`.
impl<Hw: executor::Hardware> ExecutorTrait for LocalExecutor<Hw> {
    fn block_on<'a, O: 'a>(&self, fut: impl IntoFuture<Output = O> + 'a) -> O {
        LocalExecutor::block_on(self, fut)
    }
}
/// Executor backed by `futures::executor::block_on`; kept only for drivers
/// that have not yet migrated to the project's own executor.
#[deprecated = "use custom executor"]
pub struct FuturesExecutor;

#[allow(deprecated)]
impl ExecutorTrait for FuturesExecutor {
    fn block_on<'a, O: 'a>(&self, fut: impl IntoFuture<Output = O> + 'a) -> O {
        futures::executor::block_on(fut.into_future())
    }
}
/// Executor for futures that are expected to complete without ever
/// suspending. If the future does return `Pending`, it is busy-polled with a
/// no-op waker (and a warning is logged on every pending poll).
pub struct TrivialExecutor;

impl ExecutorTrait for TrivialExecutor {
    fn block_on<'a, O: 'a>(&self, fut: impl IntoFuture<Output = O> + 'a) -> O {
        let mut pinned = std::pin::pin!(fut.into_future());
        let mut cx = std::task::Context::from_waker(std::task::Waker::noop());
        loop {
            if let Poll::Ready(out) = pinned.as_mut().poll(&mut cx) {
                return out;
            }
            // A "trivial" future should never suspend; spin and complain.
            log::warn!("TrivialExecutor: future wasn't trivial");
        }
    }
}
impl<T: Disk> DiskSchemeInner<T> {
    /// Refuse an open that would conflict with an existing handle.
    ///
    /// A whole-disk handle conflicts with any handle on that disk; a
    /// partition handle conflicts with the whole disk and with the same
    /// partition. Returns `ENOLCK` on conflict.
    fn check_locks(&self, disk_i: u32, part_i_opt: Option<u32>) -> Result<()> {
        for (_, handle) in self.handles.iter() {
            let conflict = match *handle {
                // An existing whole-disk handle blocks everything on it.
                Handle::Disk(i) => i == disk_i,
                // An existing partition handle blocks the whole disk
                // (part_i_opt == None) and the same partition.
                Handle::Partition(i, p) => {
                    i == disk_i && part_i_opt.map_or(true, |part_i| part_i == p)
                }
                _ => false,
            };
            if conflict {
                return Err(Error::new(ENOLCK));
            }
        }
        Ok(())
    }
}
impl<T: Disk> SchemeAsync for DiskSchemeInner<T> {
    /// Allocate the root handle that `openat` requires as its base dirfd.
    fn scheme_root(&mut self) -> Result<usize> {
        Ok(self.handles.insert(Handle::SchemeRoot))
    }
async fn openat(
&mut self,
dirfd: usize,
path_str: &str,
flags: usize,
_fcntl_flags: u32,
ctx: &CallerCtx,
) -> Result<OpenResult> {
if !matches!(self.handles.get(dirfd)?, Handle::SchemeRoot) {
return Err(Error::new(EACCES));
}
if ctx.uid != 0 {
return Err(Error::new(EACCES));
}
let path_str = path_str.trim_matches('/');
let handle = if path_str.is_empty() {
if flags & O_DIRECTORY == O_DIRECTORY || flags & O_STAT == O_STAT {
let mut list = String::new();
for (nsid, disk) in self.disks.iter() {
write!(list, "{}\n", nsid).unwrap();
if disk.pt.is_none() {
continue;
}
for part_num in 0..disk.pt.as_ref().unwrap().partitions.len() {
write!(list, "{}p{}\n", nsid, part_num).unwrap();
}
}
Handle::List(list.into_bytes())
} else {
return Err(Error::new(EISDIR));
}
} else if let Some(p_pos) = path_str.chars().position(|c| c == 'p') {
let nsid_str = &path_str[..p_pos];
if p_pos + 1 >= path_str.len() {
return Err(Error::new(ENOENT));
}
let part_num_str = &path_str[p_pos + 1..];
let nsid = nsid_str.parse::<u32>().or(Err(Error::new(ENOENT)))?;
let part_num = part_num_str.parse::<u32>().or(Err(Error::new(ENOENT)))?;
if let Some(disk) = self.disks.get(&nsid) {
if disk
.pt
.as_ref()
.ok_or(Error::new(ENOENT))?
.partitions
.get(part_num as usize)
.is_some()
{
self.check_locks(nsid, Some(part_num))?;
Handle::Partition(nsid, part_num)
} else {
return Err(Error::new(ENOENT));
}
} else {
return Err(Error::new(ENOENT));
}
} else {
let nsid = path_str.parse::<u32>().or(Err(Error::new(ENOENT)))?;
if self.disks.contains_key(&nsid) {
self.check_locks(nsid, None)?;
Handle::Disk(nsid)
} else {
return Err(Error::new(ENOENT));
}
};
let id = self.handles.insert(handle);
Ok(OpenResult::ThisScheme {
number: id,
flags: NewFdFlags::POSITIONED,
})
}
    /// Directory enumeration is not implemented yet; reading the listing
    /// handle (see `openat` with an empty path) serves that purpose instead.
    async fn getdents<'buf>(
        &mut self,
        _id: usize,
        _buf: DirentBuf<&'buf mut [u8]>,
        _opaque_offset: u64,
    ) -> Result<DirentBuf<&'buf mut [u8]>> {
        // TODO
        Err(Error::new(EOPNOTSUPP))
    }
    /// Fill `stat` for a handle: the listing is a directory whose size is
    /// the listing byte count; disks and partitions are files with their
    /// size, block size and block count set.
    async fn fstat(&mut self, id: usize, stat: &mut Stat, _ctx: &CallerCtx) -> Result<()> {
        match *self.handles.get(id)? {
            Handle::List(ref data) => {
                stat.st_mode = MODE_DIR;
                stat.st_size = data.len() as u64;
                Ok(())
            }
            Handle::Disk(number) => {
                let disk = self.disks.get_mut(&number).ok_or(Error::new(EBADF))?;
                stat.st_mode = MODE_FILE;
                stat.st_blocks = disk.disk().size() / u64::from(disk.block_size());
                stat.st_blksize = disk.block_size();
                stat.st_size = disk.size();
                Ok(())
            }
            Handle::Partition(disk_num, part_num) => {
                let disk = self.disks.get_mut(&disk_num).ok_or(Error::new(EBADF))?;
                let part = disk
                    .pt
                    .as_ref()
                    .ok_or(Error::new(EBADF))?
                    .partitions
                    .get(part_num as usize)
                    .ok_or(Error::new(EBADF))?;
                stat.st_mode = MODE_FILE;
                // Partition sizes are stored in blocks; convert to bytes.
                stat.st_size = part.size * u64::from(disk.block_size());
                stat.st_blocks = part.size;
                stat.st_blksize = disk.block_size();
                Ok(())
            }
            Handle::SchemeRoot => Err(Error::new(EBADF)),
        }
    }
    /// Write the canonical path of handle `id` (relative to the scheme name)
    /// into `buf`, returning the number of bytes written.
    async fn fpath(&mut self, id: usize, buf: &mut [u8], _ctx: &CallerCtx) -> Result<usize> {
        FpathWriter::with_legacy(buf, &self.scheme_name, |w| {
            match *self.handles.get(id)? {
                // The listing handle is the scheme root itself: no suffix.
                Handle::List(_) => (),
                Handle::Disk(number) => {
                    write!(w, "{number}").unwrap();
                }
                // Partitions use the "<disk>p<partition>" naming convention.
                Handle::Partition(disk_num, part_num) => {
                    write!(w, "{disk_num}p{part_num}").unwrap();
                }
                Handle::SchemeRoot => return Err(Error::new(EBADF)),
            }
            Ok(())
        })
    }
    /// Read from the object behind handle `id` at byte `offset`.
    ///
    /// For disk and partition handles the byte offset is converted into a
    /// block number (offsets are expected to be block-aligned via
    /// `NewFdFlags::POSITIONED`) and the request is forwarded to the disk.
    async fn read(
        &mut self,
        id: usize,
        buf: &mut [u8],
        offset: u64,
        _fcntl_flags: u32,
        _ctx: &CallerCtx,
    ) -> Result<usize> {
        match *self.handles.get_mut(id)? {
            Handle::List(ref handle) => {
                // Offsets past the end of the listing produce an empty slice,
                // i.e. EOF (0 bytes copied).
                let src = usize::try_from(offset)
                    .ok()
                    .and_then(|o| handle.get(o..))
                    .unwrap_or(&[]);
                let count = core::cmp::min(src.len(), buf.len());
                buf[..count].copy_from_slice(&src[..count]);
                Ok(count)
            }
            Handle::Disk(number) => {
                let disk = self.disks.get_mut(&number).ok_or(Error::new(EBADF))?;
                let block = offset / u64::from(disk.block_size());
                disk.read(None, block, buf).await
            }
            Handle::Partition(disk_num, part_num) => {
                let disk = self.disks.get_mut(&disk_num).ok_or(Error::new(EBADF))?;
                let block = offset / u64::from(disk.block_size());
                disk.read(Some(part_num as usize), block, buf).await
            }
            Handle::SchemeRoot => Err(Error::new(EBADF)),
        }
    }
    /// Write to the object behind handle `id` at byte `offset`.
    ///
    /// The listing handle is read-only and rejects writes with `EBADF`;
    /// disk/partition handles forward the block-level write to the disk.
    async fn write(
        &mut self,
        id: usize,
        buf: &[u8],
        offset: u64,
        _fcntl_flags: u32,
        _ctx: &CallerCtx,
    ) -> Result<usize> {
        match *self.handles.get_mut(id)? {
            Handle::List(_) => Err(Error::new(EBADF)),
            Handle::Disk(number) => {
                let disk = self.disks.get_mut(&number).ok_or(Error::new(EBADF))?;
                let block = offset / u64::from(disk.block_size());
                disk.write(None, block, buf).await
            }
            Handle::Partition(disk_num, part_num) => {
                let disk = self.disks.get_mut(&disk_num).ok_or(Error::new(EBADF))?;
                let block = offset / u64::from(disk.block_size());
                disk.write(Some(part_num as usize), block, buf).await
            }
            Handle::SchemeRoot => Err(Error::new(EBADF)),
        }
    }
    /// Total size in bytes of the object behind handle `id`.
    async fn fsize(&mut self, id: usize, _ctx: &CallerCtx) -> Result<u64> {
        Ok(match *self.handles.get_mut(id)? {
            Handle::List(ref handle) => handle.len() as u64,
            Handle::Disk(number) => {
                let disk = self.disks.get_mut(&number).ok_or(Error::new(EBADF))?;
                disk.size()
            }
            Handle::Partition(disk_num, part_num) => {
                let disk = self.disks.get_mut(&disk_num).ok_or(Error::new(EBADF))?;
                let part = disk
                    .pt
                    .as_ref()
                    .ok_or(Error::new(EBADF))?
                    .partitions
                    .get(part_num as usize)
                    .ok_or(Error::new(EBADF))?;
                // Partition sizes are stored in blocks; convert to bytes.
                part.size * u64::from(disk.block_size())
            }
            Handle::SchemeRoot => return Err(Error::new(EBADF)),
        })
    }
}
impl<D: Disk> DiskSchemeInner<D> {
    /// Close hook: drop the handle table entry for `id`. Removing an id that
    /// was never present is deliberately a no-op (the result is discarded).
    pub fn on_close(&mut self, id: usize) {
        let _ = self.handles.remove(id);
    }
}
@@ -0,0 +1 @@
/target
@@ -0,0 +1,18 @@
[package]
name = "ided"
description = "PATA (IDE) driver"
version = "0.1.0"
edition = "2021"
[dependencies]
common = { path = "../../common" }
driver-block = { path = "../driver-block" }
libredox.workspace = true
log.workspace = true
pcid = { path = "../../pcid" }
daemon = { path = "../../../daemon" }
redox_syscall = { workspace = true, features = ["std"] }
redox_event.workspace = true
[lints]
workspace = true
@@ -0,0 +1,469 @@
use std::{
convert::TryInto,
sync::{Arc, Mutex},
thread,
time::{Duration, Instant},
};
use driver_block::Disk;
use syscall::error::{Error, Result, EIO};
use common::dma::Dma;
use common::io::{Io, Pio, ReadOnly, WriteOnly};
/// Upper bound for busy-wait polling on ATA status and bus-master registers.
const TIMEOUT: Duration = Duration::new(5, 0);
/// ATA command opcodes written to the command register.
#[repr(u8)]
pub enum AtaCommand {
    ReadPio = 0x20,
    ReadPioExt = 0x24,
    ReadDma = 0xC8,
    ReadDmaExt = 0x25,
    WritePio = 0x30,
    WritePioExt = 0x34,
    WriteDma = 0xCA,
    WriteDmaExt = 0x35,
    CacheFlush = 0xE7,
    CacheFlushExt = 0xEA,
    Packet = 0xA0,
    IdentifyPacket = 0xA1,
    Identify = 0xEC,
}
/// One Physical Region Descriptor Table entry, as consumed by the IDE
/// bus-master DMA engine. Layout is fixed by hardware, hence the packed repr.
#[repr(C, packed)]
struct PrdtEntry {
    // Physical address of the data buffer (32-bit; must be DMA-reachable).
    phys: u32,
    // Byte count of this region.
    size: u16,
    // Bit 15 marks the end-of-table entry; other bits unused here.
    flags: u16,
}
/// Port-I/O register set of one IDE channel (primary or secondary), plus the
/// DMA structures used by bus-master transfers.
pub struct Channel {
    // data8/data32 are the same port accessed with different widths.
    pub data8: Pio<u8>,
    pub data32: Pio<u32>,
    // base+1 reads the error register, writes the features register.
    pub error: ReadOnly<Pio<u8>>,
    pub features: WriteOnly<Pio<u8>>,
    pub sector_count: Pio<u8>,
    pub lba_0: Pio<u8>,
    pub lba_1: Pio<u8>,
    pub lba_2: Pio<u8>,
    pub device_select: Pio<u8>,
    // base+7 reads status, writes a command opcode.
    pub status: ReadOnly<Pio<u8>>,
    pub command: WriteOnly<Pio<u8>>,
    // Control block: alternate status (read) / device control (write).
    pub alt_status: ReadOnly<Pio<u8>>,
    pub control: WriteOnly<Pio<u8>>,
    pub busmaster_command: Pio<u8>,
    pub busmaster_status: Pio<u8>,
    pub busmaster_prdt: Pio<u32>,
    // PRDT and bounce buffer for DMA: 128 entries of 512 bytes = 64 KiB max
    // per transfer chunk.
    prdt: Dma<[PrdtEntry; 128]>,
    buf: Dma<[u8; 128 * 512]>,
}
impl Channel {
    /// Construct a channel from its command-block base port, control-block
    /// port, and bus-master base port, allocating the DMA PRDT and buffer.
    pub fn new(base: u16, control_base: u16, busmaster_base: u16) -> Result<Self> {
        Ok(Self {
            data8: Pio::new(base + 0),
            data32: Pio::new(base + 0),
            error: ReadOnly::new(Pio::new(base + 1)),
            features: WriteOnly::new(Pio::new(base + 1)),
            sector_count: Pio::new(base + 2),
            lba_0: Pio::new(base + 3),
            lba_1: Pio::new(base + 4),
            lba_2: Pio::new(base + 5),
            device_select: Pio::new(base + 6),
            status: ReadOnly::new(Pio::new(base + 7)),
            command: WriteOnly::new(Pio::new(base + 7)),
            alt_status: ReadOnly::new(Pio::new(control_base)),
            control: WriteOnly::new(Pio::new(control_base)),
            busmaster_command: Pio::new(busmaster_base),
            busmaster_status: Pio::new(busmaster_base + 2),
            busmaster_prdt: Pio::new(busmaster_base + 4),
            prdt: unsafe {
                Dma::zeroed(
                    //TODO: PhysBox::new_in_32bit_space(4096)?
                )?
                .assume_init()
            },
            buf: unsafe {
                Dma::zeroed(
                    //TODO: PhysBox::new_in_32bit_space(16 * 4096)?
                )?
                .assume_init()
            },
        })
    }
    /// Primary channel at the legacy compatibility ports (0x1F0/0x3F6).
    pub fn primary_compat(busmaster_base: u16) -> Result<Self> {
        Self::new(0x1F0, 0x3F6, busmaster_base)
    }
    /// Secondary channel at the legacy compatibility ports (0x170/0x376).
    pub fn secondary_compat(busmaster_base: u16) -> Result<Self> {
        Self::new(0x170, 0x376, busmaster_base)
    }
    /// Read the status register, converting the error (bit 0) and device
    /// write fault (bit 5) conditions into `EIO`.
    fn check_status(&mut self) -> Result<u8> {
        let status = self.status.read();
        if status & 0x01 != 0 {
            log::error!("IDE error: {:#x}", self.error.read());
            return Err(Error::new(EIO));
        }
        if status & 0x20 != 0 {
            log::error!("IDE device write fault");
            return Err(Error::new(EIO));
        }
        Ok(status)
    }
    /// Poll until the device clears BSY (bit 7), up to `TIMEOUT`.
    ///
    /// When `read` is true, additionally requires DRQ (bit 3) once BSY drops.
    /// `line` is the caller's source line, used only in the timeout log.
    fn polling(&mut self, read: bool, line: u32) -> Result<()> {
        /*
        #define ATA_SR_BSY     0x80    // Busy
        #define ATA_SR_DRDY    0x40    // Drive ready
        #define ATA_SR_DF      0x20    // Drive write fault
        #define ATA_SR_DSC     0x10    // Drive seek complete
        #define ATA_SR_DRQ     0x08    // Data request ready
        #define ATA_SR_CORR    0x04    // Corrected data
        #define ATA_SR_IDX     0x02    // Index
        #define ATA_SR_ERR     0x01    // Error
        */
        for _ in 0..4 {
            // Doing this 4 times creates a 400ns delay
            self.alt_status.read();
        }
        let start = Instant::now();
        loop {
            let status = self.check_status()?;
            if status & 0x80 == 0 {
                if read && status & 0x08 == 0 {
                    log::error!("IDE read data not ready");
                    return Err(Error::new(EIO));
                }
                break;
            }
            if start.elapsed() >= TIMEOUT {
                log::error!(
                    "line {} polling {} timeout with status 0x{:02X}",
                    line,
                    if read { "read" } else { "write" },
                    status
                );
                return Err(Error::new(EIO));
            }
            thread::yield_now();
        }
        Ok(())
    }
}
/// One ATA device on a shared IDE channel.
pub struct AtaDisk {
    // Channel is shared between the two devices (master/slave) on it.
    pub chan: Arc<Mutex<Channel>>,
    // Channel index, used only for logging.
    pub chan_i: usize,
    // Device select: 0 = master, 1 = slave.
    pub dev: u8,
    // Capacity in bytes.
    pub size: u64,
    // Whether to use bus-master DMA rather than PIO.
    pub dma: bool,
    // Whether the device supports 48-bit LBA addressing.
    pub lba_48: bool,
}
impl Disk for AtaDisk {
    /// This driver always uses 512-byte logical sectors.
    fn block_size(&self) -> u32 {
        512
    }
    /// Capacity in bytes, as probed from the IDENTIFY data.
    fn size(&self) -> u64 {
        self.size
    }
    // NOTE: not async
    /// Read sectors starting at `start_block` into `buffer`.
    ///
    /// The transfer is split into 64 KiB chunks (128 sectors, the PRDT limit).
    /// Uses bus-master DMA through the channel bounce buffer when `self.dma`
    /// is set, otherwise 32-bit PIO. Returns the number of bytes read.
    async fn read(&mut self, start_block: u64, buffer: &mut [u8]) -> Result<usize> {
        let mut count = 0;
        for chunk in buffer.chunks_mut(65536) {
            let block = start_block + (count as u64) / 512;
            //TODO: support other LBA modes
            // 48-bit LBA limit.
            assert!(block < 0x1_0000_0000_0000);
            let sectors = (chunk.len() + 511) / 512;
            assert!(sectors <= 128);
            log::trace!(
                "IDE read chan {} dev {} block {:#x} count {:#x}",
                self.chan_i,
                self.dev,
                block,
                sectors
            );
            let mut chan = self.chan.lock().unwrap();
            if self.dma {
                // Stop bus master
                chan.busmaster_command.writef(1, false);
                // Make PRDT EOT match chunk size
                for i in 0..sectors {
                    chan.prdt[i] = PrdtEntry {
                        phys: (chan.buf.physical() + i * 512).try_into().unwrap(),
                        size: 512,
                        flags: if i + 1 == sectors {
                            1 << 15 // End of table
                        } else {
                            0
                        },
                    };
                }
                // Set PRDT
                let prdt = chan.prdt.physical();
                chan.busmaster_prdt.write(prdt.try_into().unwrap());
                // Set to read
                chan.busmaster_command.writef(1 << 3, true);
                // Clear interrupt and error bits
                chan.busmaster_status.write(0b110);
            }
            // Select drive
            //TODO: upper part of LBA 28
            chan.device_select.write(0xE0 | (self.dev << 4));
            if self.lba_48 {
                // Set high sector count and LBA
                chan.control.write(0x80);
                chan.sector_count.write((sectors >> 8) as u8);
                chan.lba_0.write((block >> 24) as u8);
                chan.lba_1.write((block >> 32) as u8);
                chan.lba_2.write((block >> 40) as u8);
                chan.control.write(0x00);
            }
            // Set low sector count and LBA
            chan.sector_count.write(sectors as u8);
            chan.lba_0.write(block as u8);
            chan.lba_1.write((block >> 8) as u8);
            chan.lba_2.write((block >> 16) as u8);
            // Send command
            chan.command.write(if self.dma {
                if self.lba_48 {
                    AtaCommand::ReadDmaExt as u8
                } else {
                    AtaCommand::ReadDma as u8
                }
            } else {
                if self.lba_48 {
                    AtaCommand::ReadPioExt as u8
                } else {
                    AtaCommand::ReadPio as u8
                }
            });
            // Read data
            if self.dma {
                // Start bus master
                chan.busmaster_command.writef(1, true);
                // Wait for transaction to finish
                // (read=false: DRQ is not required here; completion is
                // verified via the bus-master status below)
                chan.polling(false, line!())?;
                // Wait for bus master to finish
                let start = Instant::now();
                let error = loop {
                    let status = chan.busmaster_status.read();
                    if status & 1 << 1 != 0 {
                        // Break with error status
                        break true;
                    }
                    if status & 1 == 0 {
                        // Break when not busy and no error
                        break false;
                    }
                    if start.elapsed() >= TIMEOUT {
                        log::error!("busmaster read timeout with status 0x{:02X}", status);
                        return Err(Error::new(EIO));
                    }
                    thread::yield_now();
                };
                // Stop bus master
                chan.busmaster_command.writef(1, false);
                // Clear bus master error and interrupt
                chan.busmaster_status.write(0b110);
                if error {
                    log::error!("IDE bus master error");
                    return Err(Error::new(EIO));
                }
                // Read buffer
                chunk.copy_from_slice(&chan.buf[..chunk.len()]);
            } else {
                for sector in 0..sectors {
                    chan.polling(true, line!())?;
                    // 128 dword reads move one 512-byte sector.
                    for i in 0..128 {
                        let data = chan.data32.read();
                        chunk[sector * 512 + i * 4 + 0] = (data >> 0) as u8;
                        chunk[sector * 512 + i * 4 + 1] = (data >> 8) as u8;
                        chunk[sector * 512 + i * 4 + 2] = (data >> 16) as u8;
                        chunk[sector * 512 + i * 4 + 3] = (data >> 24) as u8;
                    }
                }
            }
            count += chunk.len();
        }
        Ok(count)
    }
    // NOTE: not async
    /// Write sectors starting at `start_block` from `buffer`.
    ///
    /// Mirrors `read` (64 KiB chunks, DMA or PIO), then issues a cache flush
    /// after every chunk so data reaches the medium.
    async fn write(&mut self, start_block: u64, buffer: &[u8]) -> Result<usize> {
        let mut count = 0;
        for chunk in buffer.chunks(65536) {
            let block = start_block + (count as u64) / 512;
            //TODO: support other LBA modes
            assert!(block < 0x1_0000_0000_0000);
            let sectors = (chunk.len() + 511) / 512;
            assert!(sectors <= 128);
            log::trace!(
                "IDE write chan {} dev {} block {:#x} count {:#x}",
                self.chan_i,
                self.dev,
                block,
                sectors
            );
            let mut chan = self.chan.lock().unwrap();
            if self.dma {
                // Stop bus master
                chan.busmaster_command.writef(1, false);
                // Make PRDT EOT match chunk size
                for i in 0..sectors {
                    chan.prdt[i] = PrdtEntry {
                        phys: (chan.buf.physical() + i * 512).try_into().unwrap(),
                        size: 512,
                        flags: if i + 1 == sectors {
                            1 << 15 // End of table
                        } else {
                            0
                        },
                    };
                }
                // Set PRDT
                let prdt = chan.prdt.physical();
                chan.busmaster_prdt.write(prdt.try_into().unwrap());
                // Set to write
                chan.busmaster_command.writef(1 << 3, false);
                // Clear interrupt and error bits
                chan.busmaster_status.write(0b110);
                // Write buffer
                chan.buf[..chunk.len()].copy_from_slice(chunk);
            }
            // Select drive
            //TODO: upper part of LBA 28
            chan.device_select.write(0xE0 | (self.dev << 4));
            if self.lba_48 {
                // Set high sector count and LBA
                chan.control.write(0x80);
                chan.sector_count.write((sectors >> 8) as u8);
                chan.lba_0.write((block >> 24) as u8);
                chan.lba_1.write((block >> 32) as u8);
                chan.lba_2.write((block >> 40) as u8);
                chan.control.write(0x00);
            }
            // Set low sector count and LBA
            chan.sector_count.write(sectors as u8);
            chan.lba_0.write(block as u8);
            chan.lba_1.write((block >> 8) as u8);
            chan.lba_2.write((block >> 16) as u8);
            // Send command
            chan.command.write(if self.dma {
                if self.lba_48 {
                    AtaCommand::WriteDmaExt as u8
                } else {
                    AtaCommand::WriteDma as u8
                }
            } else {
                if self.lba_48 {
                    AtaCommand::WritePioExt as u8
                } else {
                    AtaCommand::WritePio as u8
                }
            });
            // Write data
            if self.dma {
                // Start bus master
                chan.busmaster_command.writef(1, true);
                // Wait for transaction to finish
                chan.polling(false, line!())?;
                // Wait for bus master to finish
                let start = Instant::now();
                let error = loop {
                    let status = chan.busmaster_status.read();
                    if status & 1 << 1 != 0 {
                        // Break with error status
                        break true;
                    }
                    if status & 1 == 0 {
                        // Break when not busy and no error
                        break false;
                    }
                    if start.elapsed() >= TIMEOUT {
                        log::error!("busmaster write timeout with status 0x{:02X}", status);
                        return Err(Error::new(EIO));
                    }
                    thread::yield_now();
                };
                // Stop bus master
                chan.busmaster_command.writef(1, false);
                // Clear bus master error and interrupt
                chan.busmaster_status.write(0b110);
                if error {
                    log::error!("IDE bus master error");
                    return Err(Error::new(EIO));
                }
            } else {
                for sector in 0..sectors {
                    chan.polling(false, line!())?;
                    for i in 0..128 {
                        chan.data32.write(
                            ((chunk[sector * 512 + i * 4 + 0] as u32) << 0)
                                | ((chunk[sector * 512 + i * 4 + 1] as u32) << 8)
                                | ((chunk[sector * 512 + i * 4 + 2] as u32) << 16)
                                | ((chunk[sector * 512 + i * 4 + 3] as u32) << 24),
                        );
                    }
                }
            }
            // Flush the drive's write cache before reporting success.
            chan.command.write(if self.lba_48 {
                AtaCommand::CacheFlushExt as u8
            } else {
                AtaCommand::CacheFlush as u8
            });
            chan.polling(false, line!())?;
            count += chunk.len();
        }
        Ok(count)
    }
}
@@ -0,0 +1,304 @@
use common::io::Io as _;
use driver_block::{Disk, DiskScheme, ExecutorTrait, FuturesExecutor};
use event::{EventFlags, RawEventQueue};
use libredox::flag;
use log::{error, info};
use pcid_interface::PciFunctionHandle;
use std::{
fs::File,
io::{Read, Write},
os::unix::io::{FromRawFd, RawFd},
sync::{Arc, Mutex},
thread::{self, sleep},
time::Duration,
};
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::ide::{AtaCommand, AtaDisk, Channel};
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub mod ide;
/// Entry point: hand control to the pcid daemon framework, which invokes
/// `daemon` once the PCI function is available.
fn main() {
    pcid_interface::pci_daemon(daemon);
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
/// IDE driver daemon body (x86/x86_64 only; port I/O is required).
///
/// Sets up logging, maps the two legacy-compatibility channels, probes both
/// devices on each channel with IDENTIFY, builds a `DiskScheme` over the
/// discovered disks, and then services scheme and IRQ events forever.
fn daemon(daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
    let pci_config = pcid_handle.config();
    let mut name = pci_config.func.name();
    name.push_str("_ide");
    common::setup_logging(
        "disk",
        "pci",
        &name,
        common::output_level(),
        common::file_level(),
    );
    info!("IDE PCI CONFIG: {:?}", pci_config);
    // Get controller DMA capable
    // (bit 7 of the programming interface indicates bus-master support)
    let dma = pci_config.func.full_device_id.interface & 0x80 != 0;
    let busmaster_base = pci_config.func.bars[4].expect_port();
    let (primary, primary_irq) = if pci_config.func.full_device_id.interface & 1 != 0 {
        panic!("TODO: IDE primary channel is PCI native");
    } else {
        (Channel::primary_compat(busmaster_base).unwrap(), 14)
    };
    let (secondary, secondary_irq) = if pci_config.func.full_device_id.interface & 1 != 0 {
        panic!("TODO: IDE secondary channel is PCI native");
    } else {
        (Channel::secondary_compat(busmaster_base + 8).unwrap(), 15)
    };
    common::acquire_port_io_rights().expect("ided: failed to get I/O privilege");
    //TODO: move this to ide.rs?
    let chans = vec![
        Arc::new(Mutex::new(primary)),
        Arc::new(Mutex::new(secondary)),
    ];
    // Wrapper enum so the scheme can hold heterogeneous disk types later;
    // currently only ATA disks exist.
    enum AnyDisk {
        Ata(AtaDisk),
    }
    impl Disk for AnyDisk {
        fn block_size(&self) -> u32 {
            let AnyDisk::Ata(a) = self;
            a.block_size()
        }
        fn size(&self) -> u64 {
            let AnyDisk::Ata(a) = self;
            a.size()
        }
        async fn write(&mut self, block: u64, buffer: &[u8]) -> syscall::Result<usize> {
            let AnyDisk::Ata(a) = self;
            a.write(block, buffer).await
        }
        async fn read(&mut self, block: u64, buffer: &mut [u8]) -> syscall::Result<usize> {
            let AnyDisk::Ata(a) = self;
            a.read(block, buffer).await
        }
    }
    let mut disks: Vec<AnyDisk> = Vec::new();
    for (chan_i, chan_lock) in chans.iter().enumerate() {
        let mut chan = chan_lock.lock().unwrap();
        println!(" - channel {}", chan_i);
        // Disable IRQs
        chan.control.write(2);
        for dev in 0..=1 {
            println!("   - device {}", dev);
            // Select device
            chan.device_select.write(0xA0 | (dev << 4));
            sleep(Duration::from_millis(1));
            // ATA identify command
            chan.command.write(AtaCommand::Identify as u8);
            sleep(Duration::from_millis(1));
            // Check if device exists
            // (status of 0 means no device is attached)
            if chan.status.read() == 0 {
                println!("     not found");
                continue;
            }
            // Poll for status
            // NOTE(review): this loop has no timeout, unlike Channel::polling
            // — a hung device would stall probing; confirm acceptable.
            let error = loop {
                let status = chan.status.read();
                if status & 1 != 0 {
                    // Error
                    break true;
                }
                if status & 0x80 == 0 && status & 0x08 != 0 {
                    // Not busy and data ready
                    break false;
                }
                thread::yield_now();
            };
            //TODO: probe ATAPI
            if error {
                println!("     error");
                continue;
            }
            // Read and print identity
            {
                // 256 words = one 512-byte IDENTIFY sector.
                let mut dest = [0u16; 256];
                for chunk in dest.chunks_mut(2) {
                    let data = chan.data32.read();
                    chunk[0] = data as u16;
                    chunk[1] = (data >> 16) as u16;
                }
                // ASCII fields are stored byte-swapped within each word:
                // high byte first, then low byte.
                let mut serial = String::new();
                for word in 10..20 {
                    let d = dest[word];
                    let a = ((d >> 8) as u8) as char;
                    if a != '\0' {
                        serial.push(a);
                    }
                    let b = (d as u8) as char;
                    if b != '\0' {
                        serial.push(b);
                    }
                }
                let mut firmware = String::new();
                for word in 23..27 {
                    let d = dest[word];
                    let a = ((d >> 8) as u8) as char;
                    if a != '\0' {
                        firmware.push(a);
                    }
                    let b = (d as u8) as char;
                    if b != '\0' {
                        firmware.push(b);
                    }
                }
                let mut model = String::new();
                for word in 27..47 {
                    let d = dest[word];
                    let a = ((d >> 8) as u8) as char;
                    if a != '\0' {
                        model.push(a);
                    }
                    let b = (d as u8) as char;
                    if b != '\0' {
                        model.push(b);
                    }
                }
                // Words 100-103 hold the 48-bit LBA sector count; if zero,
                // fall back to the 28-bit count in words 60-61.
                let mut sectors = (dest[100] as u64)
                    | ((dest[101] as u64) << 16)
                    | ((dest[102] as u64) << 32)
                    | ((dest[103] as u64) << 48);
                let lba_bits = if sectors == 0 {
                    sectors = (dest[60] as u64) | ((dest[61] as u64) << 16);
                    28
                } else {
                    48
                };
                println!("     Serial: {}", serial.trim());
                println!("     Firmware: {}", firmware.trim());
                println!("     Model: {}", model.trim());
                println!("     Size: {} MB", sectors / 2048);
                println!("     DMA: {}", dma);
                println!("     {}-bit LBA", lba_bits);
                disks.push(AnyDisk::Ata(AtaDisk {
                    chan: chan_lock.clone(),
                    chan_i,
                    dev,
                    size: sectors * 512,
                    dma,
                    lba_48: lba_bits == 48,
                }));
            }
        }
    }
    let scheme_name = format!("disk.{}", name);
    let mut scheme = DiskScheme::new(
        Some(daemon),
        scheme_name,
        disks
            .into_iter()
            .enumerate()
            .map(|(i, disk)| (i as u32, disk))
            .collect(),
        // TODO: Should ided just use TrivialExecutor or would it be valuable to actually use a
        // real executor?
        &FuturesExecutor,
    );
    let primary_irq_fd = libredox::call::open(
        &format!("/scheme/irq/{}", primary_irq),
        flag::O_RDWR | flag::O_NONBLOCK,
        0,
    )
    .expect("ided: failed to open irq file");
    let mut primary_irq_file = unsafe { File::from_raw_fd(primary_irq_fd as RawFd) };
    let secondary_irq_fd = libredox::call::open(
        &format!("/scheme/irq/{}", secondary_irq),
        flag::O_RDWR | flag::O_NONBLOCK,
        0,
    )
    .expect("ided: failed to open irq file");
    let mut secondary_irq_file = unsafe { File::from_raw_fd(secondary_irq_fd as RawFd) };
    let event_queue = RawEventQueue::new().expect("ided: failed to open event file");
    // Drop into the null namespace now that all resources are open.
    libredox::call::setrens(0, 0).expect("ided: failed to enter null namespace");
    event_queue
        .subscribe(scheme.event_handle().raw(), 0, EventFlags::READ)
        .expect("ided: failed to event disk scheme");
    event_queue
        .subscribe(primary_irq_fd, 0, EventFlags::READ)
        .expect("ided: failed to event irq scheme");
    event_queue
        .subscribe(secondary_irq_fd, 0, EventFlags::READ)
        .expect("ided: failed to event irq scheme");
    for event in event_queue {
        let event = event.unwrap();
        if event.fd == scheme.event_handle().raw() {
            FuturesExecutor.block_on(scheme.tick()).unwrap();
        } else if event.fd == primary_irq_fd {
            let mut irq = [0; 8];
            if primary_irq_file
                .read(&mut irq)
                .expect("ided: failed to read irq file")
                >= irq.len()
            {
                let _chan = chans[0].lock().unwrap();
                //TODO: check chan for irq
                // Writing the count back acknowledges the IRQ.
                primary_irq_file
                    .write(&irq)
                    .expect("ided: failed to write irq file");
                FuturesExecutor.block_on(scheme.tick()).unwrap();
            }
        } else if event.fd == secondary_irq_fd {
            let mut irq = [0; 8];
            if secondary_irq_file
                .read(&mut irq)
                .expect("ided: failed to read irq file")
                >= irq.len()
            {
                let _chan = chans[1].lock().unwrap();
                //TODO: check chan for irq
                secondary_irq_file
                    .write(&irq)
                    .expect("ided: failed to write irq file");
                FuturesExecutor.block_on(scheme.tick()).unwrap();
            }
        } else {
            error!("Unknown event {}", event.fd);
        }
    }
    std::process::exit(0);
}
/// Stub for architectures without x86 port I/O: the IDE driver cannot work
/// there. Parameters are underscore-prefixed to avoid unused-variable
/// warnings on non-x86 builds.
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn daemon(_daemon: daemon::Daemon, _pcid_handle: PciFunctionHandle) -> ! {
    unimplemented!()
}
@@ -0,0 +1,20 @@
[package]
name = "lived"
description = "Live disk daemon"
authors = ["4lDO2 <4lDO2@protonmail.com>"]
version = "0.1.0"
edition = "2021"
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow.workspace = true
libredox.workspace = true
daemon = { path = "../../../daemon" }
redox_syscall = { workspace = true, features = ["std"] }
redox_event.workspace = true
driver-block = { path = "../driver-block" }
[lints]
workspace = true
@@ -0,0 +1,177 @@
//! Disk scheme replacement when making live disk
#![feature(int_roundings)]
use std::collections::{BTreeMap, HashMap};
use std::fs::File;
use std::os::fd::AsRawFd;
use driver_block::{Disk, DiskScheme};
use driver_block::{ExecutorTrait, TrivialExecutor};
use libredox::call::MmapArgs;
use libredox::flag;
use syscall::error::*;
use syscall::PAGE_SIZE;
use anyhow::{anyhow, Context};
/// RAM-backed "disk": a read-only mapping of the live image in physical
/// memory, plus a copy-on-write overlay for blocks that have been written.
struct LiveDisk {
    original: &'static [u8],
    //TODO: drop overlay blocks if they match the original
    // Keyed by block number; each value is one PAGE_SIZE block.
    overlay: HashMap<u64, Box<[u8]>>,
}
impl LiveDisk {
    /// Map the live image at physical address `phys` spanning `size` bytes.
    ///
    /// The mapped range is widened to page boundaries. The mapping is
    /// `PROT_READ`, so an immutable slice is constructed over it (the
    /// previous `from_raw_parts_mut` produced a `&mut`-derived pointer to
    /// read-only memory, which is unsound to ever write through).
    ///
    /// NOTE(review): `original` starts at the page-aligned `start`, not at
    /// `phys`; assumes DISK_LIVE_ADDR is page-aligned — TODO confirm.
    fn new(phys: usize, size: usize) -> anyhow::Result<LiveDisk> {
        let start = phys.div_floor(PAGE_SIZE) * PAGE_SIZE;
        let end = phys
            .checked_add(size)
            .context("phys + size overflow")?
            .next_multiple_of(PAGE_SIZE);
        let size = end - start;
        let original = unsafe {
            let file = File::open("/scheme/memory/physical")?;
            let base = libredox::call::mmap(MmapArgs {
                fd: file.as_raw_fd() as usize,
                addr: core::ptr::null_mut(),
                offset: start as u64,
                length: size,
                prot: flag::PROT_READ,
                flags: flag::MAP_SHARED,
            })
            .map_err(|err| anyhow!("failed to mmap livedisk: {}", err))?;
            // SAFETY: the mapping is `size` bytes, valid for reads, and is
            // never unmapped for the life of the daemon ('static).
            std::slice::from_raw_parts(base as *const u8, size)
        };
        Ok(LiveDisk {
            original,
            overlay: HashMap::new(),
        })
    }
}
impl Disk for LiveDisk {
    /// Blocks are whole pages.
    fn block_size(&self) -> u32 {
        PAGE_SIZE as u32
    }
    /// Size of the mapped live image in bytes.
    fn size(&self) -> u64 {
        self.original.len() as u64
    }
    /// Read page-sized blocks, preferring the copy-on-write overlay and
    /// falling back to the original mapping for untouched blocks.
    async fn read(&mut self, mut block: u64, buffer: &mut [u8]) -> syscall::Result<usize> {
        let mut offset = (block as usize) * PAGE_SIZE;
        // Reject reads that would run off the end of the image.
        if offset + buffer.len() > self.original.len() {
            return Err(syscall::Error::new(EINVAL));
        }
        for chunk in buffer.chunks_mut(PAGE_SIZE) {
            match self.overlay.get(&block) {
                Some(overlay) => {
                    chunk.copy_from_slice(&overlay[..chunk.len()]);
                }
                None => {
                    chunk.copy_from_slice(&self.original[offset..offset + chunk.len()]);
                }
            }
            block += 1;
            offset += PAGE_SIZE;
        }
        Ok(buffer.len())
    }
    /// Write page-sized blocks into the overlay; the original mapping is
    /// never modified. A block written for the first time is seeded from the
    /// original so partial-block writes keep the remaining bytes intact.
    async fn write(&mut self, mut block: u64, buffer: &[u8]) -> syscall::Result<usize> {
        let mut offset = (block as usize) * PAGE_SIZE;
        if offset + buffer.len() > self.original.len() {
            return Err(syscall::Error::new(EINVAL));
        }
        for chunk in buffer.chunks(PAGE_SIZE) {
            self.overlay.entry(block).or_insert_with(|| {
                let offset = (block as usize) * PAGE_SIZE;
                self.original[offset..offset + PAGE_SIZE]
                    .to_vec()
                    .into_boxed_slice()
            })[..chunk.len()]
                .copy_from_slice(chunk);
            block += 1;
            offset += PAGE_SIZE;
        }
        Ok(buffer.len())
    }
}
/// Entry point: run `daemon` under the daemon framework (it signals
/// readiness itself once the scheme is up).
fn main() {
    daemon::Daemon::new(daemon);
}
/// Live-disk daemon body.
///
/// Reads `DISK_LIVE_ADDR`/`DISK_LIVE_SIZE` (hex) from the kernel environment;
/// if either is absent or zero the system is not booted from a live image and
/// the daemon exits immediately. Otherwise it exposes the mapped image as a
/// single `disk.live` block device and services scheme events forever.
fn daemon(daemon: daemon::Daemon) -> ! {
    let mut phys = 0;
    let mut size = 0;
    // TODO: handle error
    for line in std::fs::read_to_string("/scheme/sys/env")
        .context("failed to read env")
        .unwrap()
        .lines()
    {
        // Each line has the form NAME=VALUE; values are hexadecimal.
        let mut parts = line.splitn(2, '=');
        let name = parts.next().unwrap_or("");
        let value = parts.next().unwrap_or("");
        if name == "DISK_LIVE_ADDR" {
            phys = usize::from_str_radix(value, 16).unwrap_or(0);
        }
        if name == "DISK_LIVE_SIZE" {
            size = usize::from_str_radix(value, 16).unwrap_or(0);
        }
    }
    if phys == 0 || size == 0 {
        // No live disk data; signal readiness and exit quietly with success.
        daemon.ready();
        std::process::exit(0);
    }
    let event_queue = event::EventQueue::new().unwrap();
    event::user_data! {
        enum Event {
            Scheme,
        }
    };
    let mut scheme = DiskScheme::new(
        Some(daemon),
        "disk.live".to_owned(),
        BTreeMap::from([(
            0,
            LiveDisk::new(phys, size).unwrap_or_else(|err| {
                eprintln!("failed to initialize livedisk scheme: {}", err);
                std::process::exit(1)
            }),
        )]),
        &TrivialExecutor,
    );
    libredox::call::setrens(0, 0).expect("lived: failed to enter null namespace");
    event_queue
        .subscribe(
            scheme.event_handle().raw(),
            Event::Scheme,
            event::EventFlags::READ,
        )
        .unwrap();
    for event in event_queue {
        match event.unwrap().user_data {
            Event::Scheme => TrivialExecutor.block_on(scheme.tick()).unwrap(),
        }
    }
    std::process::exit(0);
}
@@ -0,0 +1 @@
/target
@@ -0,0 +1,28 @@
[package]
name = "nvmed"
description = "NVM Express (NVMe) driver"
version = "0.1.0"
edition = "2021"
[dependencies]
bitflags.workspace = true
futures = "0.3"
libredox.workspace = true
log.workspace = true
parking_lot.workspace = true
redox_event.workspace = true
redox_syscall = { workspace = true, features = ["std"] }
smallvec.workspace = true
executor = { path = "../../executor" }
common = { path = "../../common" }
daemon = { path = "../../../daemon" }
driver-block = { path = "../driver-block" }
partitionlib = { path = "../partitionlib" }
pcid = { path = "../../pcid" }
[features]
default = []
[lints]
workspace = true
@@ -0,0 +1,154 @@
use std::cell::RefCell;
use std::fs::File;
use std::io::{self, Read, Write};
use std::os::fd::AsRawFd;
use std::rc::Rc;
use std::sync::Arc;
use std::usize;
use driver_block::{Disk, DiskScheme};
use pcid_interface::{irq_helpers, PciFunctionHandle};
use crate::nvme::NvmeNamespace;
use self::nvme::Nvme;
mod nvme;
/// One NVMe namespace presented to the disk scheme as a block device.
struct NvmeDisk {
    // Shared controller handle; actual I/O is issued through it.
    nvme: Arc<Nvme>,
    ns: NvmeNamespace,
}
impl Disk for NvmeDisk {
    /// Logical block size of the namespace (from Identify Namespace data).
    fn block_size(&self) -> u32 {
        self.ns.block_size.try_into().unwrap()
    }
    /// Namespace capacity in bytes.
    fn size(&self) -> u64 {
        self.ns.blocks * self.ns.block_size
    }
    /// Forward a block read to the controller for this namespace.
    async fn read(&mut self, block: u64, buffer: &mut [u8]) -> syscall::Result<usize> {
        self.nvme.namespace_read(&self.ns, block, buffer).await
    }
    /// Forward a block write to the controller for this namespace.
    async fn write(&mut self, block: u64, buffer: &[u8]) -> syscall::Result<usize> {
        self.nvme.namespace_write(&self.ns, block, buffer).await
    }
}
/// Arm a timer event `secs` seconds from now on a `/scheme/time` handle.
///
/// Reads the current timespec from the handle, advances `tv_sec`, and writes
/// the whole timespec back (the write schedules the wakeup).
///
/// Changes from the original: the single-arm `match` (which only bound a
/// variable) is replaced by a plain `let`, and the write uses `write_all`
/// so a short write is an error rather than silently truncating the timespec.
fn time_arm(time_handle: &mut File, secs: i64) -> io::Result<()> {
    let mut time_buf = [0_u8; core::mem::size_of::<libredox::data::TimeSpec>()];
    if time_handle.read(&mut time_buf)? < time_buf.len() {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "time read too small",
        ));
    }
    let time = libredox::data::timespec_from_mut_bytes(&mut time_buf);
    time.tv_sec += secs;
    time_handle.write_all(&time_buf)?;
    Ok(())
}
/// Entry point: hand control to the pcid daemon framework, which invokes
/// `daemon` once the PCI function is available.
fn main() {
    pcid_interface::pci_daemon(daemon);
}
/// NVMe driver daemon body.
///
/// Maps BAR0, allocates an interrupt vector, initializes the controller,
/// enumerates namespaces (with a 5-second timeout), then exposes them as a
/// `DiskScheme` and services scheme events on the NVMe executor forever.
fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> ! {
    let pci_config = pcid_handle.config();
    let scheme_name = format!("disk.{}-nvme", pci_config.func.name());
    common::setup_logging(
        "disk",
        "pci",
        &scheme_name,
        common::output_level(),
        common::file_level(),
    );
    log::debug!("NVME PCI CONFIG: {:?}", pci_config);
    // BAR0 holds the controller register block.
    let address = unsafe { pcid_handle.map_bar(0).ptr };
    let interrupt_vector = irq_helpers::pci_allocate_interrupt_vector(&mut pcid_handle, "nvmed");
    let iv = interrupt_vector.vector();
    let irq_handle = interrupt_vector.irq_handle().try_clone().unwrap();
    let mut nvme = Nvme::new(address.as_ptr() as usize, interrupt_vector, pcid_handle)
        .expect("nvmed: failed to allocate driver data");
    unsafe { nvme.init().expect("nvmed: failed to init") }
    log::debug!("Finished base initialization");
    let nvme = Arc::new(nvme);
    let executor = nvme::executor::init(Arc::clone(&nvme), iv, false /* FIXME */, irq_handle);
    let mut time_handle = File::open(&format!("/scheme/time/{}", libredox::flag::CLOCK_MONOTONIC))
        .expect("failed to open time handle");
    let mut time_events = Box::pin(
        executor.register_external_event(time_handle.as_raw_fd() as usize, event::EventFlags::READ),
    );
    // Try to init namespaces for 5 seconds
    time_arm(&mut time_handle, 5).expect("failed to arm timer");
    // Race namespace initialization against the timer: whichever future
    // completes first wins, and a timeout is fatal.
    let namespaces = executor.block_on(async {
        let namespaces_future = nvme.init_with_queues();
        let time_future = time_events.as_mut().next();
        futures::pin_mut!(namespaces_future);
        futures::pin_mut!(time_future);
        match futures::future::select(namespaces_future, time_future).await {
            futures::future::Either::Left((namespaces, _)) => namespaces,
            futures::future::Either::Right(_) => panic!("timeout on init"),
        }
    });
    log::debug!("Initialized!");
    let scheme = Rc::new(RefCell::new(DiskScheme::new(
        Some(daemon),
        scheme_name,
        namespaces
            .into_iter()
            .map(|(k, ns)| {
                (
                    k,
                    NvmeDisk {
                        nvme: nvme.clone(),
                        ns,
                    },
                )
            })
            .collect(),
        &*executor,
    )));
    let mut scheme_events = Box::pin(executor.register_external_event(
        scheme.borrow().event_handle().raw(),
        event::EventFlags::READ,
    ));
    // Drop into the null namespace now that all resources are open.
    libredox::call::setrens(0, 0).expect("nvmed: failed to enter null namespace");
    log::debug!("Starting to listen for scheme events");
    executor.block_on(async {
        loop {
            log::trace!("new event iteration");
            if let Err(err) = scheme.borrow_mut().tick().await {
                log::error!("scheme error: {err}");
            }
            let _ = scheme_events.as_mut().next().await;
        }
    });
    //TODO: destroy NVMe stuff
    std::process::exit(0);
}
@@ -0,0 +1,162 @@
use super::NvmeCmd;
impl NvmeCmd {
    /// Admin command: Create I/O Completion Queue (opcode 5).
    ///
    /// `ptr` is the physical base of the (physically contiguous) queue, `size`
    /// its entry count minus handling per spec encoding, and `iv` an optional
    /// interrupt vector; interrupts are enabled only when a vector is given.
    pub fn create_io_completion_queue(
        cid: u16,
        qid: u16,
        ptr: usize,
        size: u16,
        iv: Option<u16>,
    ) -> Self {
        const DW11_PHYSICALLY_CONTIGUOUS_BIT: u32 = 0x0000_0001;
        const DW11_ENABLE_INTERRUPTS_BIT: u32 = 0x0000_0002;
        const DW11_INTERRUPT_VECTOR_SHIFT: u8 = 16;
        Self {
            opcode: 5,
            flags: 0,
            cid,
            nsid: 0,
            _rsvd: 0,
            mptr: 0,
            dptr: [ptr as u64, 0],
            cdw10: ((size as u32) << 16) | (qid as u32),
            cdw11: DW11_PHYSICALLY_CONTIGUOUS_BIT
                | if let Some(iv) = iv {
                    // enable interrupts if a vector is present
                    DW11_ENABLE_INTERRUPTS_BIT | (u32::from(iv) << DW11_INTERRUPT_VECTOR_SHIFT)
                } else {
                    0
                },
            cdw12: 0,
            cdw13: 0,
            cdw14: 0,
            cdw15: 0,
        }
    }
    /// Admin command: Create I/O Submission Queue (opcode 1), bound to
    /// completion queue `cqid`.
    pub fn create_io_submission_queue(
        cid: u16,
        qid: u16,
        ptr: usize,
        size: u16,
        cqid: u16,
    ) -> Self {
        Self {
            opcode: 1,
            flags: 0,
            cid,
            nsid: 0,
            _rsvd: 0,
            mptr: 0,
            dptr: [ptr as u64, 0],
            cdw10: ((size as u32) << 16) | (qid as u32),
            cdw11: ((cqid as u32) << 16) | 1, /* Physically Contiguous */
            //TODO: QPRIO
            cdw12: 0, //TODO: NVMSETID
            cdw13: 0,
            cdw14: 0,
            cdw15: 0,
        }
    }
    /// Admin command: Identify, CNS 0 — namespace data for `nsid` into `ptr`.
    pub fn identify_namespace(cid: u16, ptr: usize, nsid: u32) -> Self {
        Self {
            opcode: 6,
            flags: 0,
            cid,
            nsid,
            _rsvd: 0,
            mptr: 0,
            dptr: [ptr as u64, 0],
            cdw10: 0,
            cdw11: 0,
            cdw12: 0,
            cdw13: 0,
            cdw14: 0,
            cdw15: 0,
        }
    }
    /// Admin command: Identify, CNS 1 — controller data into `ptr`.
    pub fn identify_controller(cid: u16, ptr: usize) -> Self {
        Self {
            opcode: 6,
            flags: 0,
            cid,
            nsid: 0,
            _rsvd: 0,
            mptr: 0,
            dptr: [ptr as u64, 0],
            cdw10: 1,
            cdw11: 0,
            cdw12: 0,
            cdw13: 0,
            cdw14: 0,
            cdw15: 0,
        }
    }
    /// Admin command: Identify, CNS 2 — list of active NSIDs greater than
    /// `base` into `ptr`.
    pub fn identify_namespace_list(cid: u16, ptr: usize, base: u32) -> Self {
        Self {
            opcode: 6,
            flags: 0,
            cid,
            nsid: base,
            _rsvd: 0,
            mptr: 0,
            dptr: [ptr as u64, 0],
            cdw10: 2,
            cdw11: 0,
            cdw12: 0,
            cdw13: 0,
            cdw14: 0,
            cdw15: 0,
        }
    }
    /// Admin command: Get Features (opcode 0xA) for feature id `fid`.
    ///
    /// Fix: `cid` was previously accepted but never stored (left at the
    /// `Default` value by `..Default::default()`), unlike every other
    /// constructor here; it is now assigned.
    pub fn get_features(cid: u16, ptr: usize, fid: u8) -> Self {
        Self {
            opcode: 0xA,
            cid,
            dptr: [ptr as u64, 0],
            cdw10: u32::from(fid), // TODO: SEL
            ..Default::default()
        }
    }
    /// I/O command: Read (opcode 2). `blocks_1` is the 0-based block count;
    /// `ptr0`/`ptr1` are the two PRP entries.
    pub fn io_read(cid: u16, nsid: u32, lba: u64, blocks_1: u16, ptr0: u64, ptr1: u64) -> Self {
        Self {
            opcode: 2,
            flags: 0,
            cid,
            nsid,
            _rsvd: 0,
            mptr: 0,
            dptr: [ptr0, ptr1],
            cdw10: lba as u32,
            cdw11: (lba >> 32) as u32,
            cdw12: blocks_1 as u32,
            cdw13: 0,
            cdw14: 0,
            cdw15: 0,
        }
    }
    /// I/O command: Write (opcode 1). Same field layout as `io_read`.
    pub fn io_write(cid: u16, nsid: u32, lba: u64, blocks_1: u16, ptr0: u64, ptr1: u64) -> Self {
        Self {
            opcode: 1,
            flags: 0,
            cid,
            nsid,
            _rsvd: 0,
            mptr: 0,
            dptr: [ptr0, ptr1],
            cdw10: lba as u32,
            cdw11: (lba >> 32) as u32,
            cdw12: blocks_1 as u32,
            cdw13: 0,
            cdw14: 0,
            cdw15: 0,
        }
    }
}
@@ -0,0 +1,82 @@
use std::cell::RefCell;
use std::fs::File;
use std::rc::Rc;
use std::sync::Arc;
use executor::{Hardware, LocalExecutor};
use super::{CmdId, CqId, Nvme, NvmeCmd, NvmeComp, SqId};
/// Marker type wiring the NVMe driver into the generic `executor` crate.
pub struct NvmeHw;
impl Hardware for NvmeHw {
    type Iv = u16;
    type Sqe = NvmeCmd;
    type Cqe = NvmeComp;
    type CmdId = CmdId;
    type CqId = CqId;
    type SqId = SqId;
    type GlobalCtxt = Arc<Nvme>;
    /// Mask interrupt vector `iv` on the controller.
    fn mask_vector(ctxt: &Arc<Nvme>, iv: Self::Iv) {
        ctxt.set_vector_masked(iv, true)
    }
    /// Unmask interrupt vector `iv` on the controller.
    fn unmask_vector(ctxt: &Arc<Nvme>, iv: Self::Iv) {
        ctxt.set_vector_masked(iv, false)
    }
    /// Stamp the executor-assigned command id into a submission entry.
    fn set_sqe_cmdid(sqe: &mut NvmeCmd, id: CmdId) {
        sqe.cid = id;
    }
    /// Recover the command id from a completion entry.
    fn get_cqe_cmdid(cqe: &Self::Cqe) -> Self::CmdId {
        cqe.cid
    }
    fn vtable() -> &'static std::task::RawWakerVTable {
        &VTABLE
    }
    /// The thread-local executor installed by `init`. Panics if called
    /// before `init` on this thread.
    fn current() -> std::rc::Rc<executor::LocalExecutor<Self>> {
        THE_EXECUTOR.with(|exec| Rc::clone(exec.borrow().as_ref().unwrap()))
    }
    /// Try to place a command on submission queue `sq_id`, using this
    /// thread's queue context.
    fn try_submit(
        nvme: &Arc<Nvme>,
        sq_id: Self::SqId,
        success: impl FnOnce(Self::CmdId) -> Self::Sqe,
        fail: impl FnOnce(),
    ) -> Option<(Self::CqId, Self::CmdId)> {
        let ctxt = nvme.cur_thread_ctxt();
        let ctxt = ctxt.lock();
        nvme.try_submit_raw(&*ctxt, sq_id, success, fail)
    }
    /// Drain all pending completions on this thread's queues, advancing each
    /// completion queue head and the paired submission queue head.
    fn poll_cqes(nvme: &Arc<Nvme>, mut handle: impl FnMut(Self::CqId, Self::Cqe)) {
        let ctxt = nvme.cur_thread_ctxt();
        let ctxt = ctxt.lock();
        for (sq_cq_id, (sq, cq)) in ctxt.queues.borrow_mut().iter_mut() {
            while let Some((new_head, cqe)) = cq.complete() {
                unsafe {
                    nvme.completion_queue_head(*sq_cq_id, new_head);
                }
                sq.head = cqe.sq_head;
                log::trace!("new head {new_head} cqe {cqe:?}");
                handle(*sq_cq_id, cqe);
            }
        }
    }
    /// Submission and completion queues are paired one-to-one by id.
    fn sq_cq(_ctxt: &Arc<Nvme>, id: Self::CqId) -> Self::SqId {
        id
    }
}
// Shared waker vtable for all NVMe futures; generated once by the executor crate.
static VTABLE: std::task::RawWakerVTable = executor::vtable::<NvmeHw>();
thread_local! {
    // Per-thread executor slot, populated by `init` and read by `NvmeHw::current`.
    static THE_EXECUTOR: RefCell<Option<Rc<LocalExecutor<NvmeHw>>>> = RefCell::new(None);
}
pub type NvmeExecutor = LocalExecutor<NvmeHw>;
/// Create this thread's NVMe executor, register it in the thread-local slot
/// consumed by `NvmeHw::current`, and return a handle to the caller.
pub fn init(nvme: Arc<Nvme>, iv: u16, intx: bool, irq_handle: File) -> Rc<LocalExecutor<NvmeHw>> {
    let executor = Rc::new(executor::init_raw(nvme, iv, intx, irq_handle));
    let handle = Rc::clone(&executor);
    THE_EXECUTOR.with(move |slot| *slot.borrow_mut() = Some(executor));
    handle
}
@@ -0,0 +1,228 @@
use super::{Nvme, NvmeCmd, NvmeNamespace};
use common::dma::Dma;
/// See NVME spec section 5.15.2.2.
#[derive(Clone, Copy)]
#[repr(C, packed)]
// On-disk/on-wire layout; field order and packing must match the spec exactly,
// and the struct is padded out to the full 4 KiB identify page.
pub struct IdentifyControllerData {
    /// PCI vendor ID, always the same as in the PCI function header.
    pub vid: u16,
    /// PCI subsystem vendor ID.
    pub ssvid: u16,
    /// ASCII (space padded, not NUL terminated)
    pub serial_no: [u8; 20],
    /// ASCII
    pub model_no: [u8; 48],
    /// ASCII
    pub firmware_rev: [u8; 8],
    // TODO: Lots of fields
    // Pads the struct to exactly 4096 bytes (the identify data structure size).
    pub _4k_pad: [u8; 4096 - 72],
}
/// See NVME spec section 5.15.2.1.
#[derive(Clone, Copy)]
#[repr(C, packed)]
// Wire layout of the 4 KiB Identify Namespace data structure; the numeric
// comments mark byte offsets within the structure.
pub struct IdentifyNamespaceData {
    /// Namespace Size: total size in logical blocks.
    pub nsze: u64,
    /// Namespace Capacity, in logical blocks.
    pub ncap: u64,
    /// Namespace Utilization, in logical blocks.
    pub nuse: u64,
    pub nsfeat: u8,
    /// Number of LBA formats (zero-based).
    pub nlbaf: u8,
    /// Formatted LBA Size: bits 3:0 index `lba_format_support`, bit 4 is
    /// the metadata-at-end-of-data flag (see the accessors below).
    pub flbas: u8,
    pub mc: u8,
    pub dpc: u8,
    pub dps: u8,
    pub nmic: u8,
    pub rescap: u8,
    // 32
    pub fpi: u8,
    pub dlfeat: u8,
    pub nawun: u16,
    pub nawupf: u16,
    pub nacwu: u16,
    // 40
    pub nabsn: u16,
    pub nabo: u16,
    pub nabspf: u16,
    pub noiob: u16,
    // 48
    pub nvmcap: u128,
    // 64
    pub npwg: u16,
    pub npwa: u16,
    pub npdg: u16,
    pub npda: u16,
    // 72
    pub nows: u16,
    pub _rsvd1: [u8; 18],
    // 92
    pub anagrpid: u32,
    pub _rsvd2: [u8; 3],
    pub nsattr: u8,
    // 100
    pub nvmsetid: u16,
    pub endgid: u16,
    pub nguid: [u8; 16],
    pub eui64: u64,
    /// The 16 supported LBA formats; indexed by `flbas & 0xF`.
    pub lba_format_support: [LbaFormat; 16],
    pub _rsvd3: [u8; 192],
    pub vendor_specific: [u8; 3712],
}
impl IdentifyNamespaceData {
    /// Total namespace size, in logical blocks (NSZE).
    pub fn size_in_blocks(&self) -> u64 {
        self.nsze
    }
    /// Namespace capacity, in logical blocks (NCAP).
    pub fn capacity_in_blocks(&self) -> u64 {
        self.ncap
    }
    /// Index of the currently formatted LBA format. Guaranteed to be within 0..=15.
    pub fn formatted_lba_size_idx(&self) -> usize {
        usize::from(self.flbas & 0xF)
    }
    /// The LBA format the namespace is currently formatted with.
    pub fn formatted_lba_size(&self) -> &LbaFormat {
        &self.lba_format_support[self.formatted_lba_size_idx()]
    }
    /// Whether metadata is transferred at the end of the data LBA (FLBAS bit 4).
    pub fn has_metadata_after_data(&self) -> bool {
        self.flbas & 0b1_0000 != 0
    }
}
/// A single LBA Format descriptor (one `u32` from the identify namespace data).
///
/// Bit layout: 15:0 metadata size, 23:16 log2 of the LBA data size,
/// 25:24 relative performance.
#[derive(Clone, Copy)]
#[repr(C, packed)]
pub struct LbaFormat(pub u32);
/// Relative Performance field of an LBA format; lower raw values are better.
#[repr(u8)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum RelativePerformance {
    Best = 0b00,
    Better,
    Good,
    Degraded,
}
impl Ord for RelativePerformance {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // higher performance is better, hence reversed
        Ord::cmp(&(*self as u8), &(*other as u8)).reverse()
    }
}
impl PartialOrd for RelativePerformance {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}
impl LbaFormat {
    /// Decode the Relative Performance field (bits 25:24).
    // Fix: the doubled parentheses around the match scrutinee previously
    // triggered an `unused_parens` warning.
    pub fn relative_performance(&self) -> RelativePerformance {
        match (self.0 >> 24) & 0b11 {
            0b00 => RelativePerformance::Best,
            0b01 => RelativePerformance::Better,
            0b10 => RelativePerformance::Good,
            0b11 => RelativePerformance::Degraded,
            _ => unreachable!(),
        }
    }
    /// An LBA format with a zero data size is an unused slot.
    pub fn is_available(&self) -> bool {
        self.log_lba_data_size() != 0
    }
    /// log2 of the LBA data size (LBADS, bits 23:16).
    pub fn log_lba_data_size(&self) -> u8 {
        ((self.0 >> 16) & 0xFF) as u8
    }
    /// LBA data size in bytes, or `None` for unsupported sizes
    /// (below 512 bytes or at least 4 GiB).
    pub fn lba_data_size(&self) -> Option<u64> {
        if self.log_lba_data_size() < 9 {
            return None;
        }
        if self.log_lba_data_size() >= 32 {
            return None;
        }
        Some(1u64 << self.log_lba_data_size())
    }
    /// Metadata size in bytes (bits 15:0).
    pub fn metadata_size(&self) -> u16 {
        (self.0 & 0xFFFF) as u16
    }
}
impl Nvme {
    /// Identify the controller and log its model, serial number and firmware
    /// revision. (The data itself is currently only logged, not returned.)
    pub async fn identify_controller(&self) {
        // TODO: Use same buffer
        let data: Dma<IdentifyControllerData> = unsafe { Dma::zeroed().unwrap().assume_init() };
        // println!(" - Attempting to identify controller");
        let comp = self
            .submit_and_complete_admin_command(|cid| {
                NvmeCmd::identify_controller(cid, data.physical())
            })
            .await;
        log::trace!("Completion: {:?}", comp);
        // println!(" - Dumping identify controller");
        // The ASCII fields are space padded, so trim before logging.
        let model_cow = String::from_utf8_lossy(&data.model_no);
        let serial_cow = String::from_utf8_lossy(&data.serial_no);
        let fw_cow = String::from_utf8_lossy(&data.firmware_rev);
        let model = model_cow.trim();
        let serial = serial_cow.trim();
        let firmware = fw_cow.trim();
        log::info!(
            " - Model: {} Serial: {} Firmware: {}",
            model,
            serial,
            firmware,
        );
    }
    /// Return the active namespace IDs greater than `base`, in ascending
    /// order. The controller fills a 4 KiB page of u32 IDs, zero-terminated.
    pub async fn identify_namespace_list(&self, base: u32) -> Vec<u32> {
        // TODO: Use buffer
        let data: Dma<[u32; 1024]> = unsafe { Dma::zeroed().unwrap().assume_init() };
        // println!(" - Attempting to retrieve namespace ID list");
        let comp = self
            .submit_and_complete_admin_command(|cid| {
                NvmeCmd::identify_namespace_list(cid, data.physical(), base)
            })
            .await;
        log::trace!("Completion2: {:?}", comp);
        // println!(" - Dumping namespace ID list");
        // A zero NSID terminates the list.
        data.iter().copied().take_while(|&nsid| nsid != 0).collect()
    }
    /// Identify a single namespace and distill the fields this driver needs
    /// (size in blocks and the formatted block size).
    ///
    /// Panics if the namespace's LBA format has an unsupported data size.
    pub async fn identify_namespace(&self, nsid: u32) -> NvmeNamespace {
        //TODO: Use buffer
        let data: Dma<IdentifyNamespaceData> = unsafe { Dma::zeroed().unwrap().assume_init() };
        log::debug!("Attempting to identify namespace {nsid}");
        let comp = self
            .submit_and_complete_admin_command(|cid| {
                NvmeCmd::identify_namespace(cid, data.physical(), nsid)
            })
            .await;
        log::debug!("Dumping identify namespace");
        let size = data.size_in_blocks();
        let capacity = data.capacity_in_blocks();
        log::info!("NSID: {} Size: {} Capacity: {}", nsid, size, capacity);
        let block_size = data
            .formatted_lba_size()
            .lba_data_size()
            .expect("nvmed: error: size outside 512-2^64 range");
        log::debug!("NVME block size: {}", block_size);
        NvmeNamespace {
            id: nsid,
            blocks: size,
            block_size,
        }
    }
}
@@ -0,0 +1,541 @@
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::convert::TryFrom;
use std::iter;
use std::sync::atomic::AtomicU16;
use std::sync::Arc;
use parking_lot::{Mutex, ReentrantMutex, RwLock};
use pcid_interface::irq_helpers::InterruptVector;
use common::io::{Io, Mmio};
use common::timeout::Timeout;
use syscall::error::{Error, Result, EIO};
use common::dma::Dma;
pub mod cmd;
pub mod executor;
pub mod identify;
pub mod queues;
use self::executor::NvmeExecutor;
pub use self::queues::{NvmeCmd, NvmeCmdQueue, NvmeComp, NvmeCompQueue};
use pcid_interface::PciFunctionHandle;
#[repr(C, packed)]
pub struct NvmeRegs {
/// Controller Capabilities
cap_low: Mmio<u32>,
cap_high: Mmio<u32>,
/// Version
vs: Mmio<u32>,
/// Interrupt mask set
intms: Mmio<u32>,
/// Interrupt mask clear
intmc: Mmio<u32>,
/// Controller configuration
cc: Mmio<u32>,
/// Reserved
_rsvd: Mmio<u32>,
/// Controller status
csts: Mmio<u32>,
/// NVM subsystem reset
nssr: Mmio<u32>,
/// Admin queue attributes
aqa: Mmio<u32>,
/// Admin submission queue base address
asq_low: Mmio<u32>,
asq_high: Mmio<u32>,
/// Admin completion queue base address
acq_low: Mmio<u32>,
acq_high: Mmio<u32>,
/// Controller memory buffer location
cmbloc: Mmio<u32>,
/// Controller memory buffer size
cmbsz: Mmio<u32>,
}
#[derive(Copy, Clone, Debug)]
/// The subset of identify-namespace data this driver actually uses.
pub struct NvmeNamespace {
    pub id: u32,
    /// Namespace size, in logical blocks.
    pub blocks: u64,
    /// Logical block size, in bytes.
    pub block_size: u64,
}
pub type CqId = u16;
pub type SqId = u16;
pub type CmdId = u16;
pub type AtomicCqId = AtomicU16;
pub type AtomicSqId = AtomicU16;
pub type AtomicCmdId = AtomicU16;
pub type Iv = u16;
/// Shared state for one NVMe controller.
pub struct Nvme {
    interrupt_vector: Mutex<InterruptVector>,
    pcid_interface: Mutex<PciFunctionHandle>,
    // The MMIO register file; RwLock because doorbell writes only need the
    // base address while configuration rewrites registers.
    regs: RwLock<&'static mut NvmeRegs>,
    sq_ivs: RwLock<HashMap<SqId, Iv>>,
    cq_ivs: RwLock<HashMap<CqId, Iv>>,
    // maps interrupt vectors with the completion queues they have
    thread_ctxts: RwLock<HashMap<Iv, Arc<ReentrantMutex<ThreadCtxt>>>>,
    next_sqid: AtomicSqId,
    next_cqid: AtomicCqId,
}
/// Per-thread queue state and bounce buffers.
pub struct ThreadCtxt {
    buffer: RefCell<Dma<[u8; 512 * 4096]>>, // 2MB of buffer
    buffer_prp: RefCell<Dma<[u64; 512]>>,   // 4KB of PRP for the buffer
    // Yes, technically NVME allows multiple submission queues to be mapped to the same completion
    // queue, but we don't use that feature.
    queues: RefCell<HashMap<u16, (NvmeCmdQueue, NvmeCompQueue)>>,
}
// SAFETY: NOTE(review) — these impls assert that all interior mutability in
// Nvme is guarded by the Mutex/RwLock/ReentrantMutex fields above; the
// RefCells inside ThreadCtxt are only reached through its ReentrantMutex.
// Confirm no unguarded access path exists before relying on this.
unsafe impl Send for Nvme {}
unsafe impl Sync for Nvme {}
/// How to handle full submission queues.
pub enum FullSqHandling {
    /// Return an error immediately prior to posting the command.
    ErrorDirectly,
    /// Tell the executor that we want to be notified when a command on the same submission queue
    /// has been completed.
    Wait,
}
impl Nvme {
    /// Construct driver state for the controller mapped at `address`,
    /// pre-populating thread context 0 with the admin queue pair (qid 0).
    /// Does not touch the hardware; call [`Self::init`] afterwards.
    pub fn new(
        address: usize,
        interrupt_vector: InterruptVector,
        pcid_interface: PciFunctionHandle,
    ) -> Result<Self> {
        Ok(Nvme {
            regs: RwLock::new(unsafe { &mut *(address as *mut NvmeRegs) }),
            thread_ctxts: RwLock::new(
                iter::once((
                    0_u16,
                    Arc::new(ReentrantMutex::new(ThreadCtxt {
                        buffer: RefCell::new(unsafe { Dma::zeroed()?.assume_init() }),
                        buffer_prp: RefCell::new(unsafe { Dma::zeroed()?.assume_init() }),
                        queues: RefCell::new(
                            iter::once((0, (NvmeCmdQueue::new()?, NvmeCompQueue::new()?)))
                                .collect(),
                        ),
                    })),
                ))
                .collect(),
            ),
            // The admin queue pair (id 0) uses interrupt vector 0.
            cq_ivs: RwLock::new(iter::once((0, 0)).collect()),
            sq_ivs: RwLock::new(iter::once((0, 0)).collect()),
            interrupt_vector: Mutex::new(interrupt_vector),
            pcid_interface: Mutex::new(pcid_interface),
            // TODO
            next_sqid: AtomicSqId::new(2),
            next_cqid: AtomicCqId::new(2),
        })
    }
    /// Write to a doorbell register.
    ///
    /// # Locking
    /// Locks `regs`.
    unsafe fn doorbell_write(&self, index: usize, value: u32) {
        use std::ops::DerefMut;
        let mut regs_guard = self.regs.write();
        let regs: &mut NvmeRegs = regs_guard.deref_mut();
        // CAP.DSTRD (bits 35:32 of CAP, i.e. low nibble of cap_high) gives
        // the doorbell stride as 4 << DSTRD bytes; doorbells start at 0x1000.
        let dstrd = (regs.cap_high.read() & 0b1111) as usize;
        let addr = (regs as *mut NvmeRegs as usize) + 0x1000 + index * (4 << dstrd);
        (&mut *(addr as *mut Mmio<u32>)).write(value);
    }
    /// The thread context for the calling thread (currently always ctxt 0).
    fn cur_thread_ctxt(&self) -> Arc<ReentrantMutex<ThreadCtxt>> {
        // TODO: multi-threading
        Arc::clone(self.thread_ctxts.read().get(&0).unwrap())
    }
    /// Ring the submission queue tail doorbell (doorbell index 2*qid).
    ///
    /// # Safety
    /// The queue entries up to `tail` must be fully written before ringing.
    pub unsafe fn submission_queue_tail(&self, qid: u16, tail: u16) {
        self.doorbell_write(2 * (qid as usize), u32::from(tail));
    }
    /// Ring the completion queue head doorbell (doorbell index 2*qid + 1).
    ///
    /// # Safety
    /// `head` must not pass entries that have not yet been consumed.
    pub unsafe fn completion_queue_head(&self, qid: u16, head: u16) {
        self.doorbell_write(2 * (qid as usize) + 1, u32::from(head));
    }
pub unsafe fn init(&mut self) -> Result<()> {
let thread_ctxts = self.thread_ctxts.get_mut();
{
let regs = self.regs.read();
log::debug!("CAP_LOW: {:X}", regs.cap_low.read());
log::debug!("CAP_HIGH: {:X}", regs.cap_high.read());
log::debug!("VS: {:X}", regs.vs.read());
log::debug!("CC: {:X}", regs.cc.read());
log::debug!("CSTS: {:X}", regs.csts.read());
}
log::debug!("Disabling controller.");
self.regs.get_mut().cc.writef(1, false);
{
log::trace!("Waiting for not ready.");
let timeout = Timeout::from_secs(1);
loop {
let csts = self.regs.get_mut().csts.read();
log::trace!("CSTS: {:X}", csts);
if csts & 1 == 1 {
timeout.run().map_err(|()| {
log::error!("failed to wait for not ready");
Error::new(EIO)
})?;
} else {
break;
}
}
}
if !self.interrupt_vector.get_mut().set_masked_if_fast(false) {
self.regs.get_mut().intms.write(0xFFFF_FFFF);
self.regs.get_mut().intmc.write(0x0000_0001);
}
for (qid, iv) in self.cq_ivs.get_mut().iter_mut() {
let ctxt = thread_ctxts.get(&0).unwrap().lock();
let queues = ctxt.queues.borrow();
let &(ref cq, ref sq) = queues.get(qid).unwrap();
log::debug!(
"iv {iv} [cq {qid}: {:X}, {}] [sq {qid}: {:X}, {}]",
cq.data.physical(),
cq.data.len(),
sq.data.physical(),
sq.data.len()
);
}
{
let main_ctxt = thread_ctxts.get(&0).unwrap().lock();
for (i, prp) in main_ctxt.buffer_prp.borrow_mut().iter_mut().enumerate() {
*prp = (main_ctxt.buffer.borrow_mut().physical() + i * 4096) as u64;
}
let regs = self.regs.get_mut();
let mut queues = main_ctxt.queues.borrow_mut();
let (asq, acq) = queues.get_mut(&0).unwrap();
regs.aqa
.write(((acq.data.len() as u32 - 1) << 16) | (asq.data.len() as u32 - 1));
regs.asq_low.write(asq.data.physical() as u32);
regs.asq_high
.write((asq.data.physical() as u64 >> 32) as u32);
regs.acq_low.write(acq.data.physical() as u32);
regs.acq_high
.write((acq.data.physical() as u64 >> 32) as u32);
// Set IOCQES, IOSQES, AMS, MPS, and CSS
let mut cc = regs.cc.read();
cc &= 0xFF00000F;
cc |= (4 << 20) | (6 << 16);
regs.cc.write(cc);
}
log::debug!("Enabling controller.");
self.regs.get_mut().cc.writef(1, true);
{
log::debug!("Waiting for ready");
let timeout = Timeout::from_secs(1);
loop {
let csts = self.regs.get_mut().csts.read();
log::debug!("CSTS: {:X}", csts);
if csts & 1 == 0 {
timeout.run().map_err(|()| {
log::error!("failed to wait for ready");
Error::new(EIO)
})?;
} else {
break;
}
}
}
Ok(())
}
pub fn set_vector_masked(&self, vector: u16, masked: bool) {
let mut interrupt_vector_guard = (&self).interrupt_vector.lock();
if !interrupt_vector_guard.set_masked_if_fast(masked) {
let mut to_mask = 0x0000_0000;
let mut to_clear = 0x0000_0000;
let vector = vector as u8;
if masked {
assert_ne!(
to_clear & (1 << vector),
(1 << vector),
"nvmed: internal error: cannot both mask and set"
);
to_mask |= 1 << vector;
} else {
assert_ne!(
to_mask & (1 << vector),
(1 << vector),
"nvmed: internal error: cannot both mask and set"
);
to_clear |= 1 << vector;
}
if to_mask != 0 {
(&self).regs.write().intms.write(to_mask);
}
if to_clear != 0 {
(&self).regs.write().intmc.write(to_clear);
}
}
}
    /// Submit one command on `sq_id` and await its completion entry.
    ///
    /// `cmd_init` is invoked with a placeholder command id of 0; the
    /// executor assigns the real id via `Hardware::set_sqe_cmdid` before
    /// the command reaches the hardware.
    pub async fn submit_and_complete_command(
        &self,
        sq_id: SqId,
        cmd_init: impl FnOnce(CmdId) -> NvmeCmd,
    ) -> NvmeComp {
        NvmeExecutor::current().submit(sq_id, cmd_init(0)).await
    }
    /// Convenience wrapper: submit on the admin queue (SQ id 0).
    pub async fn submit_and_complete_admin_command(
        &self,
        cmd_init: impl FnOnce(CmdId) -> NvmeCmd,
    ) -> NvmeComp {
        self.submit_and_complete_command(0, cmd_init).await
    }
    /// Try to post one command to `sq_id` without blocking.
    ///
    /// Returns the (cq, cmd) id pair on success; calls `fail` and returns
    /// `None` when the submission queue is full. The command id handed to
    /// `cmd_init` is the slot index (current tail).
    pub fn try_submit_raw(
        &self,
        ctxt: &ThreadCtxt,
        sq_id: SqId,
        cmd_init: impl FnOnce(CmdId) -> NvmeCmd,
        fail: impl FnOnce(),
    ) -> Option<(CqId, CmdId)> {
        match ctxt.queues.borrow_mut().get_mut(&sq_id).unwrap() {
            (sq, _cq) => {
                if sq.is_full() {
                    fail();
                    return None;
                }
                let cmd_id = sq.tail;
                let tail = sq.submit_unchecked(cmd_init(cmd_id));
                // TODO: Submit in bulk
                unsafe {
                    self.submission_queue_tail(sq_id, tail);
                }
                // SQ and CQ ids are paired 1:1, so the CqId equals sq_id.
                Some((sq_id, cmd_id))
            }
        }
    }
pub async fn create_io_completion_queue(
&self,
io_cq_id: CqId,
vector: Option<Iv>,
) -> NvmeCompQueue {
let queue = NvmeCompQueue::new().expect("nvmed: failed to allocate I/O completion queue");
let len = u16::try_from(queue.data.len())
.expect("nvmed: internal error: I/O CQ longer than 2^16 entries");
let raw_len = len
.checked_sub(1)
.expect("nvmed: internal error: CQID 0 for I/O CQ");
let comp = self
.submit_and_complete_admin_command(|cid| {
NvmeCmd::create_io_completion_queue(
cid,
io_cq_id,
queue.data.physical(),
raw_len,
vector,
)
})
.await;
/*match comp.status.specific {
1 => panic!("invalid queue identifier"),
2 => panic!("invalid queue size"),
8 => panic!("invalid interrupt vector"),
_ => (),
}*/
queue
}
pub async fn create_io_submission_queue(&self, io_sq_id: SqId, io_cq_id: CqId) -> NvmeCmdQueue {
let q = NvmeCmdQueue::new().expect("failed to create submission queue");
let len = u16::try_from(q.data.len())
.expect("nvmed: internal error: I/O SQ longer than 2^16 entries");
let raw_len = len
.checked_sub(1)
.expect("nvmed: internal error: SQID 0 for I/O SQ");
let comp = self
.submit_and_complete_admin_command(|cid| {
NvmeCmd::create_io_submission_queue(
cid,
io_sq_id,
q.data.physical(),
raw_len,
io_cq_id,
)
})
.await;
/*match comp.status.specific {
0 => panic!("completion queue invalid"),
1 => panic!("invalid queue identifier"),
2 => panic!("invalid queue size"),
_ => (),
}*/
q
}
    /// Run controller discovery after `init`: identify the controller and
    /// all namespaces, then create the single I/O queue pair (id 1) used by
    /// the read/write paths. Returns the namespaces keyed by NSID.
    pub async fn init_with_queues(&self) -> BTreeMap<u32, NvmeNamespace> {
        log::trace!("preinit");
        self.identify_controller().await;
        let nsids = self.identify_namespace_list(0).await;
        log::debug!("first commands");
        let mut namespaces = BTreeMap::new();
        for nsid in nsids.iter().copied() {
            namespaces.insert(nsid, self.identify_namespace(nsid).await);
        }
        // TODO: Multiple queues
        // CQ must exist before the SQ that points at it.
        let cq = self.create_io_completion_queue(1, Some(0)).await;
        log::trace!("created compq");
        let sq = self.create_io_submission_queue(1, 1).await;
        log::trace!("created subq");
        // Hand the new queue pair to thread context 0 and route it to iv 0.
        self.thread_ctxts
            .read()
            .get(&0)
            .unwrap()
            .lock()
            .queues
            .borrow_mut()
            .insert(1, (sq, cq));
        self.sq_ivs.write().insert(1, 0);
        self.cq_ivs.write().insert(1, 0);
        namespaces
    }
    /// Issue one read or write of `blocks_1 + 1` blocks at `lba` through the
    /// thread context's bounce buffer, and map a nonzero completion status
    /// to EIO.
    ///
    /// NOTE(review): the transfer is assumed to fit the PRP cases below,
    /// i.e. at most the 8 KiB the read/write chunking guarantees — confirm
    /// if chunk sizes ever grow.
    async fn namespace_rw(
        &self,
        ctxt: &ThreadCtxt,
        namespace: &NvmeNamespace,
        lba: u64,
        blocks_1: u16,
        write: bool,
    ) -> Result<()> {
        let block_size = namespace.block_size;
        let prp = ctxt.buffer_prp.borrow_mut();
        let bytes = ((blocks_1 as u64) + 1) * block_size;
        // PRP rules: <= 1 page uses PRP1 only; <= 2 pages uses PRP1+PRP2 as
        // direct pointers; otherwise PRP2 points at the PRP list starting at
        // its second entry (physical() + 8 skips entry 0, already in PRP1).
        let (ptr0, ptr1) = if bytes <= 4096 {
            (prp[0], 0)
        } else if bytes <= 8192 {
            (prp[0], prp[1])
        } else {
            (prp[0], (prp.physical() + 8) as u64)
        };
        // Keep a copy of the submitted command so it can be logged on error.
        let mut cmd = NvmeCmd::default();
        let comp = self
            .submit_and_complete_command(1, |cid| {
                cmd = if write {
                    NvmeCmd::io_write(cid, namespace.id, lba, blocks_1, ptr0, ptr1)
                } else {
                    NvmeCmd::io_read(cid, namespace.id, lba, blocks_1, ptr0, ptr1)
                };
                cmd.clone()
            })
            .await;
        // Strip the phase tag (bit 0); the rest is the status code (+ type).
        let status = comp.status >> 1;
        if status == 0 {
            Ok(())
        } else {
            log::error!("command {:#x?} failed with status {:#x}", cmd, status);
            Err(Error::new(EIO))
        }
    }
    /// Read `buf.len()` bytes starting at block `lba`, in 8 KiB chunks
    /// bounced through the thread context buffer. Returns `buf.len()`.
    pub async fn namespace_read(
        &self,
        namespace: &NvmeNamespace,
        mut lba: u64,
        buf: &mut [u8],
    ) -> Result<usize> {
        let ctxt = self.cur_thread_ctxt();
        let ctxt = ctxt.lock();
        let block_size = namespace.block_size as usize;
        for chunk in buf.chunks_mut(/* TODO: buf len */ 8192) {
            // Round the chunk up to whole blocks for the device transfer.
            let blocks = (chunk.len() + block_size - 1) / block_size;
            assert!(blocks > 0);
            assert!(blocks <= 0x1_0000);
            self.namespace_rw(&*ctxt, namespace, lba, (blocks - 1) as u16, false)
                .await?;
            chunk.copy_from_slice(&ctxt.buffer.borrow()[..chunk.len()]);
            lba += blocks as u64;
        }
        Ok(buf.len())
    }
    /// Write `buf` starting at block `lba`; mirror image of
    /// [`Self::namespace_read`] (copy into the bounce buffer, then issue the
    /// device write).
    pub async fn namespace_write(
        &self,
        namespace: &NvmeNamespace,
        mut lba: u64,
        buf: &[u8],
    ) -> Result<usize> {
        let ctxt = self.cur_thread_ctxt();
        let ctxt = ctxt.lock();
        let block_size = namespace.block_size as usize;
        for chunk in buf.chunks(/* TODO: buf len */ 8192) {
            let blocks = (chunk.len() + block_size - 1) / block_size;
            assert!(blocks > 0);
            assert!(blocks <= 0x1_0000);
            ctxt.buffer.borrow_mut()[..chunk.len()].copy_from_slice(chunk);
            self.namespace_rw(&*ctxt, namespace, lba, (blocks - 1) as u16, true)
                .await?;
            lba += blocks as u64;
        }
        Ok(buf.len())
    }
}
@@ -0,0 +1,151 @@
use std::cell::UnsafeCell;
use std::ptr;
use syscall::Result;
use common::dma::Dma;
/// A submission queue entry.
#[derive(Clone, Copy, Debug, Default)]
#[repr(C, packed)]
// 64-byte wire layout of a submission queue entry; field order must match
// the spec exactly.
pub struct NvmeCmd {
    /// Opcode
    pub opcode: u8,
    /// Flags
    pub flags: u8,
    /// Command ID
    pub cid: u16,
    /// Namespace identifier
    pub nsid: u32,
    /// Reserved
    pub _rsvd: u64,
    /// Metadata pointer
    pub mptr: u64,
    /// Data pointer
    pub dptr: [u64; 2],
    /// Command dword 10
    pub cdw10: u32,
    /// Command dword 11
    pub cdw11: u32,
    /// Command dword 12
    pub cdw12: u32,
    /// Command dword 13
    pub cdw13: u32,
    /// Command dword 14
    pub cdw14: u32,
    /// Command dword 15
    pub cdw15: u32,
}
/// A completion queue entry.
#[derive(Clone, Copy, Debug)]
#[repr(C, packed)]
pub struct NvmeComp {
    pub command_specific: u32,
    pub _rsvd: u32,
    /// Current head of the matching submission queue, as consumed by the controller.
    pub sq_head: u16,
    pub sq_id: u16,
    pub cid: u16,
    /// Bit 0 is the phase tag; bits 8:1 the status code; bits 11:9 the
    /// status code type (see `Status::parse`).
    pub status: u16,
}
/// Completion queue
pub struct NvmeCompQueue {
pub data: Dma<[UnsafeCell<NvmeComp>]>,
pub head: u16,
pub phase: bool,
}
impl NvmeCompQueue {
pub fn new() -> Result<Self> {
Ok(Self {
data: unsafe { Dma::zeroed_slice(256)?.assume_init() },
head: 0,
phase: true,
})
}
/// Get a new completion queue entry, or return None if no entry is available yet.
pub(crate) fn complete(&mut self) -> Option<(u16, NvmeComp)> {
let entry = unsafe { ptr::read_volatile(self.data[usize::from(self.head)].get()) };
if ((entry.status & 1) == 1) == self.phase {
self.head = (self.head + 1) % (self.data.len() as u16);
if self.head == 0 {
self.phase = !self.phase;
}
Some((self.head, entry))
} else {
None
}
}
/// Get a new CQ entry, busy waiting until an entry appears.
pub fn complete_spin(&mut self) -> (u16, NvmeComp) {
log::debug!("Waiting for new CQ entry");
loop {
if let Some(some) = self.complete() {
return some;
} else {
unsafe {
std::hint::spin_loop();
}
}
}
}
}
/// Submission queue
pub struct NvmeCmdQueue {
pub data: Dma<[UnsafeCell<NvmeCmd>]>,
pub tail: u16,
pub head: u16,
}
impl NvmeCmdQueue {
pub fn new() -> Result<Self> {
Ok(Self {
data: unsafe { Dma::zeroed_slice(64)?.assume_init() },
tail: 0,
head: 0,
})
}
pub fn is_empty(&self) -> bool {
self.head == self.tail
}
pub fn is_full(&self) -> bool {
self.head == self.tail + 1
}
/// Add a new submission command entry to the queue. The caller must ensure that the queue have free
/// entries; this can be checked using `is_full`.
pub fn submit_unchecked(&mut self, entry: NvmeCmd) -> u16 {
unsafe { ptr::write_volatile(self.data[usize::from(self.tail)].get(), entry) }
self.tail = (self.tail + 1) % (self.data.len() as u16);
self.tail
}
}
/// Decoded Status Code Type (SCT) + Status Code (SC) of a completion entry.
#[derive(Debug)]
pub enum Status {
    GenericCmdStatus(u8),
    CommandSpecificStatus(u8),
    IntegrityError(u8),
    PathRelatedStatus(u8),
    Rsvd(u8),
    Vendor(u8),
}
impl Status {
    /// Split a raw CQE status field: bits 8:1 carry the status code, bits
    /// 11:9 the status code type (bit 0 is the phase tag, ignored here).
    pub fn parse(raw: u16) -> Self {
        let code = (raw >> 1) as u8;
        let sct = (raw >> 9) & 0b111;
        match sct {
            0 => Status::GenericCmdStatus(code),
            1 => Status::CommandSpecificStatus(code),
            2 => Status::IntegrityError(code),
            3 => Status::PathRelatedStatus(code),
            7 => Status::Vendor(code),
            // sct is masked to three bits, so everything else is 4..=6.
            _ => Status::Rsvd(code),
        }
    }
}
@@ -0,0 +1,12 @@
[package]
name = "partitionlib"
description = "GPT and MBR partition table library"
version = "0.1.0"
authors = ["Deepak Sirone <deepaksirone94@gmail.com>"]
edition = "2021"
license = "MIT"
[dependencies]
gpt = { version = "3.0.1" }
scroll = { version = "0.10", features = ["derive"] }
uuid = { version = "1.0", features = ["v4"] }
@@ -0,0 +1,3 @@
//! GPT and MBR partition table parsing; see `partition::get_partitions`.
mod mbr;
mod partition;
pub use self::partition::*;
@@ -0,0 +1,57 @@
use scroll::{Pread, Pwrite};
use std::io::{self, Read, Seek};
#[derive(Clone, Copy, Debug, Pread, Pwrite)]
// One 16-byte MBR partition entry, read little-endian from the boot sector.
pub(crate) struct Entry {
    /// 0x00 = inactive, 0x80 = bootable; anything else marks the entry invalid.
    pub(crate) drive_attrs: u8,
    // NOTE(review): start_head + start_cs pack the 3-byte CHS start address
    // (head byte, then sector/cylinder bytes as one u16) — confirm callers
    // never rely on decoded CHS; this crate only uses the LBA fields below.
    pub(crate) start_head: u8,
    pub(crate) start_cs: u16,
    /// Partition type byte.
    pub(crate) sys_id: u8,
    pub(crate) end_head: u8,
    pub(crate) end_cs: u16,
    /// Starting LBA, relative to the start of the disk.
    pub(crate) rel_sector: u32,
    /// Length in sectors; zero means the slot is unused.
    pub(crate) len: u32,
}
#[derive(Pread, Pwrite)]
// The full 512-byte boot sector: bootstrap code, four primary entries, and
// the 0xAA55 signature.
pub(crate) struct Header {
    pub(crate) bootstrap: [u8; 446],
    pub(crate) first_entry: Entry,
    pub(crate) second_entry: Entry,
    pub(crate) third_entry: Entry,
    pub(crate) fourth_entry: Entry,
    pub(crate) last_signature: u16, // 0xAA55
}
/// Read the boot sector from the start of `device` and parse it as an MBR
/// header. Returns `Ok(None)` when the 0xAA55 signature is absent.
pub(crate) fn read_header<D: Read + Seek>(device: &mut D) -> io::Result<Option<Header>> {
    device.seek(io::SeekFrom::Start(0))?;
    let mut sector = [0u8; 512];
    device.read_exact(&mut sector)?;
    // The buffer is exactly one header long, so parsing cannot run short.
    let header: Header = sector.pread_with(0, scroll::LE).unwrap();
    Ok(if header.last_signature == 0xAA55 {
        Some(header)
    } else {
        None
    })
}
impl Header {
pub(crate) fn partitions(&self) -> impl Iterator<Item = Entry> {
[
self.first_entry,
self.second_entry,
self.third_entry,
self.fourth_entry,
]
.into_iter()
.filter(Entry::is_valid)
}
}
impl Entry {
fn is_valid(&self) -> bool {
(self.drive_attrs == 0 || self.drive_attrs == 0x80) && self.len != 0
}
}
@@ -0,0 +1,84 @@
pub use gpt::disk::LogicalBlockSize;
use std::io::{self, Read, Seek};
use uuid::Uuid;
/// A union of the MBR and GPT partition entry
/// A union of the MBR and GPT partition entry
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Partition {
    /// The starting logical block number
    pub start_lba: u64,
    /// The size of the partition in sectors
    pub size: u64,
    // The following are GPT-only; MBR tables leave them as None.
    pub flags: Option<u64>,
    pub name: Option<String>,
    pub uuid: Option<Uuid>,
}
/// Which on-disk format the table was parsed from.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum PartitionTableKind {
    Mbr,
    Gpt,
}
/// A parsed partition table plus its source format.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PartitionTable {
    pub partitions: Vec<Partition>,
    pub kind: PartitionTableKind,
}
/// Parse a GPT table from `device` using the `gpt` crate; errors if the
/// device has no valid GPT header.
fn get_gpt_partitions<D: Read + Seek>(
    device: &mut D,
    sector_size: LogicalBlockSize,
) -> io::Result<PartitionTable> {
    let header = gpt::header::read_header_from_arbitrary_device(device, sector_size)?;
    Ok(PartitionTable {
        partitions: gpt::partition::file_read_partitions(device, &header, sector_size).map(
            |btree| {
                btree
                    .into_iter()
                    .map(|(_, part)| Partition {
                        flags: Some(part.flags),
                        // GPT stores an inclusive last LBA, hence the +1.
                        size: part.last_lba - part.first_lba + 1,
                        name: Some(part.name.clone()),
                        uuid: Some(part.part_guid),
                        start_lba: part.first_lba,
                    })
                    .collect()
            },
        )?,
        kind: PartitionTableKind::Gpt,
    })
}
fn get_mbr_partitions<D: Read + Seek>(device: &mut D) -> io::Result<Option<PartitionTable>> {
let Some(header) = crate::mbr::read_header(device)? else {
return Ok(None);
};
Ok(Some(PartitionTable {
kind: PartitionTableKind::Mbr,
partitions: header
.partitions()
.map(|partition: crate::mbr::Entry| Partition {
name: None,
uuid: None, // TODO: Some kind of one-way conversion should be possible
flags: None, // TODO
size: partition.len.into(),
start_lba: partition.rel_sector.into(),
})
.collect(),
}))
}
/// Read the partition table from `device`, preferring GPT and falling back
/// to MBR when GPT parsing fails. `Ok(None)` means neither format was found.
pub fn get_partitions<D: Read + Seek>(
    device: &mut D,
    sector_size: LogicalBlockSize,
) -> io::Result<Option<PartitionTable>> {
    match get_gpt_partitions(device, sector_size) {
        Ok(table) => Ok(Some(table)),
        Err(_) => get_mbr_partitions(device),
    }
}
impl Partition {
    /// Byte offset of this partition's first sector on the device.
    pub fn to_offset(&self, sector_size: LogicalBlockSize) -> u64 {
        let bytes_per_sector: u64 = sector_size.into();
        bytes_per_sector * self.start_lba
    }
}
@@ -0,0 +1,45 @@
use std::fs::File;
use partitionlib::{
get_partitions, LogicalBlockSize, Partition, PartitionTable, PartitionTableKind,
};
// Test helper: parse the table from a fixture image, panicking if the file
// is missing or contains no table.
fn get_partitions_from_file(path: &str) -> PartitionTable {
    let mut file = File::open(path).unwrap();
    get_partitions(&mut file, LogicalBlockSize::Lb512)
        .unwrap()
        .unwrap()
}
// NOTE: The following tests rely on outside resource files being correct.
#[test]
fn gpt() {
    let table = get_partitions_from_file("./resources/disk.img");
    assert_eq!(table.kind, PartitionTableKind::Gpt);
    assert_eq!(
        &table.partitions,
        &[Partition {
            flags: Some(0),
            name: Some("bug".to_owned()),
            uuid: Some(uuid::Uuid::parse_str("b665fba9-74d5-4069-a6b9-5ba3a164fdfe").unwrap()), // Microsoft basic data
            size: 957,
            start_lba: 34,
        }]
    );
}
#[test]
fn mbr() {
    let table = get_partitions_from_file("./resources/disk_mbr.img");
    assert_eq!(table.kind, PartitionTableKind::Mbr);
    assert_eq!(
        &table.partitions,
        &[Partition {
            flags: None,
            name: None,
            uuid: None,
            size: 3,
            start_lba: 1,
        }]
    );
}
@@ -0,0 +1 @@
/target
@@ -0,0 +1,23 @@
[package]
name = "usbscsid"
description = "USB SCSI driver"
version = "0.1.0"
authors = ["4lDO2 <4lDO2@protonmail.com>"]
edition = "2021"
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
base64 = "0.11" # Only for debugging
libredox.workspace = true
plain.workspace = true
driver-block = { path = "../driver-block" }
daemon = { path = "../../../daemon" }
redox_event.workspace = true
redox_syscall = { workspace = true, features = ["std"] }
thiserror.workspace = true
xhcid = { path = "../../usb/xhcid" }
[lints]
workspace = true
@@ -0,0 +1,168 @@
use std::collections::BTreeMap;
use std::env;
use driver_block::{Disk, DiskScheme, ExecutorTrait};
use syscall::{Error, EIO};
use xhcid_interface::{ConfigureEndpointsReq, PortId, XhciClientHandle};
pub mod protocol;
pub mod scsi;
use crate::protocol::Protocol;
use crate::scsi::Scsi;
// Daemonize immediately; `daemon` below never returns, so neither does this.
fn main() {
    daemon::Daemon::new(daemon);
}
/// Driver entry point: parse `usbscsid <scheme> <port> <protocol>`, bring up
/// the USB mass-storage interface, and serve a `disk.*` scheme until exit.
fn daemon(daemon: daemon::Daemon) -> ! {
    let mut args = env::args().skip(1);
    const USAGE: &'static str = "usbscsid <scheme> <port> <protocol>";
    let scheme = args.next().expect(USAGE);
    let port = args
        .next()
        .expect(USAGE)
        .parse::<PortId>()
        .expect("Expected port ID");
    let protocol = args
        .next()
        .expect(USAGE)
        .parse::<u8>()
        .expect("protocol has to be a number 0-255");
    println!(
        "USB SCSI driver spawned with scheme `{}`, port {}, protocol {}",
        scheme, port, protocol
    );
    let disk_scheme_name = format!("disk.usb-{scheme}+{port}-scsi");
    // TODO: Use eventfds.
    let handle =
        XhciClientHandle::new(scheme.to_owned(), port).expect("Failed to open XhciClientHandle");
    let desc = handle
        .get_standard_descs()
        .expect("Failed to get standard descriptors");
    // TODO: Perhaps the drivers should just be given the config, interface, and alternate setting
    // from xhcid.
    // Pick the first config whose interface is mass storage (class 8),
    // SCSI transparent command set (subclass 6), bulk-only transport (0x50).
    let (conf_desc, configuration_value, (if_desc, interface_num, alternate_setting)) = desc
        .config_descs
        .iter()
        .find_map(|config_desc| {
            let interface_desc = config_desc.interface_descs.iter().find_map(|if_desc| {
                if if_desc.class == 8 && if_desc.sub_class == 6 && if_desc.protocol == 0x50 {
                    Some((if_desc.clone(), if_desc.number, if_desc.alternate_setting))
                } else {
                    None
                }
            })?;
            Some((
                config_desc.clone(),
                config_desc.configuration_value,
                interface_desc,
            ))
        })
        .expect("Failed to find suitable configuration");
    handle
        .configure_endpoints(&ConfigureEndpointsReq {
            config_desc: configuration_value,
            interface_desc: Some(interface_num),
            alternate_setting: Some(alternate_setting),
            hub_ports: None,
        })
        .expect("Failed to configure endpoints");
    let mut protocol = protocol::setup(&handle, protocol, &desc, &conf_desc, &if_desc)
        .expect("Failed to setup protocol");
    // TODO: Let all of the USB drivers fork or be managed externally, and xhcid won't have to keep
    // track of all the drivers.
    let mut scsi = Scsi::new(&mut *protocol).expect("usbscsid: failed to setup SCSI");
    println!("SCSI initialized");
    // Smoke-test: read sector 0 and dump it (debug aid).
    let mut buffer = [0u8; 512];
    scsi.read(&mut *protocol, 0, &mut buffer).unwrap();
    println!("DISK CONTENT: {}", base64::encode(&buffer[..]));
    let event_queue = event::EventQueue::new().unwrap();
    event::user_data! {
        enum Event {
            Scheme,
        }
    };
    // Single disk (id 0) backed by the SCSI layer over this protocol.
    let mut scheme = DiskScheme::new(
        None,
        disk_scheme_name,
        BTreeMap::from([(
            0,
            UsbDisk {
                scsi: &mut scsi,
                protocol: &mut *protocol,
            },
        )]),
        &driver_block::FuturesExecutor,
    );
    // FIXME should this wait notifying readiness until the disk scheme is created?
    daemon.ready();
    //libredox::call::setrens(0, 0).expect("nvmed: failed to enter null namespace");
    event_queue
        .subscribe(
            scheme.event_handle().raw(),
            Event::Scheme,
            event::EventFlags::READ,
        )
        .unwrap();
    // Main loop: pump scheme requests whenever the event queue signals them.
    for event in event_queue {
        match event.unwrap().user_data {
            Event::Scheme => driver_block::FuturesExecutor
                .block_on(scheme.tick())
                .unwrap(),
        }
    }
    std::process::exit(0);
}
struct UsbDisk<'a> {
scsi: &'a mut Scsi,
protocol: &'a mut dyn Protocol,
}
impl Disk for UsbDisk<'_> {
fn block_size(&self) -> u32 {
self.scsi.block_size
}
fn size(&self) -> u64 {
self.scsi.get_disk_size()
}
async fn read(&mut self, block: u64, buffer: &mut [u8]) -> syscall::Result<usize> {
match self.scsi.read(self.protocol, block, buffer) {
Ok(bytes_read) => Ok(bytes_read as usize),
Err(err) => {
eprintln!("usbscsid: READ IO ERROR: {err}");
Err(Error::new(EIO))
}
}
}
async fn write(&mut self, block: u64, buffer: &[u8]) -> syscall::Result<usize> {
match self.scsi.write(self.protocol, block, buffer) {
Ok(bytes_written) => Ok(bytes_written as usize),
Err(err) => {
eprintln!("usbscsid: WRITE IO ERROR: {err}");
Err(Error::new(EIO))
}
}
}
}
@@ -0,0 +1,363 @@
use std::num::NonZeroU32;
use std::slice;
use xhcid_interface::{
ConfDesc, DeviceReqData, EndpBinaryDirection, EndpDirection, EndpointStatus, IfDesc, Invalid,
PortReqRecipient, PortReqTy, PortTransferStatus, PortTransferStatusKind, XhciClientHandle,
XhciClientHandleError, XhciEndpHandle,
};
use super::{Protocol, ProtocolError, SendCommandStatus, SendCommandStatusKind};
pub const CBW_SIGNATURE: u32 = 0x43425355;
/// 0 means host to dev, 1 means dev to host
pub const CBW_FLAGS_DIRECTION_BIT: u8 = 1 << CBW_FLAGS_DIRECTION_SHIFT;
pub const CBW_FLAGS_DIRECTION_SHIFT: u8 = 7;
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
// 31-byte CBW sent at the start of every bulk-only transfer; layout is the
// wire format, do not reorder.
pub struct CommandBlockWrapper {
    /// Always CBW_SIGNATURE ("USBC" little-endian).
    pub signature: u32,
    /// Echoed back in the matching CSW.
    pub tag: u32,
    pub data_transfer_len: u32,
    pub flags: u8, // upper nibble reserved; bit 7 is the direction (see CBW_FLAGS_DIRECTION_*)
    pub lun: u8,   // bits 7:5 reserved
    /// Number of valid bytes in `command_block` (1..=16).
    pub cb_len: u8,
    pub command_block: [u8; 16],
}
impl CommandBlockWrapper {
    /// Build a CBW for command block `cb`, padding it to 16 bytes.
    /// Fails with `TooLargeCommandBlock` when `cb` exceeds 16 bytes.
    pub fn new(
        tag: u32,
        data_transfer_len: u32,
        direction: EndpBinaryDirection,
        lun: u8,
        cb: &[u8],
    ) -> Result<Self, ProtocolError> {
        if cb.len() > 16 {
            return Err(ProtocolError::TooLargeCommandBlock(cb.len()));
        }
        let mut command_block = [0u8; 16];
        command_block[..cb.len()].copy_from_slice(cb);
        // Direction is encoded in bit 7 of the flags byte (1 = device-to-host).
        let flags = match direction {
            EndpBinaryDirection::Out => 0,
            EndpBinaryDirection::In => 1,
        } << CBW_FLAGS_DIRECTION_SHIFT;
        Ok(Self {
            signature: CBW_SIGNATURE,
            tag,
            data_transfer_len,
            flags,
            lun,
            cb_len: cb.len() as u8,
            command_block,
        })
    }
}
unsafe impl plain::Plain for CommandBlockWrapper {}
/// CSW signature: the bytes "USBS" read as a little-endian u32.
pub const CSW_SIGNATURE: u32 = 0x53425355;
/// Command completion status carried in the CSW `status` byte.
#[repr(u8)]
pub enum CswStatus {
    Passed = 0,
    Failed = 1,
    PhaseError = 2,
    // the rest are reserved
}
/// USB Mass Storage Bulk-Only Transport Command Status Wrapper (CSW).
///
/// A 13-byte little-endian structure read from the bulk IN endpoint after
/// the (optional) data stage of every command.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Default)]
pub struct CommandStatusWrapper {
    /// Expected to equal `CSW_SIGNATURE`.
    pub signature: u32,
    /// Must match the tag of the CBW this CSW answers.
    pub tag: u32,
    /// Bytes of the expected data stage that were NOT transferred.
    pub data_residue: u32,
    /// One of the `CswStatus` values.
    pub status: u8,
}
unsafe impl plain::Plain for CommandStatusWrapper {}
impl CommandStatusWrapper {
    /// True when the signature marks this as a well-formed CSW.
    pub fn is_valid(&self) -> bool {
        self.signature == CSW_SIGNATURE
    }
}
/// State for the Bulk-Only Transport (BOT/BBB) protocol on one USB interface:
/// the two bulk endpoint handles plus a rolling command tag.
pub struct BulkOnlyTransport<'a> {
    handle: &'a XhciClientHandle,
    bulk_in: XhciEndpHandle,
    bulk_out: XhciEndpHandle,
    // Endpoint numbers, kept for CLEAR_FEATURE(ENDPOINT_HALT) requests.
    bulk_in_num: u8,
    bulk_out_num: u8,
    max_lun: u8,
    // Incremented for every CBW; the device echoes it in the CSW.
    current_tag: u32,
    interface_num: u8,
}
/// Standard USB feature selector used to clear an endpoint halt condition.
pub const FEATURE_ENDPOINT_HALT: u16 = 0;
impl<'a> BulkOnlyTransport<'a> {
pub fn init(
handle: &'a XhciClientHandle,
config_desc: &ConfDesc,
if_desc: &IfDesc,
) -> Result<Self, ProtocolError> {
let endpoints = &if_desc.endpoints;
let bulk_in_num = (endpoints
.iter()
.position(|endpoint| endpoint.direction() == EndpDirection::In)
.unwrap()
+ 1) as u8;
let bulk_out_num = (endpoints
.iter()
.position(|endpoint| endpoint.direction() == EndpDirection::Out)
.unwrap()
+ 1) as u8;
let max_lun = get_max_lun(handle, 0)?;
println!("BOT_MAX_LUN {}", max_lun);
Ok(Self {
bulk_in: handle.open_endpoint(bulk_in_num)?,
bulk_out: handle.open_endpoint(bulk_out_num)?,
bulk_in_num,
bulk_out_num,
handle,
max_lun,
current_tag: 0,
interface_num: if_desc.number,
})
}
fn clear_stall_in(&mut self) -> Result<(), XhciClientHandleError> {
if self.bulk_in.status()? == EndpointStatus::Halted {
self.bulk_in.reset(false)?;
self.handle.clear_feature(
PortReqRecipient::Endpoint,
u16::from(self.bulk_in_num),
FEATURE_ENDPOINT_HALT,
)?;
}
Ok(())
}
fn clear_stall_out(&mut self) -> Result<(), XhciClientHandleError> {
if self.bulk_out.status()? == EndpointStatus::Halted {
self.bulk_out.reset(false)?;
self.handle.clear_feature(
PortReqRecipient::Endpoint,
u16::from(self.bulk_out_num),
FEATURE_ENDPOINT_HALT,
)?;
}
Ok(())
}
fn reset_recovery(&mut self) -> Result<(), ProtocolError> {
bulk_only_mass_storage_reset(self.handle, self.interface_num.into())?;
self.clear_stall_in()?;
self.clear_stall_out()?;
if self.bulk_in.status()? == EndpointStatus::Halted
|| self.bulk_out.status()? == EndpointStatus::Halted
{
return Err(ProtocolError::RecoveryFailed);
}
Ok(())
}
    /// Reads the 13-byte CSW from the bulk IN endpoint.
    ///
    /// On a stall the stall is cleared and the read retried; `already` marks
    /// that a retry has happened, in which case a full Reset Recovery is
    /// attempted first.
    /// NOTE(review): the stall-retry recursion is not otherwise bounded — a
    /// device that stalls persistently (and recovers the endpoints each time)
    /// could loop here; confirm whether a retry cap is needed.
    fn read_csw_raw(
        &mut self,
        csw_buffer: &mut [u8; 13],
        already: bool,
    ) -> Result<(), ProtocolError> {
        match self.bulk_in.transfer_read(&mut csw_buffer[..])? {
            PortTransferStatus {
                kind: PortTransferStatusKind::Stalled,
                ..
            } => {
                if already {
                    self.reset_recovery()?;
                }
                println!("bulk in endpoint stalled when reading CSW");
                self.clear_stall_in()?;
                self.read_csw_raw(csw_buffer, true)?;
            }
            PortTransferStatus {
                kind: PortTransferStatusKind::ShortPacket,
                bytes_transferred,
            } if bytes_transferred != 13 => {
                // A CSW is always exactly 13 bytes; anything shorter is fatal here.
                panic!(
                    "received a short packet when reading CSW ({} != 13)",
                    bytes_transferred
                )
            }
            _ => (),
        }
        Ok(())
    }
    /// Reads the CSW, allowing one stall-retry before escalating to recovery.
    fn read_csw(&mut self, csw_buffer: &mut [u8; 13]) -> Result<(), ProtocolError> {
        self.read_csw_raw(csw_buffer, false)
    }
}
impl<'a> Protocol for BulkOnlyTransport<'a> {
fn send_command(
&mut self,
cb: &[u8],
data: DeviceReqData,
) -> Result<SendCommandStatus, ProtocolError> {
self.current_tag += 1;
let tag = self.current_tag;
let mut cbw_bytes = [0u8; 31];
let cbw = plain::from_mut_bytes::<CommandBlockWrapper>(&mut cbw_bytes).unwrap();
*cbw = CommandBlockWrapper::new(tag, data.len() as u32, data.direction().into(), 0, cb)?;
let cbw = *cbw;
match self.bulk_out.transfer_write(&cbw_bytes)? {
PortTransferStatus {
kind: PortTransferStatusKind::Stalled,
..
} => {
// TODO: Error handling
panic!("bulk out endpoint stalled when sending CBW {:?}", cbw);
//self.clear_stall_out()?;
//dbg!(self.bulk_in.status()?, self.bulk_out.status()?);
}
PortTransferStatus {
bytes_transferred, ..
} if bytes_transferred != 31 => {
panic!(
"received short packet when sending CBW ({} != 31)",
bytes_transferred
);
}
_ => (),
}
let early_residue: Option<NonZeroU32> = match data {
DeviceReqData::In(buffer) => match self.bulk_in.transfer_read(buffer)? {
PortTransferStatus {
kind,
bytes_transferred,
} => match kind {
PortTransferStatusKind::Success => None,
PortTransferStatusKind::ShortPacket => {
println!(
"received short packet (len {}) when transferring data",
bytes_transferred
);
NonZeroU32::new(bytes_transferred)
}
PortTransferStatusKind::Stalled => {
panic!("bulk in endpoint stalled when reading data");
//self.clear_stall_in()?;
}
PortTransferStatusKind::Unknown => {
return Err(ProtocolError::XhciError(
XhciClientHandleError::InvalidResponse(Invalid(
"unknown transfer status",
)),
));
}
},
},
DeviceReqData::Out(buffer) => match self.bulk_out.transfer_write(buffer)? {
PortTransferStatus {
kind,
bytes_transferred,
} => match kind {
PortTransferStatusKind::Success => None,
PortTransferStatusKind::ShortPacket => {
println!(
"received short packet (len {}) when transferring data",
bytes_transferred
);
NonZeroU32::new(bytes_transferred)
}
PortTransferStatusKind::Stalled => {
panic!("bulk out endpoint stalled when reading data");
//self.clear_stall_out()?;
}
PortTransferStatusKind::Unknown => {
return Err(ProtocolError::XhciError(
XhciClientHandleError::InvalidResponse(Invalid(
"unknown transfer status",
)),
));
}
},
},
DeviceReqData::NoData => None,
};
let mut csw_buffer = [0u8; 13];
self.read_csw(&mut csw_buffer)?;
let csw = plain::from_bytes::<CommandStatusWrapper>(&csw_buffer).unwrap();
let residue = early_residue.or(NonZeroU32::new(csw.data_residue));
if csw.status == CswStatus::Failed as u8 {
println!("CSW indicated failure (CSW {:?}, CBW {:?})", csw, cbw);
}
if !csw.is_valid() || csw.tag != cbw.tag {
println!("Invald CSW {:?} (for CBW {:?})", csw, cbw);
self.reset_recovery()?;
if self.bulk_in.status()? == EndpointStatus::Halted
|| self.bulk_out.status()? == EndpointStatus::Halted
{
return Err(ProtocolError::ProtocolError(
"Reset Recovery didn't reset endpoints",
));
}
return Err(ProtocolError::ProtocolError(
"CSW invalid, but a recover was successful",
));
}
/*if self.bulk_in.status()? == EndpointStatus::Halted
|| self.bulk_out.status()? == EndpointStatus::Halted
{
println!("Trying to recover from stall");
dbg!(self.bulk_in.status()?, self.bulk_out.status()?);
}*/
Ok(SendCommandStatus {
kind: if csw.status == CswStatus::Passed as u8 {
SendCommandStatusKind::Success
} else if csw.status == CswStatus::Failed as u8 {
SendCommandStatusKind::Failed
} else {
return Err(ProtocolError::ProtocolError(
"bulk-only transport phase error, or other",
));
},
residue,
})
}
}
/// Issues the class-specific Bulk-Only Mass Storage Reset request
/// (bRequest 0xFF) to the given interface.
pub fn bulk_only_mass_storage_reset(
    handle: &XhciClientHandle,
    if_num: u16,
) -> Result<(), XhciClientHandleError> {
    const BREQUEST_BOMSR: u8 = 0xFF;
    handle.device_request(
        PortReqTy::Class,
        PortReqRecipient::Interface,
        BREQUEST_BOMSR,
        0,
        if_num,
        DeviceReqData::NoData,
    )
}
/// Issues the class-specific Get Max LUN request (bRequest 0xFE) and returns
/// the highest logical unit number the device supports.
pub fn get_max_lun(handle: &XhciClientHandle, if_num: u16) -> Result<u8, XhciClientHandleError> {
    const BREQUEST_GET_MAX_LUN: u8 = 0xFE;
    let mut max_lun = 0u8;
    handle.device_request(
        PortReqTy::Class,
        PortReqRecipient::Interface,
        BREQUEST_GET_MAX_LUN,
        0,
        if_num,
        DeviceReqData::In(slice::from_mut(&mut max_lun)),
    )?;
    Ok(max_lun)
}
@@ -0,0 +1,81 @@
use std::io;
use std::num::NonZeroU32;
use thiserror::Error;
use xhcid_interface::{
ConfDesc, DevDesc, DeviceReqData, IfDesc, XhciClientHandle, XhciClientHandleError,
};
/// Errors surfaced by the transport-protocol layer (currently bulk-only).
#[derive(Debug, Error)]
pub enum ProtocolError {
    // TODO: Add some kind of context here, since it's very useful indeed to be able to see which
    // command returned the protocol error.
    #[error("Too large command block ({0} > 16)")]
    TooLargeCommandBlock(usize),
    #[error("xhcid connection error: {0}")]
    XhciError(#[from] XhciClientHandleError),
    #[error("i/o error")]
    IoError(#[from] io::Error),
    #[error("attempted recovery failed")]
    RecoveryFailed,
    #[error("protocol error")]
    ProtocolError(&'static str),
}
/// Outcome of one transport command: pass/fail plus the data residue.
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
pub struct SendCommandStatus {
    /// Bytes of the expected data stage that were NOT transferred
    /// (`None` when the full transfer happened).
    pub residue: Option<NonZeroU32>,
    pub kind: SendCommandStatusKind,
}
impl SendCommandStatus {
    /// Number of bytes actually transferred for a request of `transfer_len`
    /// bytes.
    ///
    /// Saturates at zero when a misbehaving device reports a residue larger
    /// than the request (the plain subtraction previously used would panic in
    /// debug builds / wrap in release builds in that case).
    pub fn bytes_transferred(&self, transfer_len: u32) -> u32 {
        transfer_len.saturating_sub(self.residue.map(u32::from).unwrap_or(0))
    }
}
/// Pass/fail result of a command (phase errors are reported as `Err`s instead).
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum SendCommandStatusKind {
    Success,
    Failed,
}
impl Default for SendCommandStatusKind {
    fn default() -> Self {
        Self::Success
    }
}
/// A command transport (e.g. bulk-only) able to carry a SCSI command block
/// plus its optional data stage, returning the device-reported status.
pub trait Protocol {
    fn send_command(
        &mut self,
        command: &[u8],
        data: DeviceReqData,
    ) -> Result<SendCommandStatus, ProtocolError>;
}
/// Bulk-only transport
pub mod bot;
mod uas {
// TODO
}
use bot::BulkOnlyTransport;
/// Instantiates the transport protocol matching the interface's bInterfaceProtocol.
///
/// Currently only 0x50 (Bulk-Only Transport) is supported; anything else
/// yields `None`.
pub fn setup<'a>(
    handle: &'a XhciClientHandle,
    protocol: u8,
    dev_desc: &DevDesc,
    conf_desc: &ConfDesc,
    if_desc: &IfDesc,
) -> Option<Box<dyn Protocol + 'a>> {
    if protocol == 0x50 {
        let bot = BulkOnlyTransport::init(handle, conf_desc, if_desc).unwrap();
        Some(Box::new(bot))
    } else {
        None
    }
}
@@ -0,0 +1,559 @@
use super::opcodes::Opcode;
use std::convert::TryInto;
use std::{fmt, mem, slice};
/// INQUIRY command descriptor block (6 bytes, SPC).
#[repr(C, packed)]
pub struct Inquiry {
    pub opcode: u8,
    /// bits 7:2 are reserved, bit 1 (CMDDT) is obsolete, bit 0 is EVPD
    pub evpd: u8,
    /// Vital product data page, meaningful when EVPD is set.
    pub page_code: u8,
    /// big endian
    pub alloc_len: u16,
    pub control: u8,
}
unsafe impl plain::Plain for Inquiry {}
impl Inquiry {
    /// Builds an INQUIRY CDB; `alloc_len` is converted to big-endian here.
    pub const fn new(evpd: bool, page_code: u8, alloc_len: u16, control: u8) -> Self {
        Self {
            opcode: Opcode::Inquiry as u8,
            evpd: evpd as u8,
            page_code,
            alloc_len: u16::to_be(alloc_len),
            control,
        }
    }
}
/// Standard INQUIRY data (SPC), 96 bytes as laid out here.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct StandardInquiryData {
    /// Peripheral device type (bits 4:0), and peripheral device qualifier (bits 7:5).
    pub a: u8,
    /// Removable media bit (bit 7, bits 6:0 are reserved).
    pub rmb: u8,
    /// Version of the SCSI command set.
    pub version: u8,
    pub b: u8,
    /// Number of inquiry bytes that follow byte 4 (so full size = 4 + this).
    pub additional_len: u8,
    pub c: u8,
    pub d: u8,
    pub e: u8,
    pub t10_vendor_info: u64,
    pub product_ident: [u8; 16],
    pub product_rev_label: u32,
    pub driver_serial_no: [u8; 8],
    pub vendor_uniq: [u8; 12],
    _rsvd1: [u8; 2],
    pub version_descs: [u16; 8],
    _rsvd2: [u8; 22],
}
unsafe impl plain::Plain for StandardInquiryData {}
impl StandardInquiryData {
    /// Peripheral device type, bits 4:0 of byte 0.
    pub const fn periph_dev_ty(&self) -> u8 {
        self.a & 0x1F
    }
    /// Peripheral qualifier, bits 7:5 of byte 0.
    pub const fn periph_dev_qual(&self) -> u8 {
        (self.a & 0xE0) >> 5
    }
    pub const fn version(&self) -> u8 {
        self.version
    }
}
/// Peripheral device types reported in INQUIRY byte 0 (subset).
#[repr(u8)]
pub enum PeriphDeviceType {
    DirectAccess,
    SeqAccess,
    // there are more
}
/// Values of the INQUIRY `version` byte (SPC generation implemented).
#[repr(u8)]
pub enum InquiryVersion {
    NoConformance,
    Spc,
    Spc2,
    Spc3,
    Spc4,
    Spc5,
}
/// REQUEST SENSE command descriptor block (6 bytes, SPC).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct RequestSense {
    pub opcode: u8,
    /// DESC bit selects descriptor-format sense data.
    pub desc: u8, // bits 7:1 reserved
    _rsvd: u16,
    pub alloc_len: u8,
    pub control: u8,
}
unsafe impl plain::Plain for RequestSense {}
impl RequestSense {
    /// Allocation length large enough for any fixed-format sense response.
    pub const MINIMAL_ALLOC_LEN: u8 = 252;
    pub const fn new(desc: bool, alloc_len: u8, control: u8) -> Self {
        Self {
            opcode: Opcode::RequestSense as u8,
            desc: desc as u8,
            _rsvd: 0,
            alloc_len,
            control,
        }
    }
}
/// Fixed-format sense data (SPC), as returned by REQUEST SENSE.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct FixedFormatSenseData {
    /// Response code (bits 6:0) plus the VALID bit (bit 7).
    pub a: u8,
    _obsolete: u8,
    /// Sense key in bits 3:0, plus ILI/EOM/FILEMARK flags.
    pub b: u8,
    pub info: u32,
    /// Number of sense bytes following this field (byte 7 of the response).
    pub add_sense_len: u8,
    pub command_specific_info: u32,
    pub add_sense_code: u8,
    pub add_sense_code_qual: u8,
    pub field_replacable_unit_code: u8,
    pub sense_key_specific: [u8; 3], // big endian
    /// Flexible-array marker for the trailing additional sense bytes.
    pub add_sense_bytes: [u8; 0],
}
unsafe impl plain::Plain for FixedFormatSenseData {}
impl FixedFormatSenseData {
    pub const fn additional_len(&self) -> u16 {
        self.add_sense_len as u16 + 7
    }
    /// Returns the variable-length additional sense bytes that follow the
    /// fixed 18-byte portion.
    ///
    /// `add_sense_len` counts the bytes after byte 7, and the fixed portion
    /// runs through byte 17, so there are `add_sense_len - 10` trailing bytes
    /// (saturating at zero for short sense data). The previous version began
    /// the slice at the `add_sense_len` field itself — inside the fixed part —
    /// and computed `add_sense_len - 18`, which underflows for the common
    /// 18-byte sense response.
    ///
    /// # Safety
    /// The caller must guarantee that the buffer this struct was parsed from
    /// really extends `add_sense_len - 10` bytes past the struct.
    pub unsafe fn add_sense_bytes(&self) -> &[u8] {
        slice::from_raw_parts(
            std::ptr::addr_of!(self.add_sense_bytes) as *const u8,
            (self.add_sense_len as usize).saturating_sub(10),
        )
    }
    pub fn sense_key(&self) -> SenseKey {
        let sense_key_raw = self.b & 0b1111;
        // Safe because all possible values (0-15) are used by the enum.
        unsafe { mem::transmute(sense_key_raw) }
    }
}
/// SCSI sense keys (SPC). The discriminants are the on-wire values; the
/// `transmute` in `FixedFormatSenseData::sense_key` relies on every value
/// 0x00..=0x0F being present, so do not remove variants.
#[repr(u8)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum SenseKey {
    NoSense = 0x00,
    RecoveredError = 0x01,
    NotReady = 0x02,
    MediumError = 0x03,
    HardwareError = 0x04,
    IllegalRequest = 0x05,
    UnitAttention = 0x06,
    DataProtect = 0x07,
    BlankCheck = 0x08,
    VendorSpecific = 0x09,
    CopyAborted = 0x0A,
    AbortedCommand = 0x0B,
    Reserved = 0x0C,
    VolumeOverflow = 0x0D,
    Miscompare = 0x0E,
    Completed = 0x0F,
}
impl Default for SenseKey {
    fn default() -> Self {
        Self::NoSense
    }
}
/// ASC 0x24: INVALID FIELD IN CDB (pairs with sense key ILLEGAL REQUEST).
pub const ADD_SENSE_CODE05_INVAL_CDB_FIELD: u8 = 0x24;
/// READ (16) command descriptor block (SBC). Multi-byte fields are stored in
/// big-endian wire order.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct Read16 {
    pub opcode: u8,
    /// RDPROTECT/DPO/FUA/RARC bits; zero here.
    pub a: u8,
    pub lba: u64,
    pub transfer_len: u32,
    pub b: u8,
    pub control: u8,
}
unsafe impl plain::Plain for Read16 {}
impl Read16 {
    /// Builds a READ (16) CDB; `lba` and `transfer_len` are converted to
    /// big-endian here.
    pub const fn new(lba: u64, transfer_len: u32, control: u8) -> Self {
        // TODO: RDPROTECT, DPO, FUA, RARC
        // TODO: DLD
        // TODO: Group number
        Self {
            opcode: Opcode::Read16 as u8,
            a: 0,
            lba: u64::to_be(lba),
            transfer_len: u32::to_be(transfer_len),
            b: 0,
            control,
        }
    }
}
/// WRITE (16) command descriptor block (SBC). Multi-byte fields are stored in
/// big-endian wire order.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct Write16 {
    pub opcode: u8,
    /// WRPROTECT/DPO/FUA bits; zero here.
    pub a: u8,
    pub lba: u64, // big endian
    pub transfer_len: u32,
    pub b: u8,
    pub control: u8,
}
unsafe impl plain::Plain for Write16 {}
impl Write16 {
    /// Builds a WRITE (16) CDB.
    ///
    /// `lba` and `transfer_len` are converted to big-endian here, mirroring
    /// `Read16::new` (the previous version stored them in host order, which
    /// produced a corrupt CDB on little-endian machines).
    pub const fn new(lba: u64, transfer_len: u32, control: u8) -> Self {
        // TODO: WRPROTECT, DPO, FUA, group number (same as Read16).
        Self {
            opcode: Opcode::Write16 as u8,
            a: 0,
            lba: u64::to_be(lba),
            transfer_len: u32::to_be(transfer_len),
            b: 0,
            control,
        }
    }
}
/// MODE SENSE (6) command descriptor block (SPC).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct ModeSense6 {
    pub opcode: u8,
    /// DBD (disable block descriptors) in bit 3.
    pub a: u8,
    /// Page code (bits 5:0) and PC / page control (bits 7:6).
    pub b: u8,
    pub subpage_code: u8,
    pub alloc_len: u8,
    pub control: u8,
}
unsafe impl plain::Plain for ModeSense6 {}
impl ModeSense6 {
    /// Builds a MODE SENSE (6) CDB; `pc` is the raw 2-bit page-control value.
    pub const fn new(
        dbd: bool,
        page_code: u8,
        pc: u8,
        subpage_code: u8,
        alloc_len: u8,
        control: u8,
    ) -> Self {
        Self {
            opcode: Opcode::ModeSense6 as u8,
            a: (dbd as u8) << 3,
            b: page_code | (pc << 6),
            subpage_code,
            alloc_len,
            control,
        }
    }
}
/// MODE SENSE (10) command descriptor block (SPC).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct ModeSense10 {
    pub opcode: u8,
    /// LLBAA (bit 4) and DBD (bit 3).
    pub a: u8,
    /// Page code (bits 5:0) and PC / page control (bits 7:6).
    pub b: u8,
    pub subpage_code: u8,
    pub _rsvd: [u8; 3],
    /// big endian
    pub alloc_len: u16,
    pub control: u8,
}
unsafe impl plain::Plain for ModeSense10 {}
impl ModeSense10 {
    /// Builds a MODE SENSE (10) CDB; `alloc_len` is converted to big-endian.
    pub const fn new(
        dbd: bool,
        llbaa: bool,
        page_code: u8,
        pc: ModePageControl,
        subpage_code: u8,
        alloc_len: u16,
        control: u8,
    ) -> Self {
        Self {
            opcode: Opcode::ModeSense10 as u8,
            a: ((dbd as u8) << 3) | ((llbaa as u8) << 4),
            b: page_code | ((pc as u8) << 6),
            subpage_code,
            _rsvd: [0u8; 3],
            // Host-to-big-endian conversion, like the other CDB constructors.
            // The previous `u16::from_be` only worked because the u16 byte
            // swap is its own inverse.
            alloc_len: u16::to_be(alloc_len),
            control,
        }
    }
    /// Convenience: request all pages (0x3F) with their block descriptors,
    /// current values, LONGLBA allowed.
    pub const fn get_block_desc(alloc_len: u16, control: u8) -> Self {
        Self::new(
            false,
            true,
            0x3F,
            ModePageControl::CurrentValues,
            0x00,
            alloc_len,
            control,
        )
    }
}
/// MODE SENSE PC (page control) field: which flavor of the page to return.
#[repr(u8)]
pub enum ModePageControl {
    CurrentValues,
    ChangeableChanges,
    DefaultValues,
    SavedValue,
}
/// Short LBA mode parameter block descriptor (8 bytes, big endian), used when
/// LONGLBA is clear for direct-access devices.
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct ShortLbaModeParamBlkDesc {
    pub block_count: u32,
    _rsvd: u8,
    /// 24-bit big-endian block length in bytes.
    pub logical_block_len: [u8; 3],
}
unsafe impl plain::Plain for ShortLbaModeParamBlkDesc {}
impl ShortLbaModeParamBlkDesc {
    pub const fn block_count(&self) -> u32 {
        u32::from_be(self.block_count)
    }
    pub const fn logical_block_len(&self) -> u32 {
        u24_be_to_u32(self.logical_block_len)
    }
}
impl fmt::Debug for ShortLbaModeParamBlkDesc {
    // Debug prints the decoded (host-order) values, not the raw wire bytes.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ShortLbaModeParamBlkDesc")
            .field("block_count", &self.block_count())
            .field("logical_block_len", &self.logical_block_len())
            .finish()
    }
}
/// Decodes a 3-byte big-endian integer into a `u32`.
const fn u24_be_to_u32(u24: [u8; 3]) -> u32 {
    u32::from_be_bytes([0, u24[0], u24[1], u24[2]])
}
/// From SPC-3, when LONGLBA is not set, and the peripheral device type of the INQUIRY data indicates that the device is not a direct access device. Otherwise, `ShortLbaModeParamBlkDesc` is used instead.
#[repr(C, packed)]
#[derive(Clone, Copy)]
pub struct GeneralModeParamBlkDesc {
    pub density_code: u8,
    /// 24-bit big-endian number of blocks.
    pub block_count: [u8; 3],
    _rsvd: u8,
    /// 24-bit big-endian block length in bytes.
    pub block_length: [u8; 3],
}
unsafe impl plain::Plain for GeneralModeParamBlkDesc {}
impl GeneralModeParamBlkDesc {
    pub fn block_count(&self) -> u32 {
        u24_be_to_u32(self.block_count)
    }
    pub fn logical_block_len(&self) -> u32 {
        u24_be_to_u32(self.block_length)
    }
}
impl fmt::Debug for GeneralModeParamBlkDesc {
    /// Prints the decoded (host-order) values via the accessors, matching the
    /// style of `ShortLbaModeParamBlkDesc`'s Debug impl.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("GeneralModeParamBlkDesc")
            .field("density_code", &self.density_code)
            .field("block_count", &self.block_count())
            .field("block_length", &self.logical_block_len())
            .finish()
    }
}
/// Long LBA mode parameter block descriptor (16 bytes, big endian), used when
/// LONGLBA is set.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct LongLbaModeParamBlkDesc {
    pub block_count: u64,
    _rsvd: u32,
    pub logical_block_len: u32,
}
unsafe impl plain::Plain for LongLbaModeParamBlkDesc {}
impl LongLbaModeParamBlkDesc {
    pub const fn block_count(&self) -> u64 {
        u64::from_be(self.block_count)
    }
    pub const fn logical_block_len(&self) -> u32 {
        u32::from_be(self.logical_block_len)
    }
}
/// Mode parameter header for MODE SENSE (6) responses (4 bytes).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct ModeParamHeader6 {
    pub mode_data_len: u8,
    pub medium_ty: u8,
    /// Device-specific parameter byte.
    pub a: u8,
    pub block_desc_len: u8,
}
unsafe impl plain::Plain for ModeParamHeader6 {}
/// Mode parameter header for MODE SENSE (10) responses (8 bytes, big endian).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct ModeParamHeader10 {
    pub mode_data_len: u16,
    pub medium_ty: u8,
    /// Device-specific parameter byte.
    pub a: u8,
    /// LONGLBA lives in bit 0 of this byte.
    pub b: u8,
    _rsvd: u8,
    pub block_desc_len: u16,
}
unsafe impl plain::Plain for ModeParamHeader10 {}
impl ModeParamHeader10 {
    /// Length of the mode data following `mode_data_len` itself, host order.
    pub const fn mode_data_len(&self) -> u16 {
        u16::from_be(self.mode_data_len)
    }
    /// Total length of the block descriptors in bytes, host order.
    pub const fn block_desc_len(&self) -> u16 {
        u16::from_be(self.block_desc_len)
    }
    /// Whether the block descriptors use the 16-byte long-LBA layout.
    pub const fn longlba(&self) -> bool {
        (self.b & 0x01) != 0
    }
}
/// READ CAPACITY (10) command descriptor block (10 bytes, SBC).
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct ReadCapacity10 {
    pub opcode: u8,
    _rsvd1: u8,
    /// Obsolete LBA field; must be zero.
    obsolete_lba: u32,
    _rsvd2: [u8; 3],
    pub control: u8,
}
unsafe impl plain::Plain for ReadCapacity10 {}
impl ReadCapacity10 {
    pub const fn new(control: u8) -> Self {
        Self {
            opcode: Opcode::ReadCapacity10 as u8,
            _rsvd1: 0,
            obsolete_lba: 0,
            _rsvd2: [0; 3],
            control,
        }
    }
}
// TODO: ReadCapacity16
/// Parameter data returned by READ CAPACITY (10): 8 bytes, big endian.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct ReadCapacity10ParamData {
    /// LBA of the LAST logical block (not the block count), big endian.
    pub max_lba: u32,
    pub block_len: u32,
}
unsafe impl plain::Plain for ReadCapacity10ParamData {}
impl ReadCapacity10ParamData {
    /// Total number of logical blocks.
    ///
    /// READ CAPACITY (10) reports the address of the last block, so the count
    /// is `max_lba + 1`; the previous version returned `max_lba` directly,
    /// making the disk appear one block smaller. Saturates at `u32::MAX`,
    /// which per SBC signals that READ CAPACITY (16) must be used instead.
    pub const fn block_count(&self) -> u32 {
        u32::from_be(self.max_lba).saturating_add(1)
    }
    pub const fn logical_block_len(&self) -> u32 {
        u32::from_be(self.block_len)
    }
}
/// Read-write error recovery mode page (page 0x01, SBC) — leading fields only.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct RwErrorRecoveryPage {
    /// PS/SPF bits plus the page code.
    pub a: u8,
    pub page_length: u8,
    /// AWRE/ARRE and related recovery flag bits.
    pub b: u8,
    pub read_retry_count: u8,
    _obsolete: [u8; 3],
    _rsvd: u8,
    /// big endian
    pub recovery_time_limit: u16,
}
unsafe impl plain::Plain for RwErrorRecoveryPage {}
/// Caching mode page (page 0x08, SBC) — only the leading fields are modeled.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct CachingModePage {
    /// PS/SPF bits plus the page code.
    pub a: u8,
    pub page_length: u8,
    // TODO: more
}
unsafe impl plain::Plain for CachingModePage {}
/// Iterator over raw mode-page byte slices within a MODE SENSE data buffer,
/// handling both the page_0 format (1-byte length at byte 1) and the
/// sub_page format (2-byte big-endian length at bytes 2..4).
pub(crate) struct ModePageIterRaw<'a> {
    buffer: &'a [u8],
}
impl<'a> Iterator for ModePageIterRaw<'a> {
    type Item = &'a [u8];
    fn next(&mut self) -> Option<Self::Item> {
        if self.buffer.len() < 2 {
            return None;
        }
        let a = self.buffer[0];
        let page_len = if a & (1 << 6) == 0 {
            // Page_0 format (SPF clear): PAGE LENGTH counts the bytes after
            // byte 1, so the whole page is PAGE LENGTH + 2 bytes (the
            // previous `+ 1` was one byte short, misaligning later pages).
            self.buffer[1] as usize + 2
        } else {
            // Sub_page format (SPF set): 16-bit big-endian PAGE LENGTH at
            // bytes 2..4 counts the bytes after byte 3, so the whole page is
            // PAGE LENGTH + 4 bytes. The previous code sliced `[2..3]` (one
            // byte), so the `[u8; 2]` conversion always failed and iteration
            // silently stopped at the first sub_page-format page.
            if self.buffer.len() < 4 {
                return None;
            }
            u16::from_be_bytes((&self.buffer[2..4]).try_into().ok()?) as usize + 4
        };
        if self.buffer.len() < page_len {
            return None;
        }
        let page = &self.buffer[..page_len];
        // Slicing at `page_len == len` naturally yields the empty tail.
        self.buffer = &self.buffer[page_len..];
        Some(page)
    }
}
/// A decoded mode page of one of the types this driver understands.
#[derive(Clone, Copy, Debug)]
pub enum AnyModePage<'a> {
    RwErrorRecovery(&'a RwErrorRecoveryPage),
    Caching(&'a CachingModePage),
}
/// Typed iterator over the mode pages this driver understands.
struct ModePageIter<'a> {
    raw: ModePageIterRaw<'a>,
}
impl<'a> Iterator for ModePageIter<'a> {
    type Item = AnyModePage<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        let next_buf = self.raw.next()?;
        let a = next_buf[0];
        let page_code = a & 0x1F;
        // SPF bit: clear = page_0 format, set = sub_page format.
        let spf = a & (1 << 6) != 0;
        if !spf {
            if page_code == 0x01 {
                Some(AnyModePage::RwErrorRecovery(
                    plain::from_bytes(next_buf).ok()?,
                ))
            } else if page_code == 0x08 {
                Some(AnyModePage::Caching(plain::from_bytes(next_buf).ok()?))
            } else {
                // NOTE(review): returning None ends iteration at the first
                // unrecognized page instead of skipping it — confirm whether
                // skipping was intended.
                println!("Unimplemented page_0 {}", base64::encode(next_buf));
                None
            }
        } else {
            println!("Unimplemented sub_page {}", base64::encode(next_buf));
            None
        }
    }
}
/// Wraps `buffer` (the mode pages area of a MODE SENSE response) in a typed
/// mode-page iterator.
pub fn mode_page_iter(buffer: &[u8]) -> impl Iterator<Item = AnyModePage<'_>> {
    let raw = ModePageIterRaw { buffer };
    ModePageIter { raw }
}
@@ -0,0 +1,339 @@
use std::convert::TryFrom;
use std::mem;
pub mod cmds;
pub mod opcodes;
use thiserror::Error;
use xhcid_interface::DeviceReqData;
use crate::protocol::{Protocol, ProtocolError, SendCommandStatus, SendCommandStatusKind};
use cmds::StandardInquiryData;
/// High-level SCSI command issuer: owns the scratch buffers used to serialize
/// CDBs and receive response data, plus the geometry discovered at init.
pub struct Scsi {
    // Scratch area CDBs are serialized into (largest CDB used is 16 bytes).
    command_buffer: [u8; 16],
    // Kept separate from `data_buffer` so inquiry data stays available.
    inquiry_buffer: [u8; 259],
    // Reusable buffer for the response data of other commands.
    data_buffer: Vec<u8>,
    /// Logical block size in bytes.
    pub block_size: u32,
    /// Total number of logical blocks.
    pub block_count: u64,
}
// CDB lengths of the commands issued below.
const INQUIRY_CMD_LEN: u8 = 6;
const REPORT_SUPP_OPCODES_CMD_LEN: u8 = 12;
const REQUEST_SENSE_CMD_LEN: u8 = 6;
// Minimum allocation lengths that still return the length-carrying header.
const MIN_INQUIRY_ALLOC_LEN: u16 = 5;
const MIN_REPORT_SUPP_OPCODES_ALLOC_LEN: u32 = 4;
type Result<T, E = ScsiError> = std::result::Result<T, E>;
/// Errors from the SCSI command layer.
#[derive(Debug, Error)]
pub enum ScsiError {
    // TODO: Add some kind of context here, since it's very useful indeed to be able to see which
    // command returned the protocol error.
    #[error("protocol error when sending command: {0}")]
    ProtocolError(#[from] ProtocolError),
    #[error("overflow")]
    Overflow(&'static str),
}
impl Scsi {
    /// Brings up a SCSI logical unit over `protocol`: issues INQUIRY, then
    /// discovers block size and count from the MODE SENSE block descriptors,
    /// falling back to READ CAPACITY (10) when none are reported.
    pub fn new(protocol: &mut dyn Protocol) -> Result<Self> {
        // Layout sanity check; `res_standard_inquiry_data` reinterprets bytes.
        assert_eq!(std::mem::size_of::<StandardInquiryData>(), 96);
        let mut this = Self {
            command_buffer: [0u8; 16],
            // separate buffer since the inquiry data is most likely going to be used in the
            // future.
            inquiry_buffer: [0u8; 259], // additional_len = 255 max
            data_buffer: Vec::new(),
            block_size: 0,
            block_count: 0,
        };
        // Get the max length that the device supports, of the Standard Inquiry Data.
        let max_inquiry_len = this.get_inquiry_alloc_len(protocol)?;
        // Get the Standard Inquiry Data.
        this.get_standard_inquiry_data(protocol, max_inquiry_len)?;
        let version = this.res_standard_inquiry_data().version();
        println!("Inquiry version: {}", version);
        let (block_size, block_count) = {
            let (_, blkdescs, mode_page_iter) = this.get_mode_sense10(protocol)?;
            for page in mode_page_iter {
                println!("PAGE: {:?}", page);
            }
            // TODO: Can there be multiple disks at all?
            if let Some(only_blkdesc) = blkdescs.get(0) {
                println!("Found block desc: {:?}", only_blkdesc);
                (only_blkdesc.block_size(), only_blkdesc.block_count())
            } else {
                println!("read_capacity10");
                let r = this.read_capacity(protocol)?;
                println!("read_capacity10 result: {:?}", r);
                (r.logical_block_len(), r.block_count().into())
            }
        };
        this.block_size = block_size;
        this.block_count = block_count;
        Ok(this)
    }
pub fn get_inquiry_alloc_len(&mut self, protocol: &mut dyn Protocol) -> Result<u16> {
self.get_standard_inquiry_data(protocol, MIN_INQUIRY_ALLOC_LEN)?;
let standard_inquiry_data = self.res_standard_inquiry_data();
Ok(4 + u16::from(standard_inquiry_data.additional_len))
}
pub fn get_standard_inquiry_data(
&mut self,
protocol: &mut dyn Protocol,
max_inquiry_len: u16,
) -> Result<()> {
let inquiry = self.cmd_inquiry();
*inquiry = cmds::Inquiry::new(false, 0, max_inquiry_len, 0);
protocol.send_command(
&self.command_buffer[..INQUIRY_CMD_LEN as usize],
DeviceReqData::In(&mut self.inquiry_buffer[..max_inquiry_len as usize]),
)?;
Ok(())
}
pub fn get_ff_sense(&mut self, protocol: &mut dyn Protocol, alloc_len: u8) -> Result<()> {
let request_sense = self.cmd_request_sense();
*request_sense = cmds::RequestSense::new(false, alloc_len, 0);
self.data_buffer.resize(alloc_len.into(), 0);
protocol.send_command(
&self.command_buffer[..REQUEST_SENSE_CMD_LEN as usize],
DeviceReqData::In(&mut self.data_buffer[..alloc_len as usize]),
)?;
Ok(())
}
pub fn read_capacity(
&mut self,
protocol: &mut dyn Protocol,
) -> Result<&cmds::ReadCapacity10ParamData> {
// The spec explicitly states that the allocation length is 8 bytes.
let read_capacity10 = self.cmd_read_capacity10();
*read_capacity10 = cmds::ReadCapacity10::new(0);
self.data_buffer.resize(10usize, 0u8);
protocol.send_command(
&self.command_buffer[..10],
DeviceReqData::In(&mut self.data_buffer[..8]),
)?;
Ok(self.res_read_capacity10())
}
    /// Issues MODE SENSE (10) twice: once to learn the total mode-data
    /// length, then again with the right allocation, returning the header,
    /// the block descriptors, and an iterator over the mode pages.
    ///
    /// NOTE(review): a failed first command fetches the sense data and then
    /// `panic!`s with it rather than returning an error.
    pub fn get_mode_sense10(
        &mut self,
        protocol: &mut dyn Protocol,
    ) -> Result<(
        &cmds::ModeParamHeader10,
        BlkDescSlice<'_>,
        impl Iterator<Item = cmds::AnyModePage<'_>>,
    )> {
        let initial_alloc_len = mem::size_of::<cmds::ModeParamHeader10>() as u16; // covers both mode_data_len and blk_desc_len.
        let mode_sense10 = self.cmd_mode_sense10();
        *mode_sense10 = cmds::ModeSense10::get_block_desc(initial_alloc_len, 0);
        self.data_buffer
            .resize(mem::size_of::<cmds::ModeParamHeader10>(), 0);
        if let SendCommandStatus {
            kind: SendCommandStatusKind::Failed,
            ..
        } = protocol.send_command(
            &self.command_buffer[..10],
            DeviceReqData::In(&mut self.data_buffer[..initial_alloc_len as usize]),
        )? {
            self.get_ff_sense(protocol, 252)?;
            panic!("{:?}", self.res_ff_sense_data());
        }
        let optimal_alloc_len = self.res_mode_param_header10().mode_data_len() + 2; // the length of the mode data field itself
        let mode_sense10 = self.cmd_mode_sense10();
        *mode_sense10 = cmds::ModeSense10::get_block_desc(optimal_alloc_len, 0);
        self.data_buffer.resize(optimal_alloc_len as usize, 0);
        protocol.send_command(
            &self.command_buffer[..10],
            DeviceReqData::In(&mut self.data_buffer[..optimal_alloc_len as usize]),
        )?;
        Ok((
            self.res_mode_param_header10(),
            self.res_blkdesc_mode10(),
            self.res_mode_pages10(),
        ))
    }
    // The cmd_* accessors reinterpret the start of `command_buffer` as the
    // given CDB type, so callers can fill a command in place before sending.
    pub fn cmd_inquiry(&mut self) -> &mut cmds::Inquiry {
        plain::from_mut_bytes(&mut self.command_buffer).unwrap()
    }
    pub fn cmd_mode_sense6(&mut self) -> &mut cmds::ModeSense6 {
        plain::from_mut_bytes(&mut self.command_buffer).unwrap()
    }
    pub fn cmd_mode_sense10(&mut self) -> &mut cmds::ModeSense10 {
        plain::from_mut_bytes(&mut self.command_buffer).unwrap()
    }
    pub fn cmd_request_sense(&mut self) -> &mut cmds::RequestSense {
        plain::from_mut_bytes(&mut self.command_buffer).unwrap()
    }
    pub fn cmd_read_capacity10(&mut self) -> &mut cmds::ReadCapacity10 {
        plain::from_mut_bytes(&mut self.command_buffer).unwrap()
    }
    pub fn cmd_read16(&mut self) -> &mut cmds::Read16 {
        plain::from_mut_bytes(&mut self.command_buffer).unwrap()
    }
    pub fn cmd_write16(&mut self) -> &mut cmds::Write16 {
        plain::from_mut_bytes(&mut self.command_buffer).unwrap()
    }
    // The res_* accessors reinterpret the relevant response buffer as the
    // data returned by the most recently issued command of that type.
    pub fn res_standard_inquiry_data(&self) -> &StandardInquiryData {
        plain::from_bytes(&self.inquiry_buffer).unwrap()
    }
    pub fn res_ff_sense_data(&self) -> &cmds::FixedFormatSenseData {
        plain::from_bytes(&self.data_buffer).unwrap()
    }
    pub fn res_mode_param_header6(&self) -> &cmds::ModeParamHeader6 {
        plain::from_bytes(&self.data_buffer).unwrap()
    }
    pub fn res_mode_param_header10(&self) -> &cmds::ModeParamHeader10 {
        plain::from_bytes(&self.data_buffer).unwrap()
    }
    /// Block descriptors of the last MODE SENSE (6) response (always the
    /// 8-byte short-LBA layout).
    pub fn res_blkdesc_mode6(&self) -> &[cmds::ShortLbaModeParamBlkDesc] {
        let header = self.res_mode_param_header6();
        let descs_start = mem::size_of::<cmds::ModeParamHeader6>();
        plain::slice_from_bytes(
            &self.data_buffer[descs_start..descs_start + usize::from(header.block_desc_len)],
        )
        .unwrap()
    }
pub fn res_blkdesc_mode10(&self) -> BlkDescSlice<'_> {
let header = self.res_mode_param_header10();
let descs_start = mem::size_of::<cmds::ModeParamHeader10>();
if header.longlba() {
BlkDescSlice::Long(
plain::slice_from_bytes(
&self.data_buffer
[descs_start..descs_start + usize::from(header.block_desc_len())],
)
.unwrap(),
)
} else if self.res_standard_inquiry_data().periph_dev_ty()
!= cmds::PeriphDeviceType::DirectAccess as u8
&& self.res_standard_inquiry_data().version() == cmds::InquiryVersion::Spc3 as u8
{
BlkDescSlice::General(
plain::slice_from_bytes(
&self.data_buffer
[descs_start..descs_start + usize::from(header.block_desc_len())],
)
.unwrap(),
)
} else {
BlkDescSlice::Short(
plain::slice_from_bytes(
&self.data_buffer
[descs_start..descs_start + usize::from(header.block_desc_len())],
)
.unwrap(),
)
}
}
pub fn res_mode_pages10(&self) -> impl Iterator<Item = cmds::AnyModePage<'_>> {
let header = self.res_mode_param_header10();
let descs_start = mem::size_of::<cmds::ModeParamHeader10>();
let buffer = &self.data_buffer[descs_start + header.block_desc_len() as usize..];
cmds::mode_page_iter(buffer)
}
    /// Parameter data of the last READ CAPACITY (10) command.
    pub fn res_read_capacity10(&self) -> &cmds::ReadCapacity10ParamData {
        plain::from_bytes(&self.data_buffer).unwrap()
    }
    /// Disk capacity in bytes.
    pub fn get_disk_size(&self) -> u64 {
        self.block_count * u64::from(self.block_size)
    }
    /// Reads whole blocks into `buffer` starting at `lba`; a trailing partial
    /// block (less than `block_size` bytes of `buffer`) is not read.
    ///
    /// Returns the number of bytes the device reports as actually transferred.
    pub fn read(
        &mut self,
        protocol: &mut dyn Protocol,
        lba: u64,
        buffer: &mut [u8],
    ) -> Result<u32> {
        let blocks_to_read = buffer.len() as u64 / u64::from(self.block_size);
        let bytes_to_read = blocks_to_read as usize * self.block_size as usize;
        let transfer_len = u32::try_from(blocks_to_read).or(Err(ScsiError::Overflow(
            "number of blocks to read couldn't fit inside a u32",
        )))?;
        {
            let read = self.cmd_read16();
            *read = cmds::Read16::new(lba, transfer_len, 0);
        }
        // TODO: Use the to-be-written TransferReadStream instead of relying on everything being
        // able to fit within a single buffer.
        self.data_buffer.resize(bytes_to_read, 0u8);
        let status = protocol.send_command(
            &self.command_buffer[..16],
            DeviceReqData::In(&mut self.data_buffer[..bytes_to_read]),
        )?;
        buffer[..bytes_to_read].copy_from_slice(&self.data_buffer[..bytes_to_read]);
        Ok(status.bytes_transferred(bytes_to_read as u32))
    }
pub fn write(&mut self, protocol: &mut dyn Protocol, lba: u64, buffer: &[u8]) -> Result<u32> {
let blocks_to_write = buffer.len() as u64 / u64::from(self.block_size);
let bytes_to_write = blocks_to_write as usize * self.block_size as usize;
let transfer_len = u32::try_from(blocks_to_write).or(Err(ScsiError::Overflow(
"number of blocks to write couldn't fit inside a u32",
)))?;
{
let read = self.cmd_write16();
*read = cmds::Write16::new(lba, transfer_len, 0);
}
// TODO: Use the to-be-written TransferReadStream instead of relying on everything being
// able to fit within a single buffer.
self.data_buffer.resize(bytes_to_write, 0u8);
self.data_buffer[..bytes_to_write].copy_from_slice(&buffer[..bytes_to_write]);
let status = protocol.send_command(
&self.command_buffer[..16],
DeviceReqData::Out(&buffer[..bytes_to_write]),
)?;
Ok(status.bytes_transferred(bytes_to_write as u32))
}
}
/// A slice of mode-parameter block descriptors, tagged with their layout.
#[derive(Debug)]
pub enum BlkDescSlice<'a> {
    Short(&'a [cmds::ShortLbaModeParamBlkDesc]),
    General(&'a [cmds::GeneralModeParamBlkDesc]),
    Long(&'a [cmds::LongLbaModeParamBlkDesc]),
}
/// A single mode-parameter block descriptor, tagged with its layout.
#[derive(Debug)]
pub enum BlkDesc<'a> {
    Short(&'a cmds::ShortLbaModeParamBlkDesc),
    General(&'a cmds::GeneralModeParamBlkDesc),
    Long(&'a cmds::LongLbaModeParamBlkDesc),
}
impl<'a> BlkDesc<'a> {
    /// Logical block length in bytes, independent of descriptor layout.
    fn block_size(&self) -> u32 {
        match *self {
            Self::Short(desc) => desc.logical_block_len(),
            Self::General(desc) => desc.logical_block_len(),
            Self::Long(desc) => desc.logical_block_len(),
        }
    }
    /// Number of logical blocks, widened to `u64`.
    fn block_count(&self) -> u64 {
        match *self {
            Self::Short(desc) => u64::from(desc.block_count()),
            Self::General(desc) => u64::from(desc.block_count()),
            Self::Long(desc) => desc.block_count(),
        }
    }
}
impl<'a> BlkDescSlice<'a> {
    /// Returns the `idx`-th descriptor wrapped in the matching `BlkDesc` variant.
    fn get(&self, idx: usize) -> Option<BlkDesc<'a>> {
        match self {
            Self::Short(descs) => descs.get(idx).map(BlkDesc::Short),
            Self::General(descs) => descs.get(idx).map(BlkDesc::General),
            Self::Long(descs) => descs.get(idx).map(BlkDesc::Long),
        }
    }
}
@@ -0,0 +1,112 @@
/// SCSI operation codes (SPC/SBC). Discriminants are the on-wire opcode
/// bytes, so they must not be reordered or changed.
#[repr(u8)]
pub enum Opcode {
    TestUnitReady = 0x00,
    /// obsolete
    RezeroUnit = 0x01,
    RequestSense = 0x03,
    FormatUnit = 0x04,
    ReassignBlocks = 0x07,
    /// obsolete
    Read6 = 0x08,
    /// obsolete
    Write6 = 0x0A,
    /// obsolete
    Seek = 0x0B,
    Inquiry = 0x12,
    ModeSelect6 = 0x15,
    /// obsolete
    Reserve6 = 0x16,
    /// obsolete
    Release6 = 0x17,
    ModeSense6 = 0x1A,
    StartStopUnit = 0x1B,
    RecvDiagnosticRes = 0x1C,
    SendDiagnostic = 0x1D,
    ReadCapacity10 = 0x25,
    Read10 = 0x28,
    Write10 = 0x2A,
    /// obsolete
    SeekExt = 0x2B,
    WriteAndVerify10 = 0x2E,
    Verify10 = 0x2F,
    SyncCache10 = 0x35,
    ReadDefectData10 = 0x37,
    WriteBuf10 = 0x3B,
    ReadBuf10 = 0x3C,
    /// obsolete
    ReadLong10 = 0x3E,
    WriteLong10 = 0x3F,
    /// obsolete
    ChangeDef = 0x40,
    WriteSame10 = 0x41,
    Unmap = 0x42,
    Sanitize = 0x48,
    LogSelect = 0x4C,
    LogSense = 0x4D,
    ModeSelect10 = 0x55,
    /// obsolete
    Reserve10 = 0x56,
    /// obsolete
    Release10 = 0x57,
    ModeSense10 = 0x5A,
    PersistentResvIn = 0x5E,
    PersistentResvOut = 0x5F,
    ServiceAction7F = 0x7F,
    Read16 = 0x88,
    Write16 = 0x8A,
    WriteAndVerify16 = 0x8E,
    Verify16 = 0x8F,
    SyncCache16 = 0x91,
    WriteSame16 = 0x93,
    WriteStream16 = 0x9A,
    ReadBuf16 = 0x9B,
    WriteAtomic16 = 0x9C,
    ServiceAction9E = 0x9E,
    // Implicitly 0x9F (previous discriminant + 1), which is the correct opcode.
    ServiceAction9F,
    ReportLuns = 0xA0,
    SecurityProtoIn = 0xA2,
    ServiceActionA3 = 0xA3,
    ServiceActionA4 = 0xA4,
    Read12 = 0xA8,
    Write12 = 0xAA,
    WriteAndVerify12 = 0xAE,
    Verify12 = 0xAF,
    SecurityProtoOut = 0xB5,
    ReadDefectData12 = 0xB7,
}
/// Service actions for the variable-length opcode 0x7F.
#[repr(u8)]
pub enum ServiceAction7F {
    Read32 = 0x09,
    Verify32 = 0x0A,
    Write32 = 0x0B,
    WriteAndVerify32 = 0x0C,
    WriteSame32 = 0x0D,
    WriteAtomic32 = 0x18,
}
/// Service actions for opcode 0x9E (SERVICE ACTION IN (16)).
#[repr(u8)]
pub enum ServiceAction9E {
    ReadCapacity16 = 0x10,
    ReadLong16 = 0x11,
    GetLbaStatus = 0x12,
    StreamControl = 0x14,
    BackgroundControl = 0x15,
    GetStreamStatus = 0x16,
}
/// Service actions for opcode 0x9F (SERVICE ACTION OUT (16)).
#[repr(u8)]
pub enum ServiceAction9F {
    WriteLong16 = 0x11,
}
/// Service actions for opcode 0xA3 (MAINTENANCE IN).
#[repr(u8)]
pub enum ServiceActionA3 {
    ReportIdentInfo = 0x05,
    ReportSuppOpcodes = 0x0C,
    ReportSuppTaskManFuncs = 0x0D,
    ReportTimestamp = 0x0F,
}
/// Service actions for opcode 0xA4 (MAINTENANCE OUT).
#[repr(u8)]
pub enum ServiceActionA4 {
    SetIdentInfo = 0x06,
    SetTimestamp = 0x0F,
}
@@ -0,0 +1,27 @@
# Manifest for the VirtIO block device driver daemon.
[package]
name = "virtio-blkd"
description = "VirtIO block (storage) driver"
version = "0.1.0"
edition = "2021"
authors = ["Anhad Singh <andypython@protonmail.com>"]

[dependencies]
# Workspace-wide crates (versions pinned at the workspace root).
anyhow.workspace = true
log.workspace = true
thiserror.workspace = true
static_assertions.workspace = true
futures = { version = "0.3.28", features = ["executor"] }
spin.workspace = true
redox_event.workspace = true
redox_syscall = { workspace = true, features = ["std"] }

# In-tree driver support crates.
common = { path = "../../common" }
daemon = { path = "../../../daemon" }
driver-block = { path = "../driver-block" }
pcid = { path = "../../pcid" }
virtio-core = { path = "../../virtio-core" }

libredox.workspace = true

[lints]
workspace = true
@@ -0,0 +1,182 @@
#![deny(trivial_numeric_casts, unused_allocation)]
use std::collections::BTreeMap;
use std::sync::{Arc, Weak};
use driver_block::DiskScheme;
use static_assertions::const_assert_eq;
use pcid_interface::*;
use virtio_core::spec::*;
use virtio_core::transport::Transport;
use virtio_core::utils::VolatileCell;
mod scheme;
use thiserror::Error;
use crate::scheme::VirtioDisk;
/// Errors that can occur while probing and initializing the virtio-blk
/// device.
#[derive(Debug, Error)]
pub enum Error {
    /// The PCI capability of the given configuration type was not found.
    #[error("capability {0:?} not found")]
    InCapable(CfgType),
    /// Failed to map device memory into the driver's address space.
    #[error("failed to map memory")]
    Physmap,
    /// No interrupt vector could be allocated for the device.
    #[error("failed to allocate an interrupt vector")]
    ExhaustedInt,
    /// An underlying system call failed.
    #[error("syscall error")]
    SyscallError(syscall::Error),
}
/// Legacy CHS disk geometry as laid out in the virtio-blk device
/// configuration space (see [`DeviceConfigTy::Geometry`]).
/// NOTE(review): presumably only meaningful when the device negotiates the
/// geometry feature bit — confirm against the virtio spec.
#[repr(C)]
pub struct BlockGeometry {
    pub cylinders: VolatileCell<u16>,
    pub heads: VolatileCell<u8>,
    pub sectors: VolatileCell<u8>,
}
/// Byte offsets of fields inside the virtio-blk device configuration
/// space; passed as the offset argument to [`BlockDeviceConfig::load_config`].
#[repr(u8)]
pub enum DeviceConfigTy {
    /// Device capacity (offset 0); logged as a sector count by `daemon`.
    Capacity = 0,
    SizeMax = 0x8,
    /// NOTE(review): offset 0xc is `seg_max` (maximum number of segments)
    /// in the virtio spec — this looks like a typo for `SegMax`, but the
    /// variant is public so it is left as-is; verify against callers.
    SeqMax = 0xc,
    Geometry = 0x10,
    BlkSize = 0x14,
}
/// Read-only handle onto the virtio-blk device configuration space.
///
/// Holds only a [`Weak`] reference to the transport so that this handle
/// does not keep the device alive by itself.
pub struct BlockDeviceConfig(Weak<dyn Transport>);

impl BlockDeviceConfig {
    #[inline]
    fn new(transport: &Arc<dyn Transport>) -> Self {
        // Fixed: parameter was misspelled `tranport`, and the previous
        // `Arc::downgrade(&tranport)` passed a `&&Arc` that only worked
        // through deref coercion.
        Self(Arc::downgrade(transport))
    }

    /// Loads a value of type `T` from the device configuration space at
    /// the offset named by `ty`.
    ///
    /// # Panics
    ///
    /// Panics if the transport has already been dropped, if `size_of::<T>()`
    /// does not fit the transport's size argument, or if the raw `u64`
    /// value does not convert into `T`.
    pub fn load_config<T>(&self, ty: DeviceConfigTy) -> T
    where
        T: Sized + TryFrom<u64>,
        <T as TryFrom<u64>>::Error: std::fmt::Debug,
    {
        let transport = self.0.upgrade().unwrap();
        let size = core::mem::size_of::<T>()
            .try_into()
            .expect("load_config: invalid size");

        let value = transport.load_config(ty as u8, size);
        T::try_from(value).unwrap()
    }

    /// Returns the capacity of the block device in sectors.
    ///
    /// Fixed doc: this is NOT bytes — the driver itself logs this value as
    /// "{} sectors" (virtio-blk expresses capacity in 512-byte sectors).
    #[inline]
    pub fn capacity(&self) -> u64 {
        self.load_config(DeviceConfigTy::Capacity)
    }

    /// Returns the device's block size in bytes.
    #[inline]
    pub fn block_size(&self) -> u32 {
        self.load_config(DeviceConfigTy::BlkSize)
    }
}
/// Direction of a virtio-blk request as encoded in the request header.
#[repr(u32)]
pub enum BlockRequestTy {
    /// Read: the device writes into the data buffer.
    In = 0,
    /// Write: the device reads from the data buffer.
    Out = 1,
}

// The request-type field must occupy exactly 4 bytes on the wire.
const_assert_eq!(core::mem::size_of::<BlockRequestTy>(), 4);
/// On-the-wire header of a virtio-blk request; placed first in the
/// descriptor chain, ahead of the data buffer and the status byte.
#[repr(C)]
pub struct BlockVirtRequest {
    // Request direction (`In` or `Out`).
    pub ty: BlockRequestTy,
    // Reserved/padding; always set to 0 by this driver.
    pub reserved: u32,
    // Starting sector of the transfer.
    pub sector: u64,
}

// The request header must occupy exactly 16 bytes on the wire.
const_assert_eq!(core::mem::size_of::<BlockVirtRequest>(), 16);
/// Entry point: hands control to the pcid daemon machinery, which calls
/// `daemon_runner` with the PCI function handle for this device.
fn main() {
    pcid_interface::pci_daemon(daemon_runner);
}
/// Adapter between the infallible signature pcid expects and the fallible
/// `daemon` function. `daemon` is not expected to return in normal
/// operation; any `Err` panics via `unwrap`, and a clean return would hit
/// `unreachable!`.
fn daemon_runner(redox_daemon: daemon::Daemon, pcid_handle: PciFunctionHandle) -> ! {
    daemon(redox_daemon, pcid_handle).unwrap();
    unreachable!();
}
/// Driver main loop: sets up logging, verifies the PCI device id,
/// initializes the virtio transport and queue, registers the
/// `disk.<name>_virtio_blk` scheme and services scheme events until the
/// event queue is closed.
fn daemon(daemon: daemon::Daemon, mut pcid_handle: PciFunctionHandle) -> anyhow::Result<()> {
    common::setup_logging(
        "disk",
        "pci",
        "virtio-blkd",
        common::output_level(),
        common::file_level(),
    );

    // Double check that we have the right device.
    //
    // 0x1001 - virtio-blk
    let pci_config = pcid_handle.config();
    assert_eq!(pci_config.func.full_device_id.device_id, 0x1001);
    log::info!("virtio-blk: initiating startup sequence :^)");

    let device = virtio_core::probe_device(&mut pcid_handle)?;
    device.transport.finalize_features();

    let queue = device
        .transport
        .setup_queue(virtio_core::MSIX_PRIMARY_VECTOR, &device.irq_handle)?;

    let device_space = BlockDeviceConfig::new(&device.transport);

    // At this point the device is alive!
    device.transport.run_device();

    log::info!(
        "virtio-blk: disk size: {} sectors and block size of {} bytes",
        device_space.capacity(),
        device_space.block_size()
    );

    let mut name = pci_config.func.name();
    name.push_str("_virtio_blk");
    let scheme_name = format!("disk.{}", name);

    let event_queue = event::EventQueue::new().unwrap();

    event::user_data! {
        enum Event {
            Scheme,
        }
    };

    let mut scheme = DiskScheme::new(
        Some(daemon),
        scheme_name,
        BTreeMap::from([(0, VirtioDisk::new(queue, device_space))]),
        &driver_block::FuturesExecutor,
    );

    // Fixed: the expect message used to say "nvmed" (copy-pasted from the
    // NVMe driver); this is the virtio-blk daemon.
    libredox::call::setrens(0, 0).expect("virtio-blkd: failed to enter null namespace");

    event_queue
        .subscribe(
            scheme.event_handle().raw(),
            Event::Scheme,
            event::EventFlags::READ,
        )
        .unwrap();

    // Service scheme requests; each READ event drives the async scheme to
    // completion on the current thread.
    for event in event_queue {
        match event.unwrap().user_data {
            Event::Scheme => futures::executor::block_on(scheme.tick()).unwrap(),
        }
    }

    Ok(())
}
@@ -0,0 +1,103 @@
use std::sync::Arc;
use common::dma::Dma;
use virtio_core::spec::{Buffer, ChainBuilder, DescriptorFlags};
use virtio_core::transport::Queue;
use crate::BlockDeviceConfig;
use crate::BlockRequestTy;
use crate::BlockVirtRequest;
/// Convenience read/write extension over a virtio [`Queue`].
trait BlkExtension {
    /// Reads up to `target.len()` bytes starting at sector `block` into
    /// `target`, returning the number of bytes transferred.
    async fn read(&self, block: u64, target: &mut [u8]) -> usize;
    /// Writes `target` to the device starting at sector `block`, returning
    /// the number of bytes written.
    async fn write(&self, block: u64, target: &[u8]) -> usize;
}
impl BlkExtension for Queue<'_> {
    /// Reads up to `target.len()` bytes at sector `block` into `target`.
    ///
    /// Builds the standard three-descriptor virtio-blk chain:
    /// header (device-readable), data (device-writable), status byte
    /// (device-writable).
    async fn read(&self, block: u64, target: &mut [u8]) -> usize {
        let req = Dma::new(BlockVirtRequest {
            ty: BlockRequestTy::In,
            reserved: 0,
            sector: block,
        })
        .unwrap();

        // DMA bounce buffer the device writes the payload into.
        let result = unsafe {
            Dma::<[u8]>::zeroed_slice(target.len())
                .unwrap()
                .assume_init()
        };

        // Status byte; the device overwrites it on completion (0 == OK).
        let status = Dma::new(u8::MAX).unwrap();
        let chain = ChainBuilder::new()
            .chain(Buffer::new(&req))
            .chain(Buffer::new_unsized(&result).flags(DescriptorFlags::WRITE_ONLY))
            .chain(Buffer::new(&status).flags(DescriptorFlags::WRITE_ONLY))
            .build();

        // The reported length includes the trailing status byte; subtract
        // it (saturating, so a zero-length completion cannot underflow).
        let written = (self.send(chain).await as usize).saturating_sub(1);
        assert_eq!(*status, 0);

        // Fixed: copy only the bytes the device actually produced. The old
        // `copy_from_slice(&result)` used the FULL bounce buffer, which
        // panics whenever `written < target.len()` because
        // `copy_from_slice` requires both slices to have equal length.
        // Also clamp to `target.len()` so a bogus device length cannot
        // panic the slicing of `target`.
        let written = written.min(target.len());
        target[..written].copy_from_slice(&result[..written]);
        written
    }

    /// Writes `target` to the device at sector `block`; returns
    /// `target.len()` after the device reports success.
    async fn write(&self, block: u64, target: &[u8]) -> usize {
        let req = Dma::new(BlockVirtRequest {
            ty: BlockRequestTy::Out,
            reserved: 0,
            sector: block,
        })
        .unwrap();

        // DMA bounce buffer holding a copy of the payload for the device
        // to read.
        let mut result = unsafe {
            Dma::<[u8]>::zeroed_slice(target.len())
                .unwrap()
                .assume_init()
        };
        result.copy_from_slice(target);

        // Status byte; the device overwrites it on completion (0 == OK).
        let status = Dma::new(u8::MAX).unwrap();
        let chain = ChainBuilder::new()
            .chain(Buffer::new(&req))
            .chain(Buffer::new_sized(&result, target.len()))
            .chain(Buffer::new(&status).flags(DescriptorFlags::WRITE_ONLY))
            .build();

        // For writes only the status byte is device-written, so the
        // returned length carries no payload information; discard it.
        let _ = self.send(chain).await;
        assert_eq!(*status, 0);
        target.len()
    }
}
/// A single virtio-blk disk: the virtqueue used for I/O plus the handle
/// onto the device's configuration space.
pub(crate) struct VirtioDisk<'a> {
    queue: Arc<Queue<'a>>,
    cfg: BlockDeviceConfig,
}

impl<'a> VirtioDisk<'a> {
    /// Creates a disk from an already set-up queue and config handle.
    pub(crate) fn new(queue: Arc<Queue<'a>>, cfg: BlockDeviceConfig) -> Self {
        Self { queue, cfg }
    }
}
/// Glue between the generic block-scheme [`driver_block::Disk`] trait and
/// the virtio-specific queue operations.
impl driver_block::Disk for VirtioDisk<'_> {
    /// Block size in bytes, read straight from the device config space.
    fn block_size(&self) -> u32 {
        self.cfg.block_size()
    }

    /// Total disk size in bytes.
    ///
    /// NOTE(review): `capacity()` is a sector count (logged as sectors by
    /// the daemon); multiplying by `block_size()` assumes the block size is
    /// the sector size — confirm against the virtio spec.
    fn size(&self) -> u64 {
        let sectors = self.cfg.capacity();
        let bytes_per_block = u64::from(self.cfg.block_size());
        sectors * bytes_per_block
    }

    /// Delegates the read to the virtqueue; cannot fail at this layer.
    async fn read(&mut self, block: u64, buffer: &mut [u8]) -> syscall::Result<usize> {
        let transferred = self.queue.read(block, buffer).await;
        Ok(transferred)
    }

    /// Delegates the write to the virtqueue; cannot fail at this layer.
    async fn write(&mut self, block: u64, buffer: &[u8]) -> syscall::Result<usize> {
        let transferred = self.queue.write(block, buffer).await;
        Ok(transferred)
    }
}