feat: build system transition to release fork + archive hardening

Release fork infrastructure:
- REDBEAR_RELEASE=0.1.1 with offline enforcement (fetch/distclean/unfetch blocked)
- 195 BLAKE3-verified source archives in standard format
- Atomic provisioning via provision-release.sh (staging + .complete sentinel)
- 5-phase improvement plan: restore format auto-detection, source tree
  validation (validate-source-trees.py), archive-map.json, REPO_BINARY fallback

Archive normalization:
- Removed 87 duplicate/unversioned archives from shared pool
- Regenerated all archives in consistent format with source/ + recipe.toml
- BLAKE3SUMS and manifest.json generated from the stable tarball set

Patch management:
- verify-patches.sh: pre-sync dry-run report (OK/REVERSED/CONFLICT)
- 121 upstream-absorbed patches moved to absorbed/ directories
- 43 active patches verified clean against rebased sources
- Stress test: base updated to upstream HEAD, relibc reset and patched

Compilation fixes:
- relibc: Vec imports in redox-rt (proc.rs, lib.rs, sys.rs)
- relibc: unsafe from_raw_parts in mod.rs (2024 edition)
- fetch.rs: rev comparison handles short/full hash prefixes
- kibi recipe: corrected rev mismatch

New scripts: restore-sources.sh, provision-release.sh, verify-sources-archived.sh,
check-upstream-releases.sh, validate-source-trees.py, verify-patches.sh,
repair-archive-format.sh, generate-manifest.py

Documentation: AGENTS.md, README.md, local/AGENTS.md updated for release fork model
This commit is contained in:
2026-05-02 01:41:17 +01:00
parent f55acba68c
commit 5851974b20
242 changed files with 29015 additions and 1818 deletions
@@ -0,0 +1,601 @@
# P2-storage-error-handling.patch
#
# Storage driver error handling: replace unwrap()/expect()/panic!() with proper
# error propagation and graceful exits across AHCI, IDE, NVMe, and VirtIO block drivers.
#
# Covers:
# - ahcid/disk_ata.rs: replace unreachable!() with EIO error
# - ahcid/disk_atapi.rs: replace unreachable!() with EBADF error
# - ahcid/hba.rs: DMA allocation error handling
# - ided/ide.rs: assert→debug_assert, try_into error handling
# - nvmed/executor.rs: executor initialization error handling
# - nvmed/identify.rs: DMA allocation, unreachable!() fallback
# - nvmed/mod.rs: assert→debug_assert, unwrap→proper error/exit
# - nvmed/queues.rs: unreachable!()→safe fallback
# - virtio-blkd/scheme.rs: DMA allocation error handling, assert→if check
#
diff --git a/drivers/storage/ahcid/src/ahci/disk_ata.rs b/drivers/storage/ahcid/src/ahci/disk_ata.rs
index 4f83c51d..7423603b 100644
--- a/drivers/storage/ahcid/src/ahci/disk_ata.rs
+++ b/drivers/storage/ahcid/src/ahci/disk_ata.rs
@@ -1,7 +1,7 @@
use std::convert::TryInto;
use std::ptr;
-use syscall::error::Result;
+use syscall::error::{Error, Result, EIO};
use common::dma::Dma;
@@ -39,7 +39,7 @@ impl DiskATA {
.map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() }))
.collect::<Result<Vec<_>>>()?
.try_into()
- .unwrap_or_else(|_| unreachable!());
+ .map_err(|_| Error::new(EIO))?;
let mut fb = unsafe { Dma::zeroed()?.assume_init() };
let buf = unsafe { Dma::zeroed()?.assume_init() };
diff --git a/drivers/storage/ahcid/src/ahci/disk_atapi.rs b/drivers/storage/ahcid/src/ahci/disk_atapi.rs
index a0e75c09..8fbdfbef 100644
--- a/drivers/storage/ahcid/src/ahci/disk_atapi.rs
+++ b/drivers/storage/ahcid/src/ahci/disk_atapi.rs
@@ -37,7 +37,7 @@ impl DiskATAPI {
.map(|_| Ok(unsafe { Dma::zeroed()?.assume_init() }))
.collect::<Result<Vec<_>>>()?
.try_into()
- .unwrap_or_else(|_| unreachable!());
+ .map_err(|_| Error::new(EBADF))?;
let mut fb = unsafe { Dma::zeroed()?.assume_init() };
let mut buf = unsafe { Dma::zeroed()?.assume_init() };
diff --git a/drivers/storage/ahcid/src/ahci/hba.rs b/drivers/storage/ahcid/src/ahci/hba.rs
index bea8792c..11a3d4ae 100644
--- a/drivers/storage/ahcid/src/ahci/hba.rs
+++ b/drivers/storage/ahcid/src/ahci/hba.rs
@@ -178,7 +178,10 @@ impl HbaPort {
clb: &mut Dma<[HbaCmdHeader; 32]>,
ctbas: &mut [Dma<HbaCmdTable>; 32],
) -> Result<u64> {
- let dest: Dma<[u16; 256]> = Dma::new([0; 256]).unwrap();
+ let dest: Dma<[u16; 256]> = Dma::new([0; 256]).map_err(|err| {
+ error!("ahcid: failed to allocate DMA buffer: {err}");
+ Error::new(EIO)
+ })?;
let slot = self
.ata_start(clb, ctbas, |cmdheader, cmdfis, prdt_entries, _acmd| {
diff --git a/drivers/storage/ided/src/ide.rs b/drivers/storage/ided/src/ide.rs
index 5faf3250..094e5889 100644
--- a/drivers/storage/ided/src/ide.rs
+++ b/drivers/storage/ided/src/ide.rs
@@ -184,10 +184,10 @@ impl Disk for AtaDisk {
let block = start_block + (count as u64) / 512;
//TODO: support other LBA modes
- assert!(block < 0x1_0000_0000_0000);
+ debug_assert!(block < 0x1_0000_0000_0000);
let sectors = (chunk.len() + 511) / 512;
- assert!(sectors <= 128);
+ debug_assert!(sectors <= 128);
log::trace!(
"IDE read chan {} dev {} block {:#x} count {:#x}",
@@ -205,7 +205,7 @@ impl Disk for AtaDisk {
// Make PRDT EOT match chunk size
for i in 0..sectors {
chan.prdt[i] = PrdtEntry {
- phys: (chan.buf.physical() + i * 512).try_into().unwrap(),
+ phys: (chan.buf.physical() + i * 512).try_into().map_err(|_| Error::new(EIO))?,
size: 512,
flags: if i + 1 == sectors {
1 << 15 // End of table
@@ -216,7 +216,7 @@ impl Disk for AtaDisk {
}
// Set PRDT
let prdt = chan.prdt.physical();
- chan.busmaster_prdt.write(prdt.try_into().unwrap());
+ chan.busmaster_prdt.write(prdt.try_into().map_err(|_| Error::new(EIO))?);
// Set to read
chan.busmaster_command.writef(1 << 3, true);
// Clear interrupt and error bits
@@ -325,10 +325,10 @@ impl Disk for AtaDisk {
let block = start_block + (count as u64) / 512;
//TODO: support other LBA modes
- assert!(block < 0x1_0000_0000_0000);
+ debug_assert!(block < 0x1_0000_0000_0000);
let sectors = (chunk.len() + 511) / 512;
- assert!(sectors <= 128);
+ debug_assert!(sectors <= 128);
log::trace!(
"IDE write chan {} dev {} block {:#x} count {:#x}",
@@ -346,7 +346,7 @@ impl Disk for AtaDisk {
// Make PRDT EOT match chunk size
for i in 0..sectors {
chan.prdt[i] = PrdtEntry {
- phys: (chan.buf.physical() + i * 512).try_into().unwrap(),
+ phys: (chan.buf.physical() + i * 512).try_into().map_err(|_| Error::new(EIO))?,
size: 512,
flags: if i + 1 == sectors {
1 << 15 // End of table
@@ -357,7 +357,7 @@ impl Disk for AtaDisk {
}
// Set PRDT
let prdt = chan.prdt.physical();
- chan.busmaster_prdt.write(prdt.try_into().unwrap());
+ chan.busmaster_prdt.write(prdt.try_into().map_err(|_| Error::new(EIO))?);
// Set to write
chan.busmaster_command.writef(1 << 3, false);
// Clear interrupt and error bits
diff --git a/drivers/storage/nvmed/src/nvme/executor.rs b/drivers/storage/nvmed/src/nvme/executor.rs
index 6242fa98..c1435e88 100644
--- a/drivers/storage/nvmed/src/nvme/executor.rs
+++ b/drivers/storage/nvmed/src/nvme/executor.rs
@@ -34,7 +34,12 @@ impl Hardware for NvmeHw {
&VTABLE
}
fn current() -> std::rc::Rc<executor::LocalExecutor<Self>> {
- THE_EXECUTOR.with(|exec| Rc::clone(exec.borrow().as_ref().unwrap()))
+ THE_EXECUTOR.with(|exec| {
+ Rc::clone(exec.borrow().as_ref().unwrap_or_else(|| {
+ log::error!("nvmed: internal error: executor not initialized");
+ std::process::exit(1);
+ }))
+ })
}
fn try_submit(
nvme: &Arc<Nvme>,
diff --git a/drivers/storage/nvmed/src/nvme/identify.rs b/drivers/storage/nvmed/src/nvme/identify.rs
index 05e5b9b2..b1b6e959 100644
--- a/drivers/storage/nvmed/src/nvme/identify.rs
+++ b/drivers/storage/nvmed/src/nvme/identify.rs
@@ -126,7 +126,7 @@ impl LbaFormat {
0b01 => RelativePerformance::Better,
0b10 => RelativePerformance::Good,
0b11 => RelativePerformance::Degraded,
- _ => unreachable!(),
+ _ => RelativePerformance::Degraded,
}
}
pub fn is_available(&self) -> bool {
@@ -153,7 +153,14 @@ impl Nvme {
/// Returns the serial number, model, and firmware, in that order.
pub async fn identify_controller(&self) {
// TODO: Use same buffer
- let data: Dma<IdentifyControllerData> = unsafe { Dma::zeroed().unwrap().assume_init() };
+ let data: Dma<IdentifyControllerData> = unsafe {
+ Dma::zeroed()
+ .map(|dma| dma.assume_init())
+ .unwrap_or_else(|err| {
+ log::error!("nvmed: failed to allocate identify DMA: {err}");
+ std::process::exit(1);
+ })
+ };
// println!(" - Attempting to identify controller");
let comp = self
@@ -182,7 +189,14 @@ impl Nvme {
}
pub async fn identify_namespace_list(&self, base: u32) -> Vec<u32> {
// TODO: Use buffer
- let data: Dma<[u32; 1024]> = unsafe { Dma::zeroed().unwrap().assume_init() };
+ let data: Dma<[u32; 1024]> = unsafe {
+ Dma::zeroed()
+ .map(|dma| dma.assume_init())
+ .unwrap_or_else(|err| {
+ log::error!("nvmed: failed to allocate namespace list DMA: {err}");
+ std::process::exit(1);
+ })
+ };
// println!(" - Attempting to retrieve namespace ID list");
let comp = self
@@ -198,7 +212,14 @@ impl Nvme {
}
pub async fn identify_namespace(&self, nsid: u32) -> NvmeNamespace {
//TODO: Use buffer
- let data: Dma<IdentifyNamespaceData> = unsafe { Dma::zeroed().unwrap().assume_init() };
+ let data: Dma<IdentifyNamespaceData> = unsafe {
+ Dma::zeroed()
+ .map(|dma| dma.assume_init())
+ .unwrap_or_else(|err| {
+ log::error!("nvmed: failed to allocate namespace DMA: {err}");
+ std::process::exit(1);
+ })
+ };
log::debug!("Attempting to identify namespace {nsid}");
let comp = self
@@ -216,7 +237,10 @@ impl Nvme {
let block_size = data
.formatted_lba_size()
.lba_data_size()
- .expect("nvmed: error: size outside 512-2^64 range");
+ .unwrap_or_else(|| {
+ log::error!("nvmed: error: size outside 512-2^64 range");
+ std::process::exit(1);
+ });
log::debug!("NVME block size: {}", block_size);
NvmeNamespace {
diff --git a/drivers/storage/nvmed/src/nvme/mod.rs b/drivers/storage/nvmed/src/nvme/mod.rs
index 682ee933..90a25d5b 100644
--- a/drivers/storage/nvmed/src/nvme/mod.rs
+++ b/drivers/storage/nvmed/src/nvme/mod.rs
@@ -160,7 +160,15 @@ impl Nvme {
}
fn cur_thread_ctxt(&self) -> Arc<ReentrantMutex<ThreadCtxt>> {
// TODO: multi-threading
- Arc::clone(self.thread_ctxts.read().get(&0).unwrap())
+ Arc::clone(
+ self.thread_ctxts
+ .read()
+ .get(&0)
+ .unwrap_or_else(|| {
+ log::error!("nvmed: internal error: thread context 0 missing");
+ std::process::exit(1);
+ }),
+ )
}
pub unsafe fn submission_queue_tail(&self, qid: u16, tail: u16) {
@@ -208,10 +216,22 @@ impl Nvme {
}
for (qid, iv) in self.cq_ivs.get_mut().iter_mut() {
- let ctxt = thread_ctxts.get(&0).unwrap().lock();
+ let ctxt = match thread_ctxts.get(&0) {
+ Some(c) => c.lock(),
+ None => {
+ log::error!("nvmed: internal error: thread context 0 missing");
+ return Err(Error::new(EIO));
+ }
+ };
let queues = ctxt.queues.borrow();
- let &(ref cq, ref sq) = queues.get(qid).unwrap();
+ let (cq, sq) = match queues.get(qid) {
+ Some(pair) => pair,
+ None => {
+ log::error!("nvmed: internal error: queue {qid} missing");
+ return Err(Error::new(EIO));
+ }
+ };
log::debug!(
"iv {iv} [cq {qid}: {:X}, {}] [sq {qid}: {:X}, {}]",
cq.data.physical(),
@@ -222,7 +242,13 @@ impl Nvme {
}
{
- let main_ctxt = thread_ctxts.get(&0).unwrap().lock();
+ let main_ctxt = match thread_ctxts.get(&0) {
+ Some(c) => c.lock(),
+ None => {
+ log::error!("nvmed: internal error: thread context 0 missing");
+ return Err(Error::new(EIO));
+ }
+ };
for (i, prp) in main_ctxt.buffer_prp.borrow_mut().iter_mut().enumerate() {
*prp = (main_ctxt.buffer.borrow_mut().physical() + i * 4096) as u64;
@@ -231,7 +257,13 @@ impl Nvme {
let regs = self.regs.get_mut();
let mut queues = main_ctxt.queues.borrow_mut();
- let (asq, acq) = queues.get_mut(&0).unwrap();
+ let (asq, acq) = match queues.get_mut(&0) {
+ Some(pair) => pair,
+ None => {
+ log::error!("nvmed: internal error: admin queue pair missing");
+ return Err(Error::new(EIO));
+ }
+ };
regs.aqa
.write(((acq.data.len() as u32 - 1) << 16) | (asq.data.len() as u32 - 1));
regs.asq_low.write(asq.data.physical() as u32);
@@ -281,14 +313,14 @@ impl Nvme {
let vector = vector as u8;
if masked {
- assert_ne!(
+ debug_assert_ne!(
to_clear & (1 << vector),
(1 << vector),
"nvmed: internal error: cannot both mask and set"
);
to_mask |= 1 << vector;
} else {
- assert_ne!(
+ debug_assert_ne!(
to_mask & (1 << vector),
(1 << vector),
"nvmed: internal error: cannot both mask and set"
@@ -326,22 +358,27 @@ impl Nvme {
cmd_init: impl FnOnce(CmdId) -> NvmeCmd,
fail: impl FnOnce(),
) -> Option<(CqId, CmdId)> {
- match ctxt.queues.borrow_mut().get_mut(&sq_id).unwrap() {
- (sq, _cq) => {
- if sq.is_full() {
- fail();
- return None;
- }
- let cmd_id = sq.tail;
- let tail = sq.submit_unchecked(cmd_init(cmd_id));
-
- // TODO: Submit in bulk
- unsafe {
- self.submission_queue_tail(sq_id, tail);
- }
- Some((sq_id, cmd_id))
+ let mut queues_ref = ctxt.queues.borrow_mut();
+ let (sq, _cq) = match queues_ref.get_mut(&sq_id) {
+ Some(pair) => pair,
+ None => {
+ log::error!("nvmed: internal error: submission queue {sq_id} missing");
+ fail();
+ return None;
}
+ };
+ if sq.is_full() {
+ fail();
+ return None;
+ }
+ let cmd_id = sq.tail;
+ let tail = sq.submit_unchecked(cmd_init(cmd_id));
+
+ // TODO: Submit in bulk
+ unsafe {
+ self.submission_queue_tail(sq_id, tail);
}
+ Some((sq_id, cmd_id))
}
pub async fn create_io_completion_queue(
@@ -349,13 +386,19 @@ impl Nvme {
io_cq_id: CqId,
vector: Option<Iv>,
) -> NvmeCompQueue {
- let queue = NvmeCompQueue::new().expect("nvmed: failed to allocate I/O completion queue");
-
- let len = u16::try_from(queue.data.len())
- .expect("nvmed: internal error: I/O CQ longer than 2^16 entries");
- let raw_len = len
- .checked_sub(1)
- .expect("nvmed: internal error: CQID 0 for I/O CQ");
+ let queue = NvmeCompQueue::new().unwrap_or_else(|err| {
+ log::error!("nvmed: failed to allocate I/O completion queue: {err}");
+ std::process::exit(1);
+ });
+
+ let len = u16::try_from(queue.data.len()).unwrap_or_else(|_| {
+ log::error!("nvmed: internal error: I/O CQ longer than 2^16 entries");
+ std::process::exit(1);
+ });
+ let raw_len = len.checked_sub(1).unwrap_or_else(|| {
+ log::error!("nvmed: internal error: CQID 0 for I/O CQ");
+ std::process::exit(1);
+ });
let comp = self
.submit_and_complete_admin_command(|cid| {
@@ -370,22 +413,28 @@ impl Nvme {
.await;
/*match comp.status.specific {
- 1 => panic!("invalid queue identifier"),
- 2 => panic!("invalid queue size"),
- 8 => panic!("invalid interrupt vector"),
+ 1 => { log::error!("nvmed: invalid queue identifier"); std::process::exit(1); }
+ 2 => { log::error!("nvmed: invalid queue size"); std::process::exit(1); }
+ 8 => { log::error!("nvmed: invalid interrupt vector"); std::process::exit(1); }
_ => (),
}*/
queue
}
pub async fn create_io_submission_queue(&self, io_sq_id: SqId, io_cq_id: CqId) -> NvmeCmdQueue {
- let q = NvmeCmdQueue::new().expect("failed to create submission queue");
-
- let len = u16::try_from(q.data.len())
- .expect("nvmed: internal error: I/O SQ longer than 2^16 entries");
- let raw_len = len
- .checked_sub(1)
- .expect("nvmed: internal error: SQID 0 for I/O SQ");
+ let q = NvmeCmdQueue::new().unwrap_or_else(|err| {
+ log::error!("nvmed: failed to create submission queue: {err}");
+ std::process::exit(1);
+ });
+
+ let len = u16::try_from(q.data.len()).unwrap_or_else(|_| {
+ log::error!("nvmed: internal error: I/O SQ longer than 2^16 entries");
+ std::process::exit(1);
+ });
+ let raw_len = len.checked_sub(1).unwrap_or_else(|| {
+ log::error!("nvmed: internal error: SQID 0 for I/O SQ");
+ std::process::exit(1);
+ });
let comp = self
.submit_and_complete_admin_command(|cid| {
@@ -399,9 +448,9 @@ impl Nvme {
})
.await;
/*match comp.status.specific {
- 0 => panic!("completion queue invalid"),
- 1 => panic!("invalid queue identifier"),
- 2 => panic!("invalid queue size"),
+ 0 => { log::error!("nvmed: completion queue invalid"); std::process::exit(1); }
+ 1 => { log::error!("nvmed: invalid queue identifier"); std::process::exit(1); }
+ 2 => { log::error!("nvmed: invalid queue size"); std::process::exit(1); }
_ => (),
}*/
@@ -431,7 +480,10 @@ impl Nvme {
self.thread_ctxts
.read()
.get(&0)
- .unwrap()
+ .unwrap_or_else(|| {
+ log::error!("nvmed: internal error: thread context 0 missing");
+ std::process::exit(1);
+ })
.lock()
.queues
.borrow_mut()
@@ -497,8 +549,8 @@ impl Nvme {
for chunk in buf.chunks_mut(/* TODO: buf len */ 8192) {
let blocks = (chunk.len() + block_size - 1) / block_size;
- assert!(blocks > 0);
- assert!(blocks <= 0x1_0000);
+ debug_assert!(blocks > 0);
+ debug_assert!(blocks <= 0x1_0000);
self.namespace_rw(&*ctxt, namespace, lba, (blocks - 1) as u16, false)
.await?;
@@ -525,8 +577,8 @@ impl Nvme {
for chunk in buf.chunks(/* TODO: buf len */ 8192) {
let blocks = (chunk.len() + block_size - 1) / block_size;
- assert!(blocks > 0);
- assert!(blocks <= 0x1_0000);
+ debug_assert!(blocks > 0);
+ debug_assert!(blocks <= 0x1_0000);
ctxt.buffer.borrow_mut()[..chunk.len()].copy_from_slice(chunk);
diff --git a/drivers/storage/nvmed/src/nvme/queues.rs b/drivers/storage/nvmed/src/nvme/queues.rs
index a3712aeb..438c905c 100644
--- a/drivers/storage/nvmed/src/nvme/queues.rs
+++ b/drivers/storage/nvmed/src/nvme/queues.rs
@@ -145,7 +145,7 @@ impl Status {
3 => Self::PathRelatedStatus(code),
4..=6 => Self::Rsvd(code),
7 => Self::Vendor(code),
- _ => unreachable!(),
+ _ => Self::Vendor(code),
}
}
}
diff --git a/drivers/storage/virtio-blkd/src/scheme.rs b/drivers/storage/virtio-blkd/src/scheme.rs
index ec4ecf73..39fb24a8 100644
--- a/drivers/storage/virtio-blkd/src/scheme.rs
+++ b/drivers/storage/virtio-blkd/src/scheme.rs
@@ -15,19 +15,34 @@ trait BlkExtension {
impl BlkExtension for Queue<'_> {
async fn read(&self, block: u64, target: &mut [u8]) -> usize {
- let req = Dma::new(BlockVirtRequest {
+ let req = match Dma::new(BlockVirtRequest {
ty: BlockRequestTy::In,
reserved: 0,
sector: block,
- })
- .unwrap();
+ }) {
+ Ok(req) => req,
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate read request DMA: {err}");
+ return 0;
+ }
+ };
let result = unsafe {
- Dma::<[u8]>::zeroed_slice(target.len())
- .unwrap()
- .assume_init()
+ match Dma::<[u8]>::zeroed_slice(target.len()) {
+ Ok(dma) => dma.assume_init(),
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate read buffer DMA: {err}");
+ return 0;
+ }
+ }
+ };
+ let status = match Dma::new(u8::MAX) {
+ Ok(s) => s,
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate read status DMA: {err}");
+ return 0;
+ }
};
- let status = Dma::new(u8::MAX).unwrap();
let chain = ChainBuilder::new()
.chain(Buffer::new(&req))
@@ -37,28 +52,46 @@ impl BlkExtension for Queue<'_> {
// XXX: Subtract 1 because the of status byte.
let written = self.send(chain).await as usize - 1;
- assert_eq!(*status, 0);
+ if *status != 0 {
+ log::error!("virtio-blkd: read failed with status {}", *status);
+ return 0;
+ }
target[..written].copy_from_slice(&result);
written
}
async fn write(&self, block: u64, target: &[u8]) -> usize {
- let req = Dma::new(BlockVirtRequest {
+ let req = match Dma::new(BlockVirtRequest {
ty: BlockRequestTy::Out,
reserved: 0,
sector: block,
- })
- .unwrap();
+ }) {
+ Ok(req) => req,
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate write request DMA: {err}");
+ return 0;
+ }
+ };
let mut result = unsafe {
- Dma::<[u8]>::zeroed_slice(target.len())
- .unwrap()
- .assume_init()
+ match Dma::<[u8]>::zeroed_slice(target.len()) {
+ Ok(dma) => dma.assume_init(),
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate write buffer DMA: {err}");
+ return 0;
+ }
+ }
};
result.copy_from_slice(target.as_ref());
- let status = Dma::new(u8::MAX).unwrap();
+ let status = match Dma::new(u8::MAX) {
+ Ok(s) => s,
+ Err(err) => {
+ log::error!("virtio-blkd: failed to allocate write status DMA: {err}");
+ return 0;
+ }
+ };
let chain = ChainBuilder::new()
.chain(Buffer::new(&req))
@@ -67,7 +100,10 @@ impl BlkExtension for Queue<'_> {
.build();
self.send(chain).await as usize;
- assert_eq!(*status, 0);
+ if *status != 0 {
+ log::error!("virtio-blkd: write failed with status {}", *status);
+ return 0;
+ }
target.len()
}