milestone: desktop path Phases 1-5

Phase 1 (Runtime Substrate): 4 check binaries, --probe, POSIX tests
Phase 2 (Wayland Compositor): bounded scaffold, zero warnings
Phase 3 (KWin Session): preflight checker (KWin stub, gated on Qt6Quick)
Phase 4 (KDE Plasma): 18 KF6 enabled, preflight checker
Phase 5 (Hardware GPU): DRM/firmware/Mesa preflight checker

Build: zero warnings, all scripts syntax-clean. Oracle-verified.
This commit is contained in:
2026-04-29 09:54:06 +01:00
parent b23714f542
commit 8acc73d774
508 changed files with 76526 additions and 396 deletions
+15
View File
@@ -0,0 +1,15 @@
[package]
name = "redox-initfs"
description = "Userspace bootstrap image library"
version = "0.2.0"
authors = ["4lDO2 <4lDO2@protonmail.com>", "Kamil Koczurek <koczurekk@gmail.com>"]
edition = "2024"
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
plain.workspace = true
[lints]
workspace = true
+285
View File
@@ -0,0 +1,285 @@
#![no_std]
//! A super simple initfs, only meant to be loaded into RAM by the bootloader, and then directly be
//! read.
use core::convert::{TryFrom, TryInto};
pub mod types;
use self::types::*;
/// Zero-copy view over an initfs image that has been loaded into memory.
///
/// Wraps the raw byte slice; every accessor hands out subslices of `base`.
#[derive(Clone, Copy)]
pub struct InitFs<'initfs> {
    base: &'initfs [u8],
}
/// Index of an inode within the on-disk inode table (stored as u16 on disk).
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Inode(u16);
/// Unit error type: any structural problem in the image maps to this.
#[derive(Clone, Copy, Debug)]
pub struct Error;
impl core::fmt::Display for Error {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "invalid or corrupt initfs")
    }
}
impl core::error::Error for Error {}
/// Crate-local result alias using the unit [`Error`].
type Result<T> = core::result::Result<T, Error>;
/// An inode header paired with the filesystem it was read from, so that data
/// lookups can resolve on-disk offsets against the image base.
#[derive(Clone, Copy)]
pub struct InodeStruct<'initfs> {
    initfs: InitFs<'initfs>,
    inode_id: Inode,
    inode: &'initfs InodeHeader,
}
/// Typed wrapper around an inode known to be a regular or executable file.
#[derive(Clone, Copy)]
pub struct InodeFile<'initfs> {
    inner: InodeStruct<'initfs>,
}
impl<'initfs> InodeFile<'initfs> {
    /// Unwrap back into the untyped inode.
    pub fn inode(self) -> InodeStruct<'initfs> {
        self.inner
    }
    /// Borrow the file's contents out of the underlying image.
    pub fn data(&self) -> Result<&'initfs [u8]> {
        self.inner.data()
    }
}
/// Typed wrapper around an inode known to be a directory.
#[derive(Clone, Copy)]
pub struct InodeDir<'initfs> {
    inner: InodeStruct<'initfs>,
}
impl<'initfs> InodeDir<'initfs> {
pub fn inode(self) -> InodeStruct<'initfs> {
self.inner
}
pub fn entry_count(&self) -> Result<u32> {
let len = self.entries()?.len();
// NOTE: Len is originally stored as a u32 in the struct, so it can never exceed u32
// despite first being converted to usize.
let len = len as u32;
Ok(len)
}
pub fn get_entry(&self, idx: u32) -> Result<Option<Entry<'initfs>>> {
let idx = usize::try_from(idx).map_err(|_| Error)?;
self.entries().map(|entries| {
let entry = entries.get(idx)?;
Some(Entry {
entry,
initfs: self.inner.initfs,
})
})
}
fn entries(&self) -> Result<&'initfs [DirEntry]> {
let bytes = self.inner.data()?;
let entries = plain::slice_from_bytes::<DirEntry>(bytes)
.expect("expected dir entry to have alignment 1");
Ok(entries)
}
}
/// Typed wrapper around an inode known to be a symbolic link.
#[derive(Clone, Copy)]
pub struct InodeLink<'initfs> {
    inner: InodeStruct<'initfs>,
}
impl<'initfs> InodeLink<'initfs> {
    /// Unwrap back into the untyped inode.
    pub fn inode(self) -> InodeStruct<'initfs> {
        self.inner
    }
    /// Borrow the link target bytes (the destination path) from the image.
    pub fn data(&self) -> Result<&'initfs [u8]> {
        self.inner.data()
    }
}
#[derive(Clone, Copy)]
pub struct Entry<'initfs> {
initfs: InitFs<'initfs>,
entry: &'initfs DirEntry,
}
impl<'initfs> Entry<'initfs> {
pub fn inode(&self) -> Inode {
Inode(self.entry.inode.get())
}
pub fn name(&self) -> Result<&'initfs [u8]> {
let name_offset: usize = self
.entry
.name_offset
.0
.get()
.try_into()
.map_err(|_| Error)?;
let name_length: usize = self.entry.name_len.get().into();
let name_end = name_offset.checked_add(name_length).ok_or(Error)?;
self.initfs.base.get(name_offset..name_end).ok_or(Error)
}
}
/// Type-dispatched view of an inode; `Unknown` covers reserved type codes.
#[derive(Clone, Copy)]
pub enum InodeKind<'initfs> {
    File(InodeFile<'initfs>),
    Dir(InodeDir<'initfs>),
    Link(InodeLink<'initfs>),
    Unknown,
}
impl<'initfs> InodeStruct<'initfs> {
pub fn id(&self) -> u64 {
self.inode_id.0.into()
}
fn data(&self) -> Result<&'initfs [u8]> {
let start: usize = self.inode.offset.0.get().try_into().map_err(|_| Error)?;
let length: usize = self.inode.length.0.get().try_into().map_err(|_| Error)?;
let end = start.checked_add(length).ok_or(Error)?;
self.initfs.base.get(start..end).ok_or(Error)
}
pub fn mode(&self) -> u16 {
match self.ty() {
Some(InodeType::RegularFile) => 0o444,
Some(InodeType::ExecutableFile) => 0o555,
Some(InodeType::Dir) => 0o555,
Some(InodeType::Link) => 0o777,
None => 0o000,
}
}
fn ty(&self) -> Option<InodeType> {
InodeType::try_from(self.inode.type_.get()).ok()
}
pub fn kind(&self) -> InodeKind<'initfs> {
let inner = *self;
match self.ty() {
Some(InodeType::Dir) => InodeKind::Dir(InodeDir { inner }),
Some(InodeType::RegularFile | InodeType::ExecutableFile) => {
InodeKind::File(InodeFile { inner })
}
Some(InodeType::Link) => InodeKind::Link(InodeLink { inner }),
None => InodeKind::Unknown,
}
}
}
impl<'initfs> InitFs<'initfs> {
    /// Parse and validate an initfs image contained in `base`.
    ///
    /// Checks, in order: the slice is at least one header long; the total
    /// length fits in u32 (all on-disk offsets are u32); the magic matches;
    /// the inode table begins after the header and ends inside the slice;
    /// and — when `required_page_size` is given — the stored page size
    /// matches it. Any failure yields the unit [`Error`].
    pub fn new(base: &'initfs [u8], required_page_size: Option<u16>) -> Result<Self> {
        let this = Self { base };
        if base.len() < core::mem::size_of::<Header>() {
            return Err(Error);
        }
        if u32::try_from(base.len()).is_err() {
            return Err(Error);
        }
        // Safe to read now: the length check above guarantees a full header.
        let header = this.get_header_assume_valid();
        if header.magic != Magic(MAGIC) {
            return Err(Error);
        }
        let inode_table_offset = header.inode_table_offset.0.get();
        // The inode table must not overlap the header.
        if inode_table_offset < u32::from(Self::header_len_8()) {
            return Err(Error);
        }
        let inode_table_size = this.inode_table_size();
        let inode_table_end = inode_table_offset
            .checked_add(inode_table_size)
            .ok_or(Error)?;
        if inode_table_end > this.base_len_32() {
            return Err(Error);
        }
        if let Some(required_page_size) = required_page_size
            && header.page_size.get() != required_page_size
        {
            return Err(Error);
        }
        // From now on, we can be completely sure that the header and inode tables offsets are
        // valid, and thus continue based on that assumption.
        Ok(this)
    }
    // Reinterpret the first `size_of::<Header>()` bytes as the header. Only
    // valid after `new()` has checked the slice length.
    fn get_header_assume_valid(&self) -> &Header {
        plain::from_bytes::<Header>(&self.base[..core::mem::size_of::<Header>()])
            .expect("expected header type to require no alignment, and size to be sufficient")
    }
    /// Borrow the image header.
    pub fn header(&self) -> &Header {
        self.get_header_assume_valid()
    }
    // Header size as u8 (the `_8` suffix denotes the integer width).
    fn header_len_8() -> u8 {
        core::mem::size_of::<Header>()
            .try_into()
            .expect("expected header size to fit within u8")
    }
    // Inode header size as u8.
    fn inode_struct_len_8() -> u8 {
        core::mem::size_of::<InodeHeader>()
            .try_into()
            .expect("expected inode struct size to fit within u8")
    }
    fn inode_table_offset_usize(&self) -> usize {
        // NOTE: We have already validated that the inode table fits within the initfs slice, and
        // that this length, which must fit within usize, also fits within u32.
        self.get_header_assume_valid().inode_table_offset.0.get() as usize
    }
    fn inode_table_end_usize(&self) -> usize {
        // NOTE: This follows the same reasoning as in inode_table_offset_usize(). The end offset
        // has been checked against u32 and the initfs slice length.
        self.inode_table_offset_usize()
            .wrapping_add(self.inode_table_size() as usize)
    }
    // Byte range of the inode table within `base`; valid per the checks in `new()`.
    fn inode_table_range(&self) -> core::ops::Range<usize> {
        self.inode_table_offset_usize()..self.inode_table_end_usize()
    }
    fn base_len_32(&self) -> u32 {
        // NOTE: We have already validated that the length is sufficient.
        self.base.len() as u32
    }
    // Total size in bytes of the inode table.
    fn inode_table_size(&self) -> u32 {
        let count = self.get_header_assume_valid().inode_count.get();
        let struct_size = Self::inode_struct_len_8();
        // NOTE: We know for a fact, that even the largest u8 (255) and the largest u16 (65536),
        // can only be approximately 2^24 at max.
        u32::wrapping_mul(u32::from(count), u32::from(struct_size))
    }
    // Reinterpret the inode-table bytes as a slice of headers.
    fn inode_table(&self) -> &'initfs [InodeHeader] {
        let inode_table_bytes = &self.base[self.inode_table_range()];
        plain::slice_from_bytes::<InodeHeader>(inode_table_bytes)
            .expect("expected inode struct alignment to be 1")
    }
    /// Inode number of the root directory, as recorded in the header.
    pub fn root_inode(&self) -> Inode {
        Inode(self.get_header_assume_valid().root_inode.get())
    }
    /// Iterate over every inode number in the table.
    pub fn all_inodes(&self) -> impl Iterator<Item = Inode> {
        (0..self.inode_count()).map(Inode)
    }
    /// Number of inodes stored in the table.
    pub fn inode_count(&self) -> u16 {
        self.get_header_assume_valid().inode_count.get()
    }
    /// Look up an inode by number; `None` when out of range.
    pub fn get_inode(&self, inode_id: Inode) -> Option<InodeStruct<'initfs>> {
        Some(InodeStruct {
            initfs: *self,
            inode_id,
            inode: self.inode_table().get(usize::from(inode_id.0))?,
        })
    }
}
+119
View File
@@ -0,0 +1,119 @@
use core::mem::offset_of;
/// Byte length of the on-disk magic field.
pub const MAGIC_LEN: usize = 8;
/// Magic bytes identifying an initfs image.
pub const MAGIC: [u8; 8] = *b"RedoxFtw";
// Generates an unaligned little-endian integer wrapper backed by a byte
// array, so on-disk structs need no alignment and no endianness fixups when
// read in place.
macro_rules! primitive(
    ($wrapper:ident, $bits:expr, $primitive:ident) => {
        #[repr(transparent)]
        #[derive(Clone, Copy, Default)]
        pub struct $wrapper([u8; $bits / 8]);
        impl $wrapper {
            #[inline]
            pub const fn get(self) -> $primitive {
                <$primitive>::from_le_bytes(self.0)
            }
            #[inline]
            pub const fn new(primitive: $primitive) -> Self {
                Self(<$primitive>::to_le_bytes(primitive))
            }
        }
        impl From<$primitive> for $wrapper {
            fn from(primitive: $primitive) -> Self {
                Self::new(primitive)
            }
        }
        impl From<$wrapper> for $primitive {
            fn from(wrapper: $wrapper) -> Self {
                wrapper.get()
            }
        }
        impl core::fmt::Debug for $wrapper {
            fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                // Fixed-width hex, two digits per byte of the primitive.
                write!(f, "{:#0width$x}", self.get(), width = 2 * core::mem::size_of::<$primitive>())
            }
        }
    }
);
primitive!(U16, 16, u16);
primitive!(U32, 32, u32);
primitive!(U64, 64, u64);
/// On-disk magic field; compared as raw bytes.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Magic(pub [u8; MAGIC_LEN]);
/// Newtype for a byte offset into the image (little-endian u32 on disk).
#[repr(transparent)]
#[derive(Clone, Copy, Debug)]
pub struct Offset(pub U32);
/// Newtype for a byte length (little-endian u32 on disk).
#[repr(transparent)]
#[derive(Clone, Copy, Debug)]
pub struct Length(pub U32);
/// Image header, stored at offset 0. `packed` keeps the on-disk layout
/// exact; all fields are byte-array wrappers, so reads stay unaligned-safe.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct Header {
    pub magic: Magic,
    pub inode_table_offset: Offset,
    pub initfs_size: U64,
    pub page_size: U16,
    pub root_inode: U16,
    pub inode_count: U16,
    pub bootstrap_entry: U64,
}
const _: () = {
    // Ensure the offsets of fields used by the bootloader stay stable.
    assert!(offset_of!(Header, bootstrap_entry) == 0x1a);
};
/// Per-inode record in the inode table: type code plus the (offset, length)
/// of the inode's data region within the image.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct InodeHeader {
    pub type_: U32,
    pub length: Length,
    pub offset: Offset,
}
/// Inode type codes as stored in `InodeHeader::type_`.
#[derive(Clone, Copy, Debug)]
#[repr(u8)]
pub enum InodeType {
    RegularFile = 0x0,
    ExecutableFile = 0x1,
    Dir = 0x2,
    Link = 0x3,
    // All other bit patterns are reserved... for now.
}
impl TryFrom<u32> for InodeType {
    type Error = ();
    /// Decode a raw on-disk type code; reserved values yield `Err(())`.
    fn try_from(value: u32) -> Result<Self, ()> {
        // Named constants keep the match tied to the enum discriminants
        // instead of repeating the raw numbers.
        const REGULAR: u32 = InodeType::RegularFile as u32;
        const EXECUTABLE: u32 = InodeType::ExecutableFile as u32;
        const DIR: u32 = InodeType::Dir as u32;
        const LINK: u32 = InodeType::Link as u32;
        match value {
            REGULAR => Ok(InodeType::RegularFile),
            EXECUTABLE => Ok(InodeType::ExecutableFile),
            DIR => Ok(InodeType::Dir),
            LINK => Ok(InodeType::Link),
            _ => Err(()),
        }
    }
}
/// On-disk directory entry: inode number plus the (offset, length) of the
/// entry's name bytes elsewhere in the image.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct DirEntry {
    pub inode: U16,
    pub name_len: U16,
    pub name_offset: Offset,
}
// SAFETY: all three types are `repr(C, packed)` compositions of byte-array
// newtypes, so every bit pattern is a valid value and there are no alignment
// requirements — exactly what `plain::Plain` demands.
unsafe impl plain::Plain for Header {}
unsafe impl plain::Plain for InodeHeader {}
unsafe impl plain::Plain for DirEntry {}
+31
View File
@@ -0,0 +1,31 @@
[package]
name = "redox-initfs-tools"
version = "0.2.0"
authors = ["4lDO2 <4lDO2@protonmail.com>", "Kamil Koczurek <koczurekk@gmail.com>"]
edition = "2021"
description = "Tooling to archive a directory into an userspace bootstrap image (initfs)"
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[[bin]]
name = "redox-initfs-ar"
path = "src/bin/archive.rs"
[[bin]]
name = "redox-initfs-dump"
path = "src/bin/dump.rs"
[dependencies]
anyhow.workspace = true
clap = {workspace = true, features = ["cargo"]}
env_logger = "0.8"
log.workspace = true
pathdiff = "0.2.1"
plain.workspace = true
twox-hash = "1.6"
redox-initfs = {path = ".."}
[lints]
workspace = true
@@ -0,0 +1 @@
file.txt
@@ -0,0 +1 @@
This is a file meant to be used in a redox-initfs test.
@@ -0,0 +1,54 @@
use std::path::Path;
use anyhow::Result;
use clap::{Arg, Command};
use redox_initfs_tools::{self as archive, Args, DEFAULT_MAX_SIZE};
/// CLI entry point: parse the SOURCE, BOOTSTRAP_CODE and OUTPUT arguments
/// and hand them to the archiving library with the default size cap.
fn main() -> Result<()> {
    let matches = Command::new("redox-initfs-ar")
        .about("create an initfs image from a directory")
        .version(clap::crate_version!())
        .author(clap::crate_authors!())
        // TODO: support non-utf8 paths (applies to other paths as well)
        .arg(
            Arg::new("SOURCE")
                .required(true)
                .help("Specify the source directory to build the image from."),
        )
        .arg(
            Arg::new("BOOTSTRAP_CODE")
                .required(true)
                .help("Specify the bootstrap ELF file to include in the image."),
        )
        .arg(
            Arg::new("OUTPUT")
                .required(true)
                .long("output")
                .short('o')
                .help("Specify the path of the new image file."),
        )
        .get_matches();
    env_logger::init();
    let source = matches
        .get_one::<String>("SOURCE")
        .expect("expected the required arg SOURCE to exist");
    let bootstrap_code = matches
        .get_one::<String>("BOOTSTRAP_CODE")
        .expect("expected the required arg BOOTSTRAP_CODE to exist");
    let destination = matches
        .get_one::<String>("OUTPUT")
        .expect("expected the required arg OUTPUT to exist");
    let args = Args {
        source: Path::new(source),
        bootstrap_code: Path::new(bootstrap_code),
        destination_path: Path::new(destination),
        max_size: DEFAULT_MAX_SIZE,
    };
    archive::archive(&args)
}
@@ -0,0 +1,112 @@
use std::{ffi::OsStr, path::Path};
use anyhow::{Context, Result};
use clap::{Arg, Command};
use redox_initfs::{InitFs, InodeKind};
/// CLI entry point: load the IMAGE file into memory, print its header, then
/// one human-readable line per inode (dir listings, file xxHash64 digests,
/// link targets). Per-inode failures are printed and skipped, not fatal.
fn main() -> Result<()> {
    let matches = Command::new("redox-initfs-dump")
        .about("dump initfs metadata")
        .version(clap::crate_version!())
        .author(clap::crate_authors!())
        .arg(
            Arg::new("IMAGE")
                .required(true)
                .help("specify the image to dump"),
        )
        .get_matches();
    // TODO: support non-utf8 paths
    let source = matches
        .get_one::<String>("IMAGE")
        .expect("expected the required arg IMAGE to exist");
    let bytes = std::fs::read(source).context("failed to read image into memory")?;
    // `None`: accept whatever page size the image declares.
    let initfs = InitFs::new(&bytes, None).context("failed to parse initfs header")?;
    println!("{:#?}", initfs.header());
    for inode in initfs.all_inodes() {
        print!("{:?}: ", inode);
        let inode_struct = match initfs.get_inode(inode) {
            Some(s) => s,
            None => {
                println!("failed to obtain.");
                continue;
            }
        };
        match inode_struct.kind() {
            InodeKind::Unknown => println!("(unknown)"),
            InodeKind::Dir(dir) => {
                print!("dir{{");
                let ec = match dir.entry_count().ok() {
                    Some(c) => c,
                    None => {
                        println!("(failed to get entry count)}}");
                        continue;
                    }
                };
                println!("entries=[");
                for entry in 0..ec {
                    let entry = match dir.get_entry(entry).ok().flatten() {
                        Some(e) => e,
                        None => {
                            println!("\t(unknown),");
                            continue;
                        }
                    };
                    let name = match entry.name().ok() {
                        Some(name) => name,
                        None => {
                            println!("\t(unknown name),");
                            continue;
                        }
                    };
                    println!(
                        "\t`{}` => {:?},",
                        String::from_utf8_lossy(name),
                        entry.inode()
                    );
                }
                println!("]}}");
            }
            InodeKind::File(file) => {
                print!("file{{");
                match file.data().ok() {
                    Some(d) => {
                        use std::hash::Hasher;
                        // File contents are summarized as a digest, not dumped.
                        let mut hasher = twox_hash::XxHash64::with_seed(0);
                        hasher.write(d);
                        print!("len={}, hash={:#0x}", d.len(), hasher.finish());
                    }
                    None => {
                        print!("(failed to get data)");
                    }
                }
                println!("}}");
            }
            InodeKind::Link(link) => {
                print!("link{{");
                match link.data().ok() {
                    Some(d) => {
                        use std::os::unix::ffi::OsStrExt;
                        print!("dst={}", Path::new(OsStr::from_bytes(d)).display());
                    }
                    None => {
                        print!("(failed to get data)");
                    }
                }
                println!("}}");
            }
        }
    }
    Ok(())
}
+589
View File
@@ -0,0 +1,589 @@
use std::convert::{TryFrom, TryInto};
use std::fs::{DirEntry, File, OpenOptions};
use std::io::{prelude::*, SeekFrom};
use std::path::{Path, PathBuf};
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::{FileExt, FileTypeExt, PermissionsExt};
use anyhow::{anyhow, bail, Context, Result};
use redox_initfs::types as initfs;
const KIBIBYTE: u64 = 1024;
const MEBIBYTE: u64 = KIBIBYTE * 1024;
// Debug builds get a larger cap — 256 MiB vs 64 MiB — presumably because
// unoptimized bootstrap binaries are bigger; TODO confirm the rationale.
#[cfg(debug_assertions)]
pub const DEFAULT_MAX_SIZE: u64 = 256 * MEBIBYTE;
#[cfg(not(debug_assertions))]
pub const DEFAULT_MAX_SIZE: u64 = 64 * MEBIBYTE;
// FIXME make this configurable to handle systems with 16k and 64k pages.
const PAGE_SIZE: u16 = 4096;
/// What a directory entry contains before it is written into the image.
pub enum EntryKind {
    // `executable` is derived from the source file's owner-execute bit.
    File { file: File, executable: bool },
    Dir(Vec<Entry>),
    Link(PathBuf),
}
/// A named node in the in-memory tree built from the source directory.
pub struct Entry {
    pub name: Vec<u8>,
    pub kind: EntryKind,
}
/// Mutable build state threaded through all of the writer functions.
struct State<'path> {
    // Output image; the guard deletes the partial file on early failure.
    file: OutputImageGuard<'path>,
    // Bump-allocator cursor: next free byte in the image.
    offset: u64,
    // Hard cap on the image size.
    max_size: u64,
    // Scratch buffer for streaming file contents.
    buffer: Box<[u8]>,
    // Inode headers accumulated in memory, flushed at the end.
    inode_table: InodeTable,
}
/// Write all of `buf` into the output image at `offset` (a positional write
/// that does not move the file cursor) and trace the range for debugging.
fn write_all_at(file: &File, buf: &[u8], offset: u64, r#where: &str) -> Result<()> {
    file.write_all_at(buf, offset)?;
    let end = offset + buf.len() as u64;
    log::trace!("Wrote {}..{} within {}", offset, end, r#where);
    Ok(())
}
/// Recursively read `path` into an in-memory [`Entry`] tree.
///
/// Regular files are opened (their contents are streamed later); symlink
/// targets are rewritten relative to the directory being scanned; sockets,
/// FIFOs and device nodes are rejected because the initfs format cannot
/// represent them. `root_path` is only threaded through to recursive calls
/// here.
fn read_directory(path: &Path, root_path: &Path) -> Result<Vec<Entry>> {
    let read_dir = path
        .read_dir()
        .with_context(|| anyhow!("failed to read directory `{}`", path.to_string_lossy(),))?;
    let entries = read_dir
        .map(|result| {
            let entry = result.with_context(|| {
                anyhow!(
                    "failed to get a directory entry from `{}`",
                    path.to_string_lossy(),
                )
            })?;
            let metadata = entry.metadata().with_context(|| {
                anyhow!(
                    "failed to get metadata for `{}`",
                    entry.path().to_string_lossy(),
                )
            })?;
            let file_type = metadata.file_type();
            // Shared error constructor for the file kinds initfs cannot hold.
            let unsupported_type = |ty: &str, entry: &DirEntry| {
                Err(anyhow!(
                    "failed to include {} at `{}`: not supported by redox-initfs",
                    ty,
                    entry.path().to_string_lossy()
                ))
            };
            let name = entry
                .path()
                .file_name()
                .context("expected path to have a valid filename")?
                .as_bytes()
                .to_owned();
            let entry_kind = if file_type.is_socket() {
                return unsupported_type("socket", &entry);
            } else if file_type.is_fifo() {
                return unsupported_type("FIFO", &entry);
            } else if file_type.is_block_device() {
                return unsupported_type("block device", &entry);
            } else if file_type.is_char_device() {
                return unsupported_type("character device", &entry);
            } else if file_type.is_file() {
                // Owner-execute bit decides regular vs executable inode type.
                let executable = metadata.permissions().mode() & 0o100 != 0;
                EntryKind::File {
                    file: File::open(entry.path()).with_context(|| {
                        anyhow!("failed to open file `{}`", entry.path().to_string_lossy(),)
                    })?,
                    executable,
                }
            } else if file_type.is_dir() {
                EntryKind::Dir(read_directory(&entry.path(), root_path)?)
            } else if file_type.is_symlink() {
                let link_file_path = entry.path();
                let link_path = std::fs::read_link(&link_file_path)?;
                // Resolve relative link targets against the link's parent so
                // the stored path is relative to the scanned directory.
                let cannonical = if link_path.is_absolute() {
                    link_path.clone()
                } else {
                    let Some(link_parent) = link_file_path.parent() else {
                        bail!("Link at `{}` has no parent", link_file_path.display())
                    };
                    link_parent.canonicalize()?.join(link_path.clone())
                };
                let dir_path = path
                    .canonicalize()
                    .context("Failed to cannonicalize path")?;
                let path = pathdiff::diff_paths(cannonical, &dir_path).ok_or_else(|| {
                    anyhow!(
                        "Failed to diff symlink path [{}] to path [{}]",
                        link_path.display(),
                        dir_path.display()
                    )
                })?;
                EntryKind::Link(path)
            } else {
                return Err(anyhow!(
                    "unknown file type at `{}`",
                    entry.path().to_string_lossy()
                ));
            };
            Ok(Entry {
                kind: entry_kind,
                name,
            })
        })
        .collect::<Result<Vec<_>>>()?;
    Ok(entries)
}
/// Reserve `size` bytes in the output image, rounding the new cursor up to
/// the next page boundary, and return the offset of the reservation.
///
/// `why` is only used in the debug log. Fails when the reservation would
/// exceed `state.max_size` — or overflow u64: the original unchecked
/// `state.offset + size` (and `next_multiple_of`) could panic in debug or
/// wrap in release on an absurdly large input size.
fn bump_alloc(state: &mut State, size: u64, why: &str) -> Result<u64> {
    let end = state
        .offset
        .checked_add(size)
        .and_then(|end| end.checked_next_multiple_of(PAGE_SIZE.into()))
        .ok_or_else(|| anyhow!("bump allocation failed: size overflow"))?;
    if end > state.max_size {
        return Err(anyhow!("bump allocation failed: max limit reached"));
    }
    let offset = state.offset;
    state.offset = end;
    log::debug!("Allocating range {}..{} in {}", offset, state.offset, why);
    Ok(offset)
}
/// Location (offset) and size of a blob that was just written into the
/// output image; both in bytes, both u32 as the on-disk format requires.
struct WriteResult {
    size: u32,
    offset: u32,
}
fn allocate_and_write_file(state: &mut State, mut file: &File) -> Result<WriteResult> {
let size = file
.seek(SeekFrom::End(0))
.context("failed to seek to end")?;
let size: u32 = size.try_into().context("file too large")?;
let offset: u32 = bump_alloc(state, size.into(), "allocate space for file")
.context("failed to allocate space for file")?
.try_into()
.context("file offset too high")?;
let buffer_size: u32 = state.buffer.len().try_into().context("buffer too large")?;
file.seek(SeekFrom::Start(0))
.context("failed to seek to start")?;
let mut relative_offset = 0;
// TODO: If this would ever turn out to be a bottleneck, then perhaps we could use
// copy_file_range in `nix`.
while relative_offset < size {
let allowed_length = std::cmp::min(buffer_size, size - relative_offset);
let allowed_length =
usize::try_from(allowed_length).expect("expected buffer size not to be outside usize");
file.read(&mut state.buffer[..allowed_length])
.context("failed to read from source file")?;
write_all_at(
&state.file,
&state.buffer[..allowed_length],
u64::from(offset + relative_offset),
"allocate_and_write_file buffer chunk",
)
.context("failed to write source file into destination image")?;
relative_offset += buffer_size;
}
Ok(WriteResult { size, offset })
}
fn allocate_and_write_link(state: &mut State, link: &Path) -> Result<WriteResult> {
let data = link.as_os_str().as_bytes();
let size: u32 = data.len().try_into().unwrap();
let offset: u32 = bump_alloc(state, size.into(), "allocate space for file")
.context("failed to allocate space for file")?
.try_into()
.context("file offset too high")?;
write_all_at(
&state.file,
data,
u64::from(offset),
"allocate_and_write_link target path",
)
.context("failed to write source file into destination image")?;
Ok(WriteResult { size, offset })
}
/// Recursively write a directory's contents into the image.
///
/// First allocates this directory's entry table, then for each child writes
/// its payload (file bytes, link target, or recursive subdirectory table),
/// allocates an inode for it, writes its name, and finally fills in the
/// corresponding slot of the entry table. Returns the entry table's
/// location so the caller can allocate this directory's own inode.
fn allocate_and_write_dir(state: &mut State, dir: &[Entry]) -> Result<WriteResult> {
    let entry_size =
        u16::try_from(std::mem::size_of::<initfs::DirEntry>()).context("entry size too large")?;
    let entry_count = u16::try_from(dir.len()).context("too many subdirectories")?;
    // u16 * u16 always fits in u32, but keep the explicit check.
    let entry_table_length = u32::from(entry_count)
        .checked_mul(u32::from(entry_size))
        .ok_or_else(|| anyhow!("entry table length too large when multiplying by size"))?;
    let entry_table_offset: u32 =
        bump_alloc(state, entry_table_length.into(), "allocate entry table")
            .context("failed to allocate entry table")?
            .try_into()
            .context("directory entries offset too high")?;
    for (index, entry) in dir.iter().enumerate() {
        // Write the child's payload first; its location becomes the inode.
        let (write_result, ty) = match entry.kind {
            EntryKind::Dir(ref subdir) => {
                let write_result = allocate_and_write_dir(state, subdir).with_context(|| {
                    anyhow!(
                        "failed to copy directory entries from `{}` into image",
                        String::from_utf8_lossy(&entry.name)
                    )
                })?;
                (write_result, initfs::InodeType::Dir)
            }
            EntryKind::File {
                ref file,
                executable,
            } => {
                let write_result = allocate_and_write_file(state, file)
                    .context("failed to copy file into image")?;
                let type_ = if executable {
                    initfs::InodeType::ExecutableFile
                } else {
                    initfs::InodeType::RegularFile
                };
                (write_result, type_)
            }
            EntryKind::Link(ref path) => {
                let write_result = allocate_and_write_link(state, path)
                    .context("failed to copy symbolic link into image")?;
                (write_result, initfs::InodeType::Link)
            }
        };
        // NOTE(review): the expect message says u32, but this narrows to u16.
        let index: u16 = index
            .try_into()
            .expect("expected dir entry count not to exceed u32");
        let inode = state.inode_table.allocate(ty, write_result);
        // The name bytes live outside the entry table; the entry stores only
        // their (offset, length).
        let (name_offset, name_len) = {
            let name_len: u16 = entry.name.len().try_into().context("file name too long")?;
            let offset: u32 = bump_alloc(state, u64::from(name_len), "allocate file name")
                .context("failed to allocate space for file name")?
                .try_into()
                .context("file name offset too high up")?;
            write_all_at(&state.file, &entry.name, offset.into(), "writing file name")
                .context("failed to write file name")?;
            (offset, name_len)
        };
        {
            // Serialize the DirEntry through a byte buffer so the packed
            // struct can be written without alignment concerns.
            let mut direntry_buf = [0_u8; std::mem::size_of::<initfs::DirEntry>()];
            let direntry = plain::from_mut_bytes::<initfs::DirEntry>(&mut direntry_buf)
                .expect("expected dir entry struct to have alignment 1, and buffer size to match");
            log::debug!(
                "Linking inode {} into dir entry index {}, file name `{}`",
                inode,
                index,
                String::from_utf8_lossy(&entry.name)
            );
            *direntry = initfs::DirEntry {
                inode: inode.into(),
                name_len: name_len.into(),
                name_offset: initfs::Offset(name_offset.into()),
            };
            write_all_at(
                &state.file,
                &direntry_buf,
                u64::from(entry_table_offset + u32::from(index) * u32::from(entry_size)),
                "allocate_and_write_dir entry",
            )
            .context("failed to write dir entry struct to image")?;
        }
    }
    Ok(WriteResult {
        size: entry_table_length,
        offset: entry_table_offset,
    })
}
/// Write the whole directory tree into the image, then allocate the root
/// directory's inode and return its number in on-disk (little-endian) form.
fn allocate_contents(state: &mut State, dir: &[Entry]) -> Result<initfs::U16> {
    let write_result = allocate_and_write_dir(state, dir)
        .context("failed to allocate and write all directories and files")?;
    let root = state
        .inode_table
        .allocate(initfs::InodeType::Dir, write_result);
    Ok(root.into())
}
/// Accumulates inode headers in memory; they are flushed to the image at the
/// end of the build by `write_inode_table`.
struct InodeTable {
    entries: Vec<initfs::InodeHeader>,
}
impl InodeTable {
    /// Create an empty table.
    fn new() -> Self {
        Self {
            entries: Vec::new(),
        }
    }
    /// Current number of inodes; the on-disk format stores this as u16.
    fn count(&self) -> u16 {
        u16::try_from(self.entries.len()).expect("inode count too large")
    }
    /// Append a new inode header describing `write_result` and return the
    /// newly assigned inode number.
    fn allocate(&mut self, ty: initfs::InodeType, write_result: WriteResult) -> u16 {
        let id = self.entries.len();
        self.entries.push(initfs::InodeHeader {
            type_: (ty as u32).into(),
            length: initfs::Length(write_result.size.into()),
            offset: initfs::Offset(write_result.offset.into()),
        });
        u16::try_from(id).expect("inode count too large")
    }
}
/// Allocate space for the accumulated inode table and write every inode
/// header into it, returning the table's offset for the image header.
fn write_inode_table(state: &mut State) -> Result<initfs::Offset> {
    log::debug!("there are {} inodes", state.inode_table.count());
    let inode_size: u32 = std::mem::size_of::<initfs::InodeHeader>()
        .try_into()
        .expect("inode header length cannot fit within u32");
    let inode_table_length = {
        u64::from(inode_size)
            .checked_mul(u64::from(state.inode_table.count()))
            .ok_or_else(|| anyhow!("inode table too large"))?
    };
    let inode_table_offset = bump_alloc(state, inode_table_length, "allocate inode table")?;
    let inode_table_offset =
        u32::try_from(inode_table_offset).with_context(|| "inode table located too far away")?;
    for (i, inode) in state.inode_table.entries.iter().enumerate() {
        // TODO: Use main buffer and write in bulk.
        // Serialize through a byte buffer so the packed struct is written
        // without alignment concerns.
        let mut inode_buf = [0_u8; std::mem::size_of::<initfs::InodeHeader>()];
        let inode_hdr = plain::from_mut_bytes::<initfs::InodeHeader>(&mut inode_buf)
            .expect("expected inode struct to have alignment 1, and buffer size to match");
        *inode_hdr = *inode;
        log::debug!(
            "Writing inode index {} from offset {}",
            i,
            inode_table_offset
        );
        write_all_at(
            &state.file,
            &inode_buf,
            u64::from(inode_table_offset + u32::try_from(i).unwrap() * inode_size),
            "write_inode",
        )
        .context("failed to write inode struct to disk image")?;
    }
    let inode_table_offset = initfs::Offset(inode_table_offset.into());
    Ok(inode_table_offset)
}
/// RAII wrapper for the output image file: unless `ok` is set before drop,
/// the (partially written) file at `path` is deleted, so a failed build
/// never leaves a truncated image behind.
struct OutputImageGuard<'a> {
    file: File,
    path: &'a Path,
    // Set to true once the image has been completed and renamed into place.
    ok: bool,
}
// Deref to File so the guard can be passed wherever a &File is expected.
impl std::ops::Deref for OutputImageGuard<'_> {
    type Target = File;
    fn deref(&self) -> &Self::Target {
        &self.file
    }
}
impl std::ops::DerefMut for OutputImageGuard<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.file
    }
}
impl Drop for OutputImageGuard<'_> {
    fn drop(&mut self) {
        if !self.ok {
            // Best-effort cleanup; the file may already have been renamed.
            let _ = std::fs::remove_file(self.path);
        }
    }
}
/// Borrowed inputs for [`archive`]: where to read the tree from, which
/// bootstrap ELF to embed, where to write the image, and the size cap.
pub struct Args<'a> {
    pub destination_path: &'a Path,
    pub max_size: u64,
    pub source: &'a Path,
    pub bootstrap_code: &'a Path,
}
/// Read the source directory tree and build the initfs image from it.
pub fn archive(args: &Args) -> Result<()> {
    let root = read_directory(args.source, args.source).context("failed to read root")?;
    build_initfs(args.destination_path, args.max_size, args.bootstrap_code, root)
}
/// Assemble the initfs image: bootstrap code first, then the directory
/// tree, then the inode table, and finally the header at offset 0.
///
/// The image is written to `<destination>.<ext>.partial` under an
/// [`OutputImageGuard`] and atomically renamed into place on success, so a
/// failed build leaves no usable-looking artifact behind.
pub fn build_initfs(
    destination_path: &Path,
    max_size: u64,
    bootstrap_code: &Path,
    root: Vec<Entry>,
) -> std::result::Result<(), anyhow::Error> {
    let previous_extension = destination_path.extension().map_or("", |ext| {
        ext.to_str()
            .expect("expected destination path to be valid UTF-8")
    });
    // Refuse to clobber anything that exists but is not a regular file.
    if !destination_path
        .metadata()
        .map_or(true, |metadata| metadata.is_file())
    {
        return Err(anyhow!("Destination file must be a file"));
    }
    let destination_temp_path =
        destination_path.with_extension(format!("{}.partial", previous_extension));
    let destination_temp_file = OpenOptions::new()
        .read(false)
        .write(true)
        .create(true)
        .truncate(true)
        .create_new(false)
        .open(&destination_temp_path)
        .context("failed to open destination file")?;
    let guard = OutputImageGuard {
        file: destination_temp_file,
        path: &destination_temp_path,
        ok: false,
    };
    const BUFFER_SIZE: usize = 8192;
    let mut state = State {
        file: guard,
        offset: 0,
        max_size,
        buffer: vec![0_u8; BUFFER_SIZE].into_boxed_slice(),
        inode_table: InodeTable::new(),
    };
    // NOTE: The header is always stored at offset zero.
    let header_offset = bump_alloc(&mut state, 4096, "allocate header")?;
    assert_eq!(header_offset, 0);
    // The bootstrap blob sits right after the header page; its entry point
    // is recorded in the header so no inode is allocated for it.
    allocate_and_write_file(
        &mut state,
        &File::open(bootstrap_code).with_context(|| {
            anyhow!(
                "failed to open bootstrap code file `{}`",
                bootstrap_code.to_string_lossy(),
            )
        })?,
    )?;
    // NOTE(review): the bootstrap file is read a second time here just to
    // extract the ELF entry point.
    let bootstrap_data = std::fs::read(bootstrap_code).with_context(|| {
        anyhow!(
            "failed to read bootstrap code file `{}`",
            bootstrap_code.to_string_lossy(),
        )
    })?;
    let bootstrap_entry = elf_entry(&bootstrap_data);
    let root_inode = allocate_contents(&mut state, &root)?;
    let inode_table_offset = write_inode_table(&mut state)?;
    {
        // Serialize the header through a byte buffer (packed struct) and
        // write it last, once all offsets are known.
        let mut header_bytes = [0_u8; std::mem::size_of::<initfs::Header>()];
        let header = plain::from_mut_bytes(&mut header_bytes)
            .expect("expected header size to be sufficient and alignment to be 1");
        *header = initfs::Header {
            magic: initfs::Magic(initfs::MAGIC),
            inode_count: state.inode_table.count().into(),
            inode_table_offset,
            bootstrap_entry: bootstrap_entry.into(),
            initfs_size: state
                .file
                .metadata()
                .context("failed to get initfs size")?
                .len()
                .into(),
            page_size: PAGE_SIZE.into(),
            root_inode,
        };
        write_all_at(&state.file, &header_bytes, header_offset, "writing header")
            .context("failed to write header")?;
    }
    // Rename before marking the guard ok: if the rename fails, the guard
    // still removes the temp file.
    std::fs::rename(&destination_temp_path, destination_path)
        .context("failed to rename output image")?;
    state.file.ok = true;
    Ok(())
}
/// Extract the entry-point address (`e_entry`) from a raw ELF image.
///
/// Supports 32- and 64-bit images in either endianness; in both layouts the
/// entry point lives at offset 0x18, only its width differs.
///
/// # Panics
/// Panics with a descriptive message when `data` is not an ELF image, when
/// the header is truncated, or for unsupported `EI_CLASS`/`EI_DATA`
/// combinations — a malformed bootstrap image is a fatal input error for
/// this build tool. (The original `&data[..4]` comparison instead produced
/// an opaque slice-index panic on inputs shorter than four bytes.)
fn elf_entry(data: &[u8]) -> u64 {
    // `starts_with` also handles inputs shorter than the magic.
    assert!(
        data.starts_with(b"\x7FELF"),
        "bootstrap image is not an ELF file"
    );
    match (data[4], data[5]) {
        // EI_CLASS = 1 (32-bit): e_entry is a u32 at offset 0x18.
        (1, 1) => u32::from_le_bytes(
            <[u8; 4]>::try_from(&data[0x18..0x18 + 4]).expect("conversion cannot fail"),
        ) as u64,
        (1, 2) => u32::from_be_bytes(
            <[u8; 4]>::try_from(&data[0x18..0x18 + 4]).expect("conversion cannot fail"),
        ) as u64,
        // EI_CLASS = 2 (64-bit): e_entry is a u64 at offset 0x18.
        (2, 1) => u64::from_le_bytes(
            <[u8; 8]>::try_from(&data[0x18..0x18 + 8]).expect("conversion cannot fail"),
        ),
        (2, 2) => u64::from_be_bytes(
            <[u8; 8]>::try_from(&data[0x18..0x18 + 8]).expect("conversion cannot fail"),
        ),
        (ei_class, ei_data) => {
            panic!("Unsupported ELF EI_CLASS {} EI_DATA {}", ei_class, ei_data);
        }
    }
}
@@ -0,0 +1,112 @@
use std::{collections::HashMap, path::Path};
use anyhow::{anyhow, Context, Result};
use redox_initfs::{InitFs, InodeKind, InodeStruct};
#[derive(Debug, Clone, PartialEq)]
enum Node {
Link { to: Vec<u8> },
File { data: Vec<u8> },
Dir(HashMap<Vec<u8>, Node>),
Unknown,
}
impl Node {
fn link(to: impl Into<Vec<u8>>) -> Self {
Node::Link { to: to.into() }
}
fn file(data: impl Into<Vec<u8>>) -> Self {
Node::File { data: data.into() }
}
fn dir(entries: impl IntoIterator<Item = (impl Into<Vec<u8>>, Node)>) -> Self {
Self::Dir(
entries
.into_iter()
.map(|(name, node)| (name.into(), node))
.collect(),
)
}
}
/// Recursively materialize the initfs subtree rooted at `inode` into an
/// in-memory [`Node`] so it can be compared with `assert_eq!`.
fn build_tree<'a>(fs: InitFs<'a>, inode: InodeStruct<'a>) -> anyhow::Result<Node> {
    use InodeKind::*;
    let node = match inode.kind() {
        File(file) => {
            let data = file.data().context("failed to get file data")?.to_owned();
            Node::File { data }
        }
        Link(link) => {
            let data = link.data().context("failed to get link data")?.to_owned();
            Node::Link { to: data }
        }
        Dir(dir) => {
            let mut entries = HashMap::new();
            for idx in 0..dir
                .entry_count()
                .context("failed to get inode entry count")?
            {
                let entry = dir
                    .get_entry(idx)
                    .context("failed to get entry for index")?
                    .ok_or_else(|| anyhow!("no entry found"))?;
                let entry_name = entry.name().context("failed to get entry name")?;
                let inode = fs
                    .get_inode(entry.inode())
                    .context("failed to load file inode")?;
                // Recurse into the child; `fs` is Copy so this is cheap.
                let entry_node = build_tree(fs, inode)?;
                entries.insert(entry_name.to_owned(), entry_node);
            }
            Node::Dir(entries)
        }
        Unknown => Node::Unknown,
    };
    Ok(node)
}
// Round-trip test: archive the `data` fixture directory into a temp image,
// parse it back with the library crate, and compare the reconstructed tree
// against a hand-written expected tree.
#[test]
fn archive_and_read() -> Result<()> {
    env_logger::init();
    let args = redox_initfs_tools::Args {
        destination_path: &Path::new(env!("CARGO_TARGET_TMPDIR")).join("out.img"),
        source: Path::new("data"),
        bootstrap_code: Path::new("data/foo/bootstrap.elf"),
        max_size: redox_initfs_tools::DEFAULT_MAX_SIZE,
    };
    redox_initfs_tools::archive(&args).context("failed to archive")?;
    let data = std::fs::read(args.destination_path).context("failed to read new archive")?;
    // `None`: accept whatever page size the tool wrote.
    let filesystem =
        redox_initfs::InitFs::new(&data, None).context("failed to parse archive header")?;
    let inode = filesystem
        .get_inode(filesystem.root_inode())
        .ok_or_else(|| anyhow!("Failed to get root inode"))?;
    let tree = build_tree(filesystem, inode)?;
    let reference_tree = Node::dir([(
        b"foo",
        Node::dir([
            (
                b"bootstrap.elf".as_slice(),
                Node::file("\x7FELF\x01\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"),
            ),
            (b"file-link.txt".as_slice(), Node::link(b"file.txt")),
            (
                b"file.txt".as_slice(),
                Node::file(b"This is a file meant to be used in a redox-initfs test.\n"),
            ),
        ]),
    )]);
    assert_eq!(tree, reference_tree);
    Ok(())
}