Red Bear OS — microkernel OS in Rust, based on Redox
Derivative of Redox OS (https://www.redox-os.org) adding:
- AMD GPU driver (amdgpu) via LinuxKPI compat layer
- ext4 filesystem support (ext4d scheme daemon)
- ACPI fixes for AMD bare metal (x2APIC, DMAR, IVRS, MCFG)
- Custom branding (hostname, os-release, boot identity)

The build system is full upstream Redox with an RBOS overlay in local/. Patches for kernel, base, and relibc are symlinked from local/patches/ and protected from make clean/distclean. Custom recipes live in local/recipes/ with symlinks into the recipes/ search path.

Build: make all CONFIG_NAME=redbear-full
Sync:  ./local/scripts/sync-upstream.sh
This commit is contained in:
@@ -0,0 +1,758 @@
|
||||
use pkg::PackageError;
|
||||
use pkg::{Package, PackageName};
|
||||
|
||||
use crate::config::CookConfig;
|
||||
use crate::cook::package::{package_source_paths, package_target};
|
||||
use crate::cook::pty::PtyOut;
|
||||
use crate::cook::script::*;
|
||||
use crate::cook::{fetch, fs::*};
|
||||
use crate::recipe::Recipe;
|
||||
use crate::recipe::{AutoDeps, CookRecipe};
|
||||
use crate::recipe::{BuildKind, OptionalPackageRecipe};
|
||||
use std::collections::VecDeque;
|
||||
use std::{
|
||||
collections::BTreeSet,
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
process::Command,
|
||||
str,
|
||||
time::SystemTime,
|
||||
};
|
||||
|
||||
use crate::{is_redox, log_to_pty};
|
||||
|
||||
/// Discover runtime package dependencies by inspecting the `DT_NEEDED`
/// entries of every ELF file staged under the given stage directories,
/// then matching those shared-library names against the contents of the
/// dependency pkgar archives.
///
/// Returns the set of package names (with `PackagePrefix::Any`) that
/// provide at least one needed shared library. Libraries provided by
/// relibc and the GCC runtime are assumed preinstalled and ignored.
/// All I/O failures are treated as "skip this entry", never as errors.
fn auto_deps_from_dynamic_linking(
    stage_dirs: &[PathBuf],
    dep_pkgars: &BTreeSet<(PackageName, PathBuf)>,
    logger: &PtyOut,
) -> BTreeSet<PackageName> {
    // (stage_dir, file_path) pairs for every regular file found below the
    // well-known binary/library locations.
    let mut paths = BTreeSet::new();
    // Canonicalized directories already walked — breaks symlink cycles.
    let mut visited = BTreeSet::new();
    let verbose = crate::config::get_config().cook.verbose;
    // Base directories may need to be updated for packages that place binaries in odd locations.
    let mut walk = VecDeque::new();

    for stage_dir in stage_dirs {
        walk.push_back((stage_dir, stage_dir.join("usr/bin")));
        walk.push_back((stage_dir, stage_dir.join("usr/games")));
        walk.push_back((stage_dir, stage_dir.join("usr/lib")));
        walk.push_back((stage_dir, stage_dir.join("usr/libexec")));
    }

    // Recursively (DFS) walk each directory to ensure nested libs and bins are checked.
    // Subdirectories are pushed to the FRONT of the deque, so nested dirs are
    // processed before the remaining roots.
    while let Some((rel_path, dir)) = walk.pop_front() {
        // Canonicalize so that symlinked paths dedupe to one visited entry;
        // also skips dirs that don't exist (e.g. a package without usr/games).
        let Ok(dir) = dir.canonicalize() else {
            continue;
        };
        if visited.contains(&dir) {
            #[cfg(debug_assertions)]
            log_to_pty!(
                logger,
                "DEBUG: auto_deps => Skipping `{dir:?}` (already visited)"
            );
            continue;
        }
        // Invariant: the contains() check above guarantees insert() is true.
        assert!(
            visited.insert(dir.clone()),
            "Directory `{:?}` should not be in visited\nVisited: {:#?}",
            dir,
            visited
        );

        let Ok(read_dir) = fs::read_dir(&dir) else {
            continue;
        };
        for entry_res in read_dir {
            let Ok(entry) = entry_res else { continue };
            let Ok(file_type) = entry.file_type() else {
                continue;
            };
            if file_type.is_file() {
                paths.insert((rel_path, entry.path()));
            } else if file_type.is_dir() {
                walk.push_front((rel_path, entry.path()));
            }
        }
    }

    // Parse each candidate file as ELF and collect DT_NEEDED library names.
    // Non-ELF files fail the Builder::read and are silently skipped.
    let mut needed = BTreeSet::new();
    for (rel_path, path) in paths {
        let Ok(file) = fs::File::open(&path) else {
            continue;
        };
        let read_cache = object::ReadCache::new(file);
        let Ok(object) = object::build::elf::Builder::read(&read_cache) else {
            continue;
        };
        let Some(dynamic_data) = object.dynamic_data() else {
            continue;
        };
        for dynamic in dynamic_data {
            let object::build::elf::Dynamic::String { tag, val } = dynamic else {
                continue;
            };
            if *tag == object::elf::DT_NEEDED {
                let Ok(name) = str::from_utf8(val) else {
                    continue;
                };
                // strip_prefix only affects the debug log; the name is
                // recorded regardless.
                if let Ok(relative_path) = path.strip_prefix(rel_path) {
                    if verbose {
                        log_to_pty!(logger, "DEBUG: {} needs {}", relative_path.display(), name);
                    }
                }
                needed.insert(name.to_string());
            }
        }
    }

    let mut missing = needed.clone();
    // relibc and friends will always be installed
    for preinstalled in &["libc.so.6", "libgcc_s.so.1", "libstdc++.so.6"] {
        missing.remove(*preinstalled);
    }

    // Match each needed library against the lib/ and usr/lib/ entries of the
    // dependency archives; every archive that provides a needed library
    // becomes an automatic dependency.
    let mut deps = BTreeSet::new();
    if let Ok(key_file) = pkgar_keys::PublicKeyFile::open("build/id_ed25519.pub.toml") {
        for (dep, archive_path) in dep_pkgars.iter() {
            let Ok(mut package) = pkgar::PackageFile::new(archive_path, &key_file.pkey) else {
                continue;
            };
            let Ok(entries) = pkgar_core::PackageSrc::read_entries(&mut package) else {
                continue;
            };
            for entry in entries {
                let Ok(entry_path) = pkgar::ext::EntryExt::check_path(&entry) else {
                    continue;
                };
                for prefix in &["lib", "usr/lib"] {
                    let Ok(child_path) = entry_path.strip_prefix(prefix) else {
                        continue;
                    };
                    let Some(child_name) = child_path.to_str() else {
                        continue;
                    };
                    if needed.contains(child_name) {
                        if verbose {
                            log_to_pty!(logger, "DEBUG: {} provides {}", dep, child_name);
                        }
                        deps.insert(dep.with_prefix(pkg::PackagePrefix::Any));
                        missing.remove(child_name);
                    }
                }
            }
        }
    }

    // Anything still in `missing` is needed by a staged binary but provided
    // by no declared dependency archive — reported, not fatal.
    if verbose {
        for name in missing {
            log_to_pty!(logger, "INFO: {} missing", name);
        }
    }

    deps
}
|
||||
|
||||
fn auto_deps_from_static_package_deps(
|
||||
build_dep_pkgars: &BTreeSet<(PackageName, PathBuf)>,
|
||||
dynamic_dep_pkgars: &BTreeSet<PackageName>,
|
||||
) -> Result<BTreeSet<PackageName>, PackageError> {
|
||||
let static_dep_pkgars: Vec<PackageName> = build_dep_pkgars
|
||||
.iter()
|
||||
.map(|x| x.0.clone())
|
||||
.filter(|x| !dynamic_dep_pkgars.contains(x))
|
||||
.collect();
|
||||
let pkgs = CookRecipe::get_package_deps_recursive(&static_dep_pkgars, false)?;
|
||||
|
||||
Ok(pkgs.into_iter().collect())
|
||||
}
|
||||
|
||||
/// Outcome of building one recipe.
pub struct BuildResult {
    // One stage directory per optional package, plus the main "stage" dir last.
    pub stage_dirs: Vec<PathBuf>,
    // Automatically discovered runtime dependencies (dynamic + static).
    pub auto_deps: BTreeSet<PackageName>,
    // True when the result was served from a previous build without rebuilding.
    pub cached: bool,
}
|
||||
|
||||
impl BuildResult {
|
||||
pub fn new(stage_dirs: Vec<PathBuf>, auto_deps: BTreeSet<PackageName>) -> Self {
|
||||
BuildResult {
|
||||
stage_dirs,
|
||||
auto_deps,
|
||||
cached: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cached(stage_dirs: Vec<PathBuf>, auto_deps: BTreeSet<PackageName>) -> Self {
|
||||
BuildResult {
|
||||
stage_dirs,
|
||||
auto_deps,
|
||||
cached: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Build one recipe: decide whether cached artifacts can be reused, prepare
/// the sysroot/toolchain dirs from dependency pkgars, generate a shell
/// script for the recipe's `BuildKind`, run it under the redoxer wrapper,
/// and split the produced files into per-feature stage directories.
///
/// Returns a `BuildResult` whose `cached` flag reports whether the build
/// was skipped. Errors are stringified (this function's error type is
/// `String`, unlike the fetch module's `crate::Result`).
pub fn build(
    recipe_dir: &Path,
    source_dir: &Path,
    target_dir: &Path,
    cook_recipe: &CookRecipe,
    cook_config: &CookConfig,
    logger: &PtyOut,
) -> Result<BuildResult, String> {
    let recipe = &cook_recipe.recipe;
    let name = &cook_recipe.name;
    // Recipes pulled in only as dependencies skip source-freshness checks.
    let check_source = !cook_recipe.is_deps;
    let sysroot_dir = get_sub_target_dir(target_dir, "sysroot");
    let toolchain_dir = get_sub_target_dir(target_dir, "toolchain");
    let auto_deps_file = get_sub_target_dir(target_dir, "auto_deps.toml");
    let stage_dirs = get_stage_dirs(&recipe.optional_packages, target_dir);
    // Each stage dir has a sibling <stage>.pkgar archive.
    let stage_pkgars: Vec<PathBuf> = stage_dirs
        .iter()
        .map(|p| p.with_added_extension("pkgar"))
        .collect();
    let cli_verbose = cook_config.verbose;
    let cli_jobs = cook_config.jobs;
    if recipe.build.kind == BuildKind::None {
        // metapackages don't need to do anything here
        return Ok(BuildResult::new(stage_dirs, BTreeSet::new()));
    }

    // Partition the recursive build deps into target pkgars and host pkgars.
    let mut dep_pkgars = BTreeSet::new();
    let mut dep_host_pkgars = BTreeSet::new();
    let build_deps = [
        &recipe.build.dependencies[..],
        &recipe.build.dev_dependencies[..],
    ]
    .concat();
    let build_deps =
        CookRecipe::get_build_deps_recursive(&build_deps, false).map_err(|e| format!("{:?}", e))?;
    for dependency in build_deps.iter() {
        let (_, pkgar, _) = dependency.stage_paths();
        if dependency.name.is_host() {
            dep_host_pkgars.insert((dependency.name.clone(), pkgar));
        } else {
            dep_pkgars.insert((dependency.name.clone(), pkgar));
        }
    }

    // Macro (not a closure) because it moves `dep_pkgars` by value and is
    // expanded at most once per control-flow path.
    macro_rules! make_auto_deps {
        ($cached:expr) => {
            build_auto_deps(
                recipe,
                &auto_deps_file,
                &stage_dirs,
                $cached,
                cook_config,
                dep_pkgars,
                logger,
            )
        };
    }

    // Fast path for dependency-only builds: if every stage pkgar and the
    // auto_deps file exist, reuse them without any mtime comparison.
    if !check_source {
        // TODO: when stage_dirs does not exist due to clean_target was true, extract from stage.pkgar?
        let stage_present = stage_pkgars.iter().all(|file| file.is_file());
        if stage_present && auto_deps_file.is_file() {
            if cli_verbose {
                log_to_pty!(logger, "DEBUG: using cached build, not checking source");
            }
            let auto_deps = make_auto_deps!(true)?;
            return Ok(BuildResult::cached(stage_dirs, auto_deps));
        }
    }

    // Freshness stamp: newest of the source tree (ignoring .git) and recipe.toml.
    let mut source_modified = modified_dir_ignore_git(source_dir).unwrap_or(SystemTime::UNIX_EPOCH);
    if let Ok(recipe_modified) = modified(&recipe_dir.join("recipe.toml")) {
        if recipe_modified > source_modified {
            source_modified = recipe_modified
        }
    }

    let deps_modified = modified_all_btree(
        dep_pkgars.iter().map(|(_dep, pkgar)| pkgar.as_path()),
        modified,
    )?;
    let deps_host_modified = modified_all_btree(
        dep_host_pkgars.iter().map(|(_dep, pkgar)| pkgar.as_path()),
        modified,
    )?;

    // check stage dir modified against pkgar files, any files missing will result in UNIX_EPOCH
    let stage_modified = modified_all(&stage_pkgars, modified).unwrap_or(SystemTime::UNIX_EPOCH);
    // Rebuild stage if source is newer
    if stage_modified < source_modified
        || stage_modified < deps_modified
        || stage_modified < deps_host_modified
        || !auto_deps_file.is_file()
    {
        for stage_dir in &stage_dirs {
            if stage_dir.is_dir() {
                log_to_pty!(logger, "DEBUG: updating '{}'", stage_dir.display());
                remove_stage_dir(stage_dir)?;
            }
        }
    } else {
        if cli_verbose {
            log_to_pty!(logger, "DEBUG: using cached build");
        }
        // stop early otherwise we'll end up rebuilding
        let auto_deps = make_auto_deps!(true)?;
        return Ok(BuildResult::cached(stage_dirs, auto_deps));
    }

    // Rebuild sysroot if source is newer
    if recipe.build.kind != BuildKind::Remote {
        let updated = build_deps_dir(
            logger,
            &sysroot_dir,
            // Host recipes get host pkgars in their sysroot; target recipes
            // get target pkgars.
            if name.is_host() {
                &dep_host_pkgars
            } else {
                &dep_pkgars
            },
            source_modified,
            deps_modified,
        )?;
        if cli_verbose && !updated {
            log_to_pty!(logger, "DEBUG: using cached sysroot");
        }
    }
    // Target recipes additionally get a toolchain dir built from host pkgars.
    if recipe.build.kind != BuildKind::Remote && !name.is_host() && dep_host_pkgars.len() > 0 {
        let updated = build_deps_dir(
            logger,
            &toolchain_dir,
            &dep_host_pkgars,
            source_modified,
            deps_host_modified,
        )?;
        if cli_verbose && !updated {
            log_to_pty!(logger, "DEBUG: using cached toolchain");
        }
    }

    // The last stage dir is the main "stage" (see get_stage_dirs).
    let stage_dir = stage_dirs
        .last()
        .expect("Should have atleast one stage dir");
    let build_dir = get_sub_target_dir(target_dir, "build");
    if !stage_dir.is_dir() {
        // Create stage.tmp
        let stage_dir_tmp = target_dir.join("stage.tmp");
        create_dir_clean(&stage_dir_tmp)?;

        // Create build dir, if it does not exist
        if cook_config.clean_build || !build_dir.is_dir() {
            create_dir_clean(&build_dir)?;
        }

        // Renders a bash array append: NAME+=(\n "flag"\n ...\n)
        let flags_fn = |name, flags: &Vec<String>| {
            format!(
                "{name}+=(\n{}\n)\n",
                flags
                    .iter()
                    .map(|s| format!(" \"{s}\""))
                    .collect::<Vec<String>>()
                    .join("\n")
            )
        };

        if recipe.build.kind == BuildKind::Remote {
            return build_remote(stage_dirs, recipe, target_dir, cook_config);
        }

        let mut allow_cargo_offline = false;
        //TODO: better integration with redoxer (library instead of binary)
        //TODO: configurable target
        //TODO: Add more configurability, convert scripts to Rust?
        // Generate the per-kind build script fragment executed between the
        // shared pre/post scripts.
        let script = match &recipe.build.kind {
            BuildKind::Cargo {
                cargopath,
                cargoflags,
                cargopackages,
                cargoexamples,
            } => {
                allow_cargo_offline = true;
                let mut script = format!(
                    "DYNAMIC_INIT\n{}\nCOOKBOOK_CARGO_PATH={} ",
                    flags_fn("COOKBOOK_CARGO_FLAGS", cargoflags),
                    cargopath.as_deref().unwrap_or(".")
                );
                if cargopackages.len() == 0 && cargoexamples.len() == 0 {
                    script += "cookbook_cargo\n"
                } else {
                    if cargopackages.len() > 0 {
                        script += "cookbook_cargo_packages";
                        for package in cargopackages {
                            script += " ";
                            script += package;
                        }
                        script += "\n";
                    }
                    if cargoexamples.len() > 0 {
                        script += "cookbook_cargo_examples";
                        for example in cargoexamples {
                            script += " ";
                            script += example;
                        }
                        script += "\n";
                    }
                }

                script
            }
            BuildKind::Configure { configureflags } => format!(
                "DYNAMIC_INIT\n{}cookbook_configure",
                flags_fn("COOKBOOK_CONFIGURE_FLAGS", configureflags),
            ),
            BuildKind::Cmake { cmakeflags } => format!(
                "DYNAMIC_INIT\n{}cookbook_cmake",
                flags_fn("COOKBOOK_CMAKE_FLAGS", cmakeflags),
            ),
            BuildKind::Meson { mesonflags } => format!(
                "DYNAMIC_INIT\n{}cookbook_meson",
                flags_fn("COOKBOOK_MESON_FLAGS", mesonflags),
            ),
            BuildKind::Custom { script } => script.clone(),
            // Remote was returned from above; None was returned at the top.
            BuildKind::Remote => unreachable!(),
            BuildKind::None => "".to_owned(),
        };

        // Build the wrapper command with the COOKBOOK_* environment contract
        // consumed by the shell scripts.
        let command = {
            //TODO: remove unwraps
            let cookbook_build = build_dir.canonicalize().unwrap();
            let cookbook_recipe = recipe_dir.canonicalize().unwrap();
            let cookbook_root = Path::new(".").canonicalize().unwrap();
            let cookbook_stage = stage_dir_tmp.canonicalize().unwrap();
            let cookbook_source = source_dir.canonicalize().unwrap();
            let cookbook_sysroot = sysroot_dir.canonicalize().unwrap();
            // None when the toolchain dir was never created (no host deps).
            let cookbook_toolchain = toolchain_dir.canonicalize().ok();
            let bash_args = if cli_verbose { "-ex" } else { "-e" };
            let local_redoxer = Path::new("target/release/cookbook_rbos_redoxer");
            // On Redox without a locally-built redoxer, rely on PATH lookup.
            let mut command = if is_redox() && !local_redoxer.is_file() {
                let mut command = Command::new("cookbook_rbos_redoxer");
                command.env("COOKBOOK_REDOXER", "cookbook_rbos_redoxer");
                command
            } else {
                // /bin/false fallback makes a missing redoxer fail loudly at run time.
                let cookbook_redoxer = local_redoxer
                    .canonicalize()
                    .unwrap_or(PathBuf::from("/bin/false"));
                let mut command = Command::new(&cookbook_redoxer);
                command.env("COOKBOOK_REDOXER", &cookbook_redoxer);
                command
            };
            command.arg("env").arg("bash").arg(bash_args);
            command.current_dir(&cookbook_build);
            command.env("TARGET", package_target(name));
            command.env("COOKBOOK_BUILD", &cookbook_build);
            command.env("COOKBOOK_NAME", name.name());
            command.env("COOKBOOK_HOST_TARGET", redoxer::host_target());
            command.env("COOKBOOK_RECIPE", &cookbook_recipe);
            command.env("COOKBOOK_ROOT", &cookbook_root);
            command.env("COOKBOOK_STAGE", &cookbook_stage);
            command.env("COOKBOOK_SOURCE", &cookbook_source);
            command.env("COOKBOOK_SYSROOT", &cookbook_sysroot);
            if let Some(cookbook_toolchain) = &cookbook_toolchain {
                command.env("COOKBOOK_TOOLCHAIN", cookbook_toolchain);
            } else if name.is_host() {
                // Host builds use the sysroot as their toolchain.
                command.env("COOKBOOK_TOOLCHAIN", &cookbook_sysroot);
            }
            command.env("COOKBOOK_MAKE_JOBS", cli_jobs.to_string());
            if cli_verbose {
                command.env("COOKBOOK_VERBOSE", "1");
            }
            // Explicitly clear the var so an inherited value cannot leak in.
            if cook_config.offline && allow_cargo_offline {
                command.env("COOKBOOK_OFFLINE", "1");
            } else {
                command.env_remove("COOKBOOK_OFFLINE");
            }
            if let Ok(ident_source) = fetch::fetch_get_source_info(&cook_recipe) {
                command.env("COOKBOOK_SOURCE_IDENT", ident_source.source_identifier);
                command.env("COOKBOOK_COMMIT_IDENT", ident_source.commit_identifier);
            }
            command
        };

        let full_script = format!(
            "{}\n{}\n{}\n{}",
            BUILD_PRESCRIPT, SHARED_PRESCRIPT, script, BUILD_POSTSCRIPT
        );
        run_command_stdin(command, full_script.as_bytes(), logger)?;

        // Move to each features dir
        // Compile one glob matcher per declared feature file pattern; files
        // matching a pattern are routed to that feature's stage dir.
        let mut globs = Vec::new();
        for (i, feat) in recipe.optional_packages.iter().enumerate() {
            let stage_dir = &stage_dirs[i];
            create_dir_clean(&stage_dir)?;
            for path in &feat.files {
                let glob = globset::Glob::new(&path).map_err(|e| format!("{}", e))?;
                globs.push((glob.compile_matcher(), stage_dir.clone()));
            }
        }
        move_dir_all_fn(
            &stage_dir_tmp,
            &Box::new(|path: PathBuf| {
                for (glob, dst) in &globs {
                    if glob.is_match(&path) {
                        return Some(dst.as_path());
                    }
                }
                // None => the file stays for the final rename into `stage`.
                None
            }),
        )
        .map_err(|e| format!("Unable to move {e:?}"))?;

        // Move stage.tmp to stage atomically
        rename(&stage_dir_tmp, &stage_dir)?;
    }

    if cook_config.clean_target {
        remove_all(&build_dir)?;
        remove_all(&sysroot_dir)?;
        if toolchain_dir.is_dir() {
            remove_all(&toolchain_dir)?;
        }
        // don't remove stage dir yet
    }

    let auto_deps = make_auto_deps!(false)?;
    Ok(BuildResult::new(stage_dirs, auto_deps))
}
|
||||
|
||||
pub fn remove_stage_dir(stage_dir: &PathBuf) -> crate::Result<()> {
|
||||
if stage_dir.is_dir() {
|
||||
remove_all(&stage_dir)?;
|
||||
}
|
||||
let stage_file = stage_dir.with_added_extension("pkgar");
|
||||
if stage_file.is_file() {
|
||||
remove_all(&stage_file)?;
|
||||
}
|
||||
let stage_meta = stage_dir.with_added_extension("toml");
|
||||
if stage_meta.is_file() {
|
||||
remove_all(&stage_meta)?;
|
||||
}
|
||||
let stage_files = stage_dir.with_added_extension("files");
|
||||
if stage_files.is_file() {
|
||||
remove_all(&stage_files)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_stage_dirs(features: &Vec<OptionalPackageRecipe>, target_dir: &Path) -> Vec<PathBuf> {
|
||||
let mut target_dir = target_dir.to_path_buf();
|
||||
if let Some(cross_target) = crate::cross_target() {
|
||||
// TODO: automatically pass COOKBOOK_CROSS_GNU_TARGET?
|
||||
target_dir = target_dir.join(cross_target)
|
||||
}
|
||||
let mut v = Vec::new();
|
||||
for f in features {
|
||||
v.push(target_dir.join(format!("stage.{}", f.name)));
|
||||
}
|
||||
// intentionally added last as it contains leftover files from package features
|
||||
v.push(target_dir.join("stage"));
|
||||
v
|
||||
}
|
||||
|
||||
pub fn get_sub_target_dir(target_dir: &Path, sub_path: &str) -> PathBuf {
|
||||
let mut target_dir = target_dir.to_path_buf();
|
||||
if let Some(cross_target) = crate::cross_target() {
|
||||
// TODO: automatically pass COOKBOOK_CROSS_GNU_TARGET?
|
||||
target_dir = target_dir.join(cross_target)
|
||||
}
|
||||
target_dir.join(sub_path)
|
||||
}
|
||||
|
||||
/// Populate a dependency directory (sysroot or toolchain) by extracting the
/// given pkgar archives into it.
///
/// An existing dir is invalidated (removed) when it is older than the source
/// or the dependency archives, or when its `.tags` markers don't cover every
/// expected dependency. A fresh dir is built in `<dir>.tmp` and renamed into
/// place atomically. Returns `Ok(true)` if the dir was (re)built, `Ok(false)`
/// if the existing dir was kept.
fn build_deps_dir(
    logger: &PtyOut,
    deps_dir: &PathBuf,
    dep_pkgars: &BTreeSet<(PackageName, PathBuf)>,
    source_modified: SystemTime,
    deps_modified: SystemTime,
) -> Result<bool, String> {
    let deps_dir_tmp = deps_dir.with_added_extension("tmp");
    if deps_dir.is_dir() {
        // `.tags` holds one empty marker file per installed dependency; its
        // mtime doubles as the "when was this dir built" stamp.
        let tags_dir = deps_dir.join(".tags");
        let sysroot_modified = modified_dir(&tags_dir).unwrap_or(SystemTime::UNIX_EPOCH);
        if sysroot_modified < source_modified
            || sysroot_modified < deps_modified
            || !check_files_present(
                &tags_dir,
                &dep_pkgars
                    .iter()
                    .map(|(name, _)| name.without_prefix())
                    .collect(),
            )?
        {
            log_to_pty!(logger, "DEBUG: updating '{}'", deps_dir.display());
            remove_all(deps_dir)?;
        }
    }
    if !deps_dir.is_dir() {
        // Create sysroot.tmp
        create_dir_clean(&deps_dir_tmp)?;
        let tags_dir = deps_dir_tmp.join(".tags");
        let usr_dir = deps_dir_tmp.join("usr");
        create_dir(&tags_dir)?;
        create_dir(&usr_dir)?;

        for folder in &["bin", "include", "lib", "share"] {
            // Make sure sysroot/usr/$folder exists
            create_dir(&usr_dir.join(folder))?;

            // Link sysroot/$folder sysroot/usr/$folder
            symlink(Path::new("usr").join(folder), &deps_dir_tmp.join(folder))?;
        }

        let pkey_path = "build/id_ed25519.pub.toml";
        for (name, archive_path) in dep_pkgars {
            // Write the tag marker first, then extract the archive contents.
            let tag_file = tags_dir.join(name.without_prefix());
            fs::write(&tag_file, "")
                .map_err(|e| format!("failed to write tag file {}: {:?}", tag_file.display(), e))?;
            // NOTE(review): to_str().unwrap() panics on non-UTF-8 paths —
            // consider propagating an error instead.
            pkgar::extract(pkey_path, &archive_path, deps_dir_tmp.to_str().unwrap()).map_err(
                |err| {
                    format!(
                        "failed to install '{}' in '{}': {:?}",
                        archive_path.display(),
                        deps_dir_tmp.display(),
                        err
                    )
                },
            )?;
        }

        // Move sysroot.tmp to sysroot atomically
        rename(&deps_dir_tmp, deps_dir)?;

        return Ok(true);
    }

    Ok(false)
}
|
||||
|
||||
/// Calculate automatic dependencies
///
/// When `cached` is false any existing `auto_deps.toml` is invalidated and
/// recomputed from the staged ELF files (dynamic linking) plus the
/// statically-linked build deps, then written back for the next run.
/// When `cached` is true an existing file is deserialized and returned.
/// Note: `dep_pkgars` is taken by value because it is filtered in place
/// before the static-deps pass.
fn build_auto_deps(
    recipe: &Recipe,
    auto_deps_path: &Path,
    stage_dirs: &Vec<PathBuf>,
    cached: bool,
    cook_config: &CookConfig,
    mut dep_pkgars: BTreeSet<(PackageName, PathBuf)>,
    logger: &PtyOut,
) -> Result<BTreeSet<PackageName>, String> {
    if auto_deps_path.is_file() && !cached {
        if cook_config.verbose {
            log_to_pty!(logger, "DEBUG: updating {}", auto_deps_path.display());
        }
        remove_all(&auto_deps_path)?;
    }

    let auto_deps = if auto_deps_path.exists() {
        // Cache hit: read the previously serialized package set.
        let toml_content =
            fs::read_to_string(&auto_deps_path).map_err(|_| "failed to read cached auto_deps")?;
        let wrapper: AutoDeps =
            toml::from_str(&toml_content).map_err(|_| "failed to deserialize cached auto_deps")?;
        wrapper.packages
    } else {
        // Recompute: dynamic-link scan over the stage dirs, then add the
        // closure of statically-linked direct build dependencies.
        let mut dynamic_deps = auto_deps_from_dynamic_linking(stage_dirs, &dep_pkgars, logger);
        // Only direct dependencies count toward the static pass.
        dep_pkgars.retain(|x| recipe.build.dependencies.contains(&x.0));
        // Resolution failures degrade to an empty set rather than failing the build.
        let package_deps =
            auto_deps_from_static_package_deps(&dep_pkgars, &dynamic_deps).unwrap_or_default();
        dynamic_deps.extend(package_deps);

        let wrapper = AutoDeps {
            packages: dynamic_deps,
        };
        // Persist for subsequent cached runs.
        serialize_and_write(&auto_deps_path, &wrapper)?;
        wrapper.packages
    };
    Ok(auto_deps)
}
|
||||
|
||||
/// "Build" a remote recipe by extracting prebuilt source pkgars into the
/// stage directories and deriving auto-deps from either a cached
/// `auto_deps.toml` or the package metadata in `source.toml`.
pub fn build_remote(
    stage_dirs: Vec<PathBuf>,
    recipe: &Recipe,
    target_dir: &Path,
    cook_config: &CookConfig,
) -> Result<BuildResult, String> {
    let source_toml = target_dir.join("source.toml");
    // Public key used to verify the remote-built archives.
    let source_pubkey = "build/remotes/pub_key_static.redox-os.org.toml";

    // NOTE(review): assumes get_packages_list() order matches stage_dirs
    // (from get_stage_dirs) — verify against the callers.
    let packages = recipe.get_packages_list();
    for (i, package) in packages.into_iter().enumerate() {
        // declare pkg dependencies as autodeps dependency
        let stage_dir = &stage_dirs[i];

        // With clean_target, a packaged pkgar is sufficient; skip extraction.
        if cook_config.clean_target && stage_dir.with_added_extension("pkgar").is_file() {
            continue;
        }

        if !stage_dir.is_dir() {
            let (_, source_pkgar, _) = package_source_paths(package, &target_dir);
            let stage_dir_tmp = target_dir.join("stage.tmp");
            pkgar::extract(&source_pubkey, &source_pkgar, &stage_dir_tmp).map_err(|err| {
                format!(
                    "failed to install '{}' in '{}': {:?}",
                    source_pkgar.display(),
                    stage_dir_tmp.display(),
                    err
                )
            })?;
            // Move stage.tmp to stage atomically
            rename(&stage_dir_tmp, &stage_dir)?;
        }
    }

    // Invalidate the cached auto_deps file if any stage dir is newer.
    let auto_deps_path = target_dir.join("auto_deps.toml");
    if auto_deps_path.is_file() && !cook_config.clean_target {
        if modified(&auto_deps_path)? < modified_all(&stage_dirs, modified)? {
            remove_all(&auto_deps_path)?
        }
    }

    let auto_deps = if auto_deps_path.exists() {
        // Cache hit: reuse the serialized package set.
        let toml_content =
            fs::read_to_string(&auto_deps_path).map_err(|_| "failed to read cached auto_deps")?;
        let wrapper: AutoDeps =
            toml::from_str(&toml_content).map_err(|_| "failed to deserialize cached auto_deps")?;
        wrapper.packages
    } else {
        // Derive auto-deps from the remote package's declared `depends` list.
        let toml_content =
            fs::read_to_string(&source_toml).map_err(|_| "failed to read source.toml")?;
        let pkg_toml: Package =
            toml::from_str(&toml_content).map_err(|_| "failed to deserialize source.toml")?;
        let wrapper = AutoDeps {
            packages: pkg_toml.depends.into_iter().collect(),
        };
        serialize_and_write(&auto_deps_path, &wrapper)?;
        wrapper.packages
    };
    Ok(BuildResult::new(stage_dirs, auto_deps))
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::os::unix;

    /// Regression test: a symlink cycle inside a stage directory must not
    /// make `auto_deps_from_dynamic_linking` recurse forever — the function
    /// dedupes on canonicalized paths via its `visited` set.
    #[test]
    fn file_system_loop_no_infinite_loop() {
        let mut root = std::env::temp_dir();
        root.push("temp_test_dir_file_system_loop_no_infinite_loop");
        // Start from a clean directory; ignore failure if it didn't exist.
        let _ = std::fs::remove_dir_all(&root);
        std::fs::create_dir_all(&root).expect("Failed to create temporary root directory");

        // Hierarchy with an infinite loop
        let dir = root.join("loop");
        unix::fs::symlink(&root, &dir).expect("Linking {dir:?} to {root:?}");

        // Sanity check that we have a loop
        assert_eq!(
            root.canonicalize().unwrap(),
            dir.canonicalize().unwrap(),
            "Expected a loop where {dir:?} points to {root:?}"
        );

        // `&None` as the logger — PtyOut is apparently an Option-like alias;
        // no dep archives, so no deps can be found either way.
        let entries =
            super::auto_deps_from_dynamic_linking(&vec![root.clone()], &Default::default(), &None);
        assert!(
            entries.is_empty(),
            "auto_deps shouldn't have yielded any libraries"
        );
    }
}
|
||||
@@ -0,0 +1,850 @@
|
||||
use crate::Error;
|
||||
use crate::Result;
|
||||
use crate::bail_other_err;
|
||||
use crate::config::translate_mirror;
|
||||
use crate::cook::cook_build;
|
||||
use crate::cook::fetch_repo;
|
||||
use crate::cook::fetch_repo::PlainPtyCallback;
|
||||
use crate::cook::fs::*;
|
||||
use crate::cook::package::get_package_name;
|
||||
use crate::cook::package::package_source_paths;
|
||||
use crate::cook::pty::PtyOut;
|
||||
use crate::cook::script::*;
|
||||
use crate::is_redox;
|
||||
use crate::log_to_pty;
|
||||
use crate::recipe::BuildKind;
|
||||
use crate::recipe::CookRecipe;
|
||||
use crate::recipe::SourceRecipe;
|
||||
use crate::wrap_io_err;
|
||||
use crate::wrap_other_err;
|
||||
use pkg::SourceIdentifier;
|
||||
use pkg::net_backend::DownloadBackendWriter;
|
||||
use std::cell::RefCell;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
use std::rc::Rc;
|
||||
|
||||
/// Outcome of fetching one recipe's source.
pub struct FetchResult {
    // Directory the source was placed in (usually `<recipe>/source`).
    pub source_dir: PathBuf,
    // Identifier of what was fetched (git revision, tar blake3 hash, or a
    // fixed marker such as "local_source").
    pub source_ident: String,
    // True when the existing source was reused without re-fetching.
    pub cached: bool,
}
|
||||
|
||||
impl FetchResult {
|
||||
pub fn new(source_dir: PathBuf, ident: String, cached: bool) -> Self {
|
||||
Self {
|
||||
source_dir,
|
||||
source_ident: ident,
|
||||
cached,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cached(source_dir: PathBuf, ident: String) -> Self {
|
||||
Self {
|
||||
source_dir,
|
||||
source_ident: ident,
|
||||
cached: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_blake3(path: &PathBuf) -> Result<String> {
|
||||
let mut f = fs::File::open(&path).map_err(wrap_io_err!(path, "Opening file for blake3"))?;
|
||||
let hash = blake3::Hasher::new()
|
||||
.update_reader(&mut f)
|
||||
.map_err(wrap_io_err!(path, "Reading file for blake3"))?
|
||||
.finalize();
|
||||
Ok(hash.to_hex().to_string())
|
||||
}
|
||||
|
||||
pub fn fetch_offline(recipe: &CookRecipe, logger: &PtyOut) -> Result<FetchResult> {
|
||||
let recipe_dir = &recipe.dir;
|
||||
let source_dir = recipe_dir.join("source");
|
||||
match recipe.recipe.build.kind {
|
||||
BuildKind::None => {
|
||||
// the build function doesn't need source dir exists
|
||||
let ident = fetch_apply_source_info(recipe, "".to_string())?;
|
||||
return Ok(FetchResult::cached(source_dir, ident));
|
||||
}
|
||||
BuildKind::Remote => {
|
||||
return fetch_remote(recipe_dir, recipe, true, source_dir, logger);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let result = match &recipe.recipe.source {
|
||||
Some(SourceRecipe::Path { path: _ }) | None => fetch(recipe, true, logger)?,
|
||||
Some(SourceRecipe::SameAs { same_as }) => {
|
||||
let recipe = fetch_resolve_canon(recipe_dir, &same_as, recipe.name.is_host())?;
|
||||
// recursively fetch
|
||||
let r = fetch_offline(&recipe, logger)?;
|
||||
fetch_make_symlink(&source_dir, &same_as)?;
|
||||
r
|
||||
}
|
||||
Some(SourceRecipe::Git {
|
||||
git: _,
|
||||
upstream: _,
|
||||
branch: _,
|
||||
rev: _,
|
||||
patches: _,
|
||||
script: _,
|
||||
shallow_clone: _,
|
||||
}) => {
|
||||
offline_check_exists(&source_dir)?;
|
||||
let (head_rev, _) = get_git_head_rev(&source_dir)?;
|
||||
FetchResult::cached(source_dir, head_rev)
|
||||
}
|
||||
Some(SourceRecipe::Tar {
|
||||
tar: _,
|
||||
blake3,
|
||||
patches,
|
||||
script,
|
||||
}) => {
|
||||
let ident = blake3.clone().unwrap_or("no_tar_blake3_hash_info".into());
|
||||
let cached = source_dir.is_dir();
|
||||
if !cached {
|
||||
let source_tar = recipe_dir.join("source.tar");
|
||||
let source_tar_blake3 = get_blake3(&source_tar)?;
|
||||
if source_tar.exists() {
|
||||
if let Some(blake3) = blake3 {
|
||||
if source_tar_blake3 != *blake3 {
|
||||
bail_other_err!(
|
||||
"The downloaded tar blake3 {source_tar_blake3:?} is not equal to blake3 in recipe.toml"
|
||||
);
|
||||
}
|
||||
create_dir(&source_dir)?;
|
||||
fetch_extract_tar(source_tar, &source_dir, logger)?;
|
||||
fetch_apply_patches(recipe_dir, patches, script, &source_dir, logger)?;
|
||||
} else {
|
||||
// need to trust this tar file
|
||||
bail_other_err!(
|
||||
"Please add blake3 = {source_tar_blake3:?} to {recipe:?}",
|
||||
recipe = recipe_dir.join("recipe.toml").display(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
offline_check_exists(&source_dir)?;
|
||||
FetchResult::new(source_dir, ident, cached)
|
||||
}
|
||||
};
|
||||
|
||||
fetch_apply_source_info(recipe, result.source_ident.clone())?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Fetch (or refresh) the source tree for `recipe` into `<recipe_dir>/source`.
///
/// Dispatches on the recipe's build kind and source kind (same-as symlink,
/// local path, git, tar, or none) and returns a `FetchResult` carrying the
/// source directory, an identifier for what was fetched (git rev, tar blake3,
/// or a placeholder), and whether the existing checkout was reused (`cached`).
/// `check_source` enables the more expensive up-to-date verification for
/// sources that are already present on disk.
pub fn fetch(recipe: &CookRecipe, check_source: bool, logger: &PtyOut) -> Result<FetchResult> {
    let recipe_dir = &recipe.dir;
    let source_dir = recipe_dir.join("source");
    match recipe.recipe.build.kind {
        BuildKind::None => {
            // the build function doesn't need source dir exists
            let ident = fetch_apply_source_info(recipe, "".to_string())?;
            return Ok(FetchResult::cached(source_dir, ident));
        }
        BuildKind::Remote => {
            // Prebuilt binaries: download from the binary repo instead of
            // fetching source.
            return fetch_remote(recipe_dir, recipe, false, source_dir, logger);
        }
        _ => {}
    }

    let result = match &recipe.recipe.source {
        Some(SourceRecipe::SameAs { same_as }) => {
            // This recipe shares another recipe's source tree.
            let recipe = fetch_resolve_canon(recipe_dir, &same_as, recipe.name.is_host())?;
            // recursively fetch
            let r = fetch(&recipe, check_source, logger)?;
            // Expose the canonical source via a `source` symlink.
            fetch_make_symlink(&source_dir, &same_as)?;
            r
        }
        Some(SourceRecipe::Path { path }) => {
            let path = recipe_dir.join(path);
            // Reuse the copy only when nothing under `path` is newer than the
            // copied tree.
            let cached = source_dir.is_dir() && modified_dir(&path)? <= modified_dir(&source_dir)?;
            if !cached {
                log_to_pty!(
                    logger,
                    "[DEBUG]: {:?} is newer than {:?}",
                    path.display(),
                    source_dir.display()
                );
                copy_dir_all(&path, &source_dir).map_err(wrap_io_err!(
                    &path,
                    source_dir,
                    "Copying source"
                ))?;
            }
            FetchResult::new(source_dir, "local_source".to_string(), cached)
        }
        Some(SourceRecipe::Git {
            git,
            upstream,
            branch,
            rev,
            patches,
            script,
            shallow_clone,
        }) => {
            //TODO: use libgit?
            let shallow_clone = *shallow_clone == Some(true);
            // `cached` ends up true only when the existing checkout already
            // matches the requested rev/branch.
            let cached = if !source_dir.is_dir() {
                // Create source.tmp
                let source_dir_tmp = recipe_dir.join("source.tmp");
                create_dir_clean(&source_dir_tmp)?;

                // Clone the repository to source.tmp
                let mut command = Command::new("git");
                command
                    .arg("clone")
                    .arg("--recursive")
                    .arg(translate_mirror(&git));
                if let Some(branch) = branch {
                    command.arg("--branch").arg(branch);
                }
                if shallow_clone {
                    command
                        .arg("--filter=tree:0")
                        .arg("--also-filter-submodules");
                }
                command.arg(&source_dir_tmp);
                if let Err(e) = run_command(command, logger) {
                    if !is_redox() {
                        return Err(e);
                    }
                    // TODO: RedoxFS has a race condition problem with `--recursive` and running in multi CPU.
                    // It is appear that running the submodule update separately fixes it. Remove this when
                    // `git clone https://gitlab.redox-os.org/redox-os/relibc --recursive` proven to work in Redox OS.
                    let mut cmds = vec!["update", "--init"];
                    if shallow_clone {
                        cmds.push("--filter=tree:0");
                    }
                    manual_git_recursive_submodule(logger, &source_dir_tmp, cmds)?;
                }

                // Move source.tmp to source atomically
                rename(&source_dir_tmp, &source_dir)?;

                false
            } else if !check_source {
                // Caller opted out of verification: trust the existing checkout.
                true
            } else {
                if !source_dir.join(".git").is_dir() {
                    bail_other_err!(
                        "{:?} is not a git repository, but recipe indicated git source",
                        source_dir.display()
                    );
                }

                // Reset origin
                let mut command = Command::new("git");
                command.arg("-C").arg(&source_dir);
                command.arg("remote").arg("set-url").arg("origin").arg(git);
                run_command(command, logger)?;

                // Fetch origin
                let mut command = Command::new("git");
                command.arg("-C").arg(&source_dir);
                command.arg("fetch").arg("origin");
                run_command(command, logger)?;

                // Decide cached-ness from HEAD vs. the requested rev/branch.
                let (head_rev, detached_rev) = get_git_head_rev(&source_dir)?;
                match (rev, detached_rev) {
                    (Some(rev), true) => {
                        // Pinned rev with detached HEAD: cached iff HEAD is
                        // exactly the pinned rev.
                        if let Ok(exp_rev) = get_git_tag_rev(&source_dir, &rev) {
                            exp_rev == head_rev
                        } else {
                            // Tag lookup failed: try once more after `git gc`
                            // (may repack/expire stale refs).
                            let mut command = Command::new("git");
                            command.arg("-C").arg(&source_dir);
                            command.arg("gc");
                            run_command(command, logger)?;
                            if let Ok(exp_rev) = get_git_tag_rev(&source_dir, &rev) {
                                exp_rev == head_rev
                            } else {
                                false
                            }
                        }
                    }
                    (None, false) => {
                        // Branch-tracking checkout: cached iff it tracks the
                        // right branch of the right origin and HEAD matches
                        // the remote tip.
                        let (_, remote_branch, remote_name, remote_url) =
                            get_git_remote_tracking(&source_dir)?;
                        // TODO: how to get default branch and compare it here?
                        if let Some(branch) = branch
                            && branch != &remote_branch
                        {
                            false
                        } else if remote_name != "origin" || &remote_url != chop_dot_git(git) {
                            false
                        } else {
                            match get_git_fetch_rev(&source_dir, &remote_url, &remote_branch) {
                                Ok(fetch_rev) => fetch_rev == head_rev,
                                Err(e) => {
                                    log_to_pty!(logger, "{}", e);
                                    false
                                }
                            }
                        }
                    }
                    _ => false,
                }
            };

            if !cached {
                if let Some(_upstream) = upstream {
                    //TODO: set upstream URL (is this needed?)
                    // git remote set-url upstream "$GIT_UPSTREAM" &> /dev/null ||
                    // git remote add upstream "$GIT_UPSTREAM"
                    // git fetch upstream
                }

                if !patches.is_empty() || script.is_some() {
                    // Hard reset
                    let mut command = Command::new("git");
                    command.arg("-C").arg(&source_dir);
                    command.arg("reset").arg("--hard");
                    run_command(command, logger)?;
                }

                if let Some(rev) = rev {
                    // Check out specified revision
                    let mut command = Command::new("git");
                    command.arg("-C").arg(&source_dir);
                    command.arg("checkout").arg(rev);
                    run_command(command, logger)?;
                } else if !is_redox() {
                    //TODO: complicated stuff to check and reset branch to origin
                    //TODO: redox can't undestand this (got exit status 1)
                    let mut command = Command::new("bash");
                    command.arg("-c").arg(GIT_RESET_BRANCH);
                    if let Some(branch) = branch {
                        command.env("BRANCH", branch);
                    }
                    command.current_dir(&source_dir);
                    run_command(command, logger)?;
                }

                // Sync submodules URL
                let mut command = Command::new("git");
                command.arg("-C").arg(&source_dir);
                command.arg("submodule").arg("sync").arg("--recursive");

                if let Err(e) = run_command(command, logger) {
                    if !is_redox() {
                        return Err(e);
                    }
                    // Redox fallback: iterate repositories manually (see
                    // manual_git_recursive_submodule).
                    manual_git_recursive_submodule(logger, &source_dir, vec!["sync"])?;
                }

                // Update submodules
                let mut command = Command::new("git");
                command.arg("-C").arg(&source_dir);
                command
                    .arg("submodule")
                    .arg("update")
                    .arg("--init")
                    .arg("--recursive");
                if shallow_clone {
                    command.arg("--filter=tree:0");
                }
                if let Err(e) = run_command(command, logger) {
                    if !is_redox() {
                        return Err(e);
                    }
                    let mut cmds = vec!["update", "--init"];
                    if shallow_clone {
                        cmds.push("--filter=tree:0");
                    }
                    manual_git_recursive_submodule(logger, &source_dir, cmds)?;
                }

                fetch_apply_patches(recipe_dir, patches, script, &source_dir, logger)?;
            }

            // The final HEAD rev is the source identifier for this fetch.
            let (head_rev, _) = get_git_head_rev(&source_dir)?;
            FetchResult::new(source_dir, head_rev, cached)
        }
        Some(SourceRecipe::Tar {
            tar,
            blake3,
            patches,
            script,
        }) => {
            let source_tar = recipe_dir.join("source.tar");
            let ident = blake3.clone().unwrap_or("no_tar_blake3_hash_info".into());
            let mut tar_updated = false;
            // Download loop: runs at most twice — a mismatched pre-existing
            // tar is deleted and re-downloaded once; a mismatched fresh
            // download is a hard error.
            loop {
                if !source_tar.is_file() {
                    tar_updated = true;
                    download_wget(&tar, &source_tar, logger)?;
                }
                if !check_source {
                    break;
                }
                let source_tar_blake3 = get_blake3(&source_tar)?;
                if let Some(blake3) = blake3 {
                    if source_tar_blake3 == *blake3 {
                        break;
                    }
                    if tar_updated {
                        bail_other_err!(
                            "The downloaded tar blake3 {source_tar_blake3:?} is not equal to blake3 in recipe.toml"
                        )
                    } else {
                        log_to_pty!(
                            logger,
                            "DEBUG: source tar blake3 is different and need redownload"
                        );
                        remove_all(&source_tar)?;
                    }
                } else {
                    //TODO: set blake3 hash on the recipe with something like "cook fix"
                    log_to_pty!(
                        logger,
                        "WARNING: set blake3 for '{}' to '{}'",
                        source_tar.display(),
                        source_tar_blake3
                    );
                    break;
                }
            }
            let mut cached = true;
            if source_dir.is_dir() {
                // Invalidate the extracted tree when the tar or any patch is
                // newer than it.
                if tar_updated || fetch_is_patches_newer(recipe_dir, patches, &source_dir)? {
                    log_to_pty!(
                        logger,
                        "DEBUG: source tar or patches is newer than the source directory"
                    );
                    remove_all(&source_dir)?
                }
            }
            if !source_dir.is_dir() {
                // Create source.tmp
                let source_dir_tmp = recipe_dir.join("source.tmp");
                create_dir_clean(&source_dir_tmp)?;
                fetch_extract_tar(source_tar, &source_dir_tmp, logger)?;
                fetch_apply_patches(recipe_dir, patches, script, &source_dir_tmp, logger)?;

                // Move source.tmp to source atomically
                rename(&source_dir_tmp, &source_dir)?;
                cached = false;
            }
            FetchResult::new(source_dir, ident, cached)
        }
        // Local Sources
        None => {
            if !source_dir.is_dir() {
                log_to_pty!(
                    logger,
                    "WARNING: Recipe without source section expected source dir at '{}'",
                    source_dir.display(),
                );
                create_dir(&source_dir)?;
            }
            FetchResult::cached(source_dir, "local_source".into())
        }
    };

    // Cargo recipes: pre-download crate dependencies now so the build step
    // can run without network access — but only if a build will happen.
    if let BuildKind::Cargo {
        cargopath,
        cargoflags: _,
        cargopackages: _,
        cargoexamples: _,
    } = &recipe.recipe.build.kind
    {
        if fetch_will_build(recipe) {
            fetch_cargo(&result.source_dir, cargopath.as_ref(), logger)?;
        }
    }

    // Persist the source identifier alongside the build artifacts.
    fetch_apply_source_info(recipe, result.source_ident.to_string())?;

    Ok(result)
}
|
||||
|
||||
/// Fallback for `git submodule ... --recursive` failures on Redox.
///
/// Repeatedly discovers checked-out repositories under `source_dir` (the top
/// level plus any submodules materialized by previous rounds) and runs the
/// given non-recursive submodule subcommand (`cmd`, e.g. `["update",
/// "--init"]`) in each repository exactly once, until a full pass finds no
/// new repositories.
fn manual_git_recursive_submodule(
    logger: &PtyOut,
    source_dir: &PathBuf,
    cmd: Vec<&str>,
) -> Result<()> {
    log_to_pty!(
        logger,
        "Git submodule {} failed, might be caused by race condition in RedoxFS, retrying without --recursive.",
        cmd[0]
    );

    // Maps repository root -> whether the submodule command has run there yet.
    let mut repo_registry: BTreeMap<PathBuf, bool> = BTreeMap::new();

    loop {
        let mut dirty_git = false;

        // Every `.git` under the tree marks a repository root (its parent).
        let output = Command::new("find")
            .args(&[".", "-name", ".git"])
            .current_dir(&source_dir)
            .output()
            .map_err(wrap_io_err!("Failed to execute find"))?;

        let stdout = String::from_utf8_lossy(&output.stdout);

        for line in stdout.lines() {
            let git_path = PathBuf::from(line);
            if let Some(repo_root) = git_path.parent() {
                let repo_root_buf = repo_root.to_path_buf();

                // A previously unseen repo means another round is required
                // (processing it may in turn create more submodule checkouts).
                if !repo_registry.contains_key(&repo_root_buf) {
                    repo_registry.insert(repo_root_buf.clone(), false);
                    dirty_git = true;
                }
            }
        }

        if !dirty_git {
            // completed — a full scan found no new repositories
            return Ok(());
        }

        // Repos registered but not yet processed this round.
        let pending_repos: Vec<PathBuf> = repo_registry
            .iter()
            .filter(|&(_, &synced)| !synced)
            .map(|(path, _)| path.clone())
            .collect();

        if pending_repos.is_empty() {
            // dirty_git without pending entries should be impossible; bail
            // rather than loop forever.
            bail_other_err!("No pending repos but dirty");
        }

        for repo in pending_repos {
            println!("==> Processing: {:?}", repo);

            // git -C <repo> submodule <cmd...>  (non-recursive)
            let mut command = Command::new("git");
            command.arg("-C").arg(&repo).current_dir(&source_dir);
            command.arg("submodule");

            for cmd in &cmd {
                command.arg(cmd);
            }
            run_command(command, logger)?;

            // Mark processed so the next round only handles new discoveries.
            repo_registry.insert(repo, true);
        }
    }
}
|
||||
|
||||
/// This does the same check as in cook_build
|
||||
fn fetch_will_build(recipe: &CookRecipe) -> bool {
|
||||
let check_source = !recipe.is_deps;
|
||||
if !check_source {
|
||||
// there could be more check here, but it's heavy so just assume it will build
|
||||
return true;
|
||||
}
|
||||
|
||||
let stage_dirs =
|
||||
cook_build::get_stage_dirs(&recipe.recipe.optional_packages, &recipe.target_dir());
|
||||
let stage_pkgars: Vec<PathBuf> = stage_dirs
|
||||
.iter()
|
||||
.map(|p| p.with_added_extension("pkgar"))
|
||||
.collect();
|
||||
let stage_present = stage_pkgars.iter().all(|file| file.is_file());
|
||||
!stage_present
|
||||
}
|
||||
|
||||
pub(crate) fn fetch_make_symlink(source_dir: &PathBuf, same_as: &String) -> Result<()> {
|
||||
let target_dir = Path::new(same_as).join("source");
|
||||
if !source_dir.is_symlink() {
|
||||
if source_dir.is_dir() {
|
||||
bail_other_err!(
|
||||
"'{dir:?}' is a directory, but recipe indicated a symlink. \n\
|
||||
try removing '{dir:?}' if you haven't made any changes that would be lost",
|
||||
dir = source_dir.display(),
|
||||
)
|
||||
}
|
||||
std::os::unix::fs::symlink(&target_dir, source_dir).map_err(|err| {
|
||||
format!(
|
||||
"failed to symlink '{}' to '{}': {}\n{:?}",
|
||||
target_dir.display(),
|
||||
source_dir.display(),
|
||||
err,
|
||||
err
|
||||
)
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn fetch_resolve_canon(
|
||||
recipe_dir: &Path,
|
||||
same_as: &String,
|
||||
is_host: bool,
|
||||
) -> Result<CookRecipe> {
|
||||
let canon_dir = Path::new(recipe_dir).join(same_as);
|
||||
if canon_dir
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.chars()
|
||||
.filter(|c| *c == '/')
|
||||
.count()
|
||||
> 50
|
||||
{
|
||||
bail_other_err!("Infinite loop detected");
|
||||
}
|
||||
if !canon_dir.exists() {
|
||||
bail_other_err!("{dir:?} is not exists", dir = canon_dir.display());
|
||||
}
|
||||
CookRecipe::from_path(canon_dir.as_path(), true, is_host).map_err(Error::from)
|
||||
}
|
||||
|
||||
pub(crate) fn fetch_extract_tar(
|
||||
source_tar: PathBuf,
|
||||
source_dir_tmp: &PathBuf,
|
||||
logger: &PtyOut,
|
||||
) -> Result<()> {
|
||||
let mut command = Command::new("tar");
|
||||
let verbose = crate::config::get_config().cook.verbose;
|
||||
if is_redox() {
|
||||
command.arg(if verbose { "xvf" } else { "xf" });
|
||||
} else {
|
||||
command.arg("--extract");
|
||||
command.arg("--no-same-owner");
|
||||
if verbose {
|
||||
command.arg("--verbose");
|
||||
}
|
||||
command.arg("--file");
|
||||
}
|
||||
command.arg(&source_tar);
|
||||
command.arg("--directory").arg(source_dir_tmp);
|
||||
command.arg("--strip-components").arg("1");
|
||||
run_command(command, logger)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn fetch_cargo(
|
||||
source_dir: &PathBuf,
|
||||
cargopath: Option<&String>,
|
||||
logger: &PtyOut,
|
||||
) -> Result<()> {
|
||||
let mut source_dir = source_dir.clone();
|
||||
if let Some(cargopath) = cargopath {
|
||||
source_dir = source_dir.join(cargopath);
|
||||
}
|
||||
|
||||
let local_redoxer = Path::new("target/release/cookbook_rbos_redoxer");
|
||||
let mut command = if is_redox() && !local_redoxer.is_file() {
|
||||
Command::new("cookbook_rbos_redoxer")
|
||||
} else {
|
||||
let cookbook_redoxer = local_redoxer
|
||||
.canonicalize()
|
||||
.unwrap_or(PathBuf::from("cargo"));
|
||||
Command::new(&cookbook_redoxer)
|
||||
};
|
||||
command.arg("fetch");
|
||||
command.arg("--manifest-path");
|
||||
command.arg(source_dir.join("Cargo.toml").into_os_string());
|
||||
run_command(command, logger)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch prebuilt binaries (`.toml` metadata + `.pkgar` archive) for every
/// package of `recipe` from the remote binary repository, instead of
/// fetching and building from source.
///
/// In `offline_mode` the files must already exist locally; otherwise stale
/// copies (hash mismatch against the repo index) are deleted and
/// re-downloaded. The returned `FetchResult` is derived from the mandatory
/// (unnamed) package's metadata.
pub fn fetch_remote(
    recipe_dir: &Path,
    recipe: &CookRecipe,
    offline_mode: bool,
    source_dir: PathBuf,
    logger: &PtyOut,
) -> Result<FetchResult> {
    let (mut manager, repository) = fetch_repo::get_binary_repo();
    let target_dir = create_target_dir(recipe_dir, recipe.target)?;
    if logger.is_some() {
        // Mirror download progress into the recipe's PTY log.
        let writer = logger.as_ref().unwrap().1.try_clone().unwrap();
        manager.set_callback(Rc::new(RefCell::new(PlainPtyCallback::new(writer))));
    }
    let packages = recipe.recipe.get_packages_list();

    let name = recipe_dir
        .file_name()
        .ok_or("Unable to get recipe name")?
        .to_str()
        .unwrap();

    let mut result = None;
    // Flips to false as soon as any package had to be downloaded.
    let mut cached = true;

    for package in packages {
        let (_, source_pkgar, source_toml) = package_source_paths(package, &target_dir);
        let source_name = get_package_name(name, package);
        let Some(repo_blake3) = repository.packages.get(&source_name) else {
            bail_other_err!("Package {source_name} does not exist in server repository")
        };

        if !offline_mode {
            // Drop local copies whose hash no longer matches the repo index.
            if source_toml.is_file() {
                let pkg_toml = read_source_toml(&source_toml)?;
                if &pkg_toml.blake3 != repo_blake3 {
                    log_to_pty!(logger, "DEBUG: Updating source binaries");
                    remove_all(&source_toml)?;
                    if source_pkgar.is_file() {
                        remove_all(&source_pkgar)?;
                    }
                }
            }

            if !source_toml.is_file() {
                {
                    // Inner scope closes the toml file before it is re-read below.
                    let toml_file = File::create(&source_toml)
                        .map_err(|e| format!("Unable to create source.toml: {e:?}"))?;
                    let mut writer = DownloadBackendWriter::ToFile(toml_file);
                    manager
                        .download(&format!("{}.toml", &source_name), None, &mut writer)
                        .map_err(|e| format!("Unable to download source.toml: {e:?}"))?;
                }
                // The metadata carries the pkgar's expected network size.
                let pkg_toml = read_source_toml(&source_toml)?;
                let pkgar_file = File::create(&source_pkgar)
                    .map_err(|e| format!("Unable to create source.pkgar: {e:?}"))?;
                let mut writer = DownloadBackendWriter::ToFile(pkgar_file);
                manager
                    .download(
                        &format!("{}.pkgar", &source_name),
                        Some(pkg_toml.network_size),
                        &mut writer,
                    )
                    .map_err(|e| format!("Unable to download source.pkgar: {e:?}"))?;

                cached = false;
            }

            // manager.download(file, 0, dest)
        } else {
            offline_check_exists(&source_pkgar)?;
            offline_check_exists(&source_toml)?;
        }

        // guaranteed to exist once and last in iteration
        if package.is_none() {
            let pkg_toml = read_source_toml(&source_toml)?;

            // Propagate the remote build's identifiers into source_info.toml.
            fetch_apply_source_info_from_remote(
                recipe,
                &SourceIdentifier {
                    commit_identifier: pkg_toml.commit_identifier.clone(),
                    source_identifier: pkg_toml.source_identifier.clone(),
                    time_identifier: pkg_toml.time_identifier.clone(),
                    ..Default::default()
                },
            )?;

            result = Some(FetchResult::new(
                source_dir.clone(),
                pkg_toml.source_identifier,
                cached,
            ));
        }
    }

    result.ok_or_else(wrap_other_err!("There's no mandatory package in remote"))
}
|
||||
|
||||
fn read_source_toml(source_toml: &Path) -> Result<pkg::Package> {
|
||||
let mut file =
|
||||
File::open(source_toml).map_err(|e| format!("Unable to open source.toml: {e:?}"))?;
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)
|
||||
.map_err(|e| format!("Unable to read source.toml: {e:?}"))?;
|
||||
let pkg_toml = pkg::Package::from_toml(&contents)
|
||||
.map_err(|e| format!("Unable to parse source.toml: {e:?}"))?;
|
||||
Ok(pkg_toml)
|
||||
}
|
||||
|
||||
pub(crate) fn fetch_is_patches_newer(
|
||||
recipe_dir: &Path,
|
||||
patches: &Vec<String>,
|
||||
source_dir: &PathBuf,
|
||||
) -> Result<bool> {
|
||||
// don't check source files inside as it can be mixed with user patches
|
||||
let source_time = modified(&source_dir)?;
|
||||
for patch_name in patches {
|
||||
let patch_file = recipe_dir.join(patch_name);
|
||||
if !patch_file.is_file() {
|
||||
bail_other_err!("Failed to find patch file {:?}", patch_file.display());
|
||||
}
|
||||
|
||||
let patch_time = modified(&patch_file)?;
|
||||
if patch_time > source_time {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
pub(crate) fn fetch_apply_patches(
|
||||
recipe_dir: &Path,
|
||||
patches: &Vec<String>,
|
||||
script: &Option<String>,
|
||||
source_dir_tmp: &PathBuf,
|
||||
logger: &PtyOut,
|
||||
) -> Result<()> {
|
||||
for patch_name in patches {
|
||||
let patch_file = recipe_dir.join(patch_name);
|
||||
if !patch_file.is_file() {
|
||||
bail_other_err!("Failed to find patch file {:?}", patch_file.display());
|
||||
}
|
||||
|
||||
let patch = fs::read_to_string(&patch_file).map_err(|err| {
|
||||
format!(
|
||||
"failed to read patch file '{}': {}\n{:#?}",
|
||||
patch_file.display(),
|
||||
err,
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
let mut command = Command::new("patch");
|
||||
command.arg("--directory").arg(source_dir_tmp);
|
||||
command.arg("--strip=1");
|
||||
run_command_stdin(command, patch.as_bytes(), logger)?;
|
||||
}
|
||||
Ok(if let Some(script) = script {
|
||||
let mut command = Command::new("bash");
|
||||
command.arg("-ex");
|
||||
command.current_dir(source_dir_tmp);
|
||||
run_command_stdin(
|
||||
command,
|
||||
format!("{SHARED_PRESCRIPT}\n{script}").as_bytes(),
|
||||
logger,
|
||||
)?;
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn fetch_apply_source_info(
|
||||
recipe: &CookRecipe,
|
||||
source_identifier: String,
|
||||
) -> Result<String> {
|
||||
let ident = crate::cook::ident::get_ident();
|
||||
let info = SourceIdentifier {
|
||||
commit_identifier: ident.commit.to_string(),
|
||||
time_identifier: ident.time.to_string(),
|
||||
source_identifier: source_identifier,
|
||||
};
|
||||
|
||||
fetch_apply_source_info_from_remote(&recipe, &info)?;
|
||||
|
||||
Ok(info.source_identifier)
|
||||
}
|
||||
|
||||
pub(crate) fn fetch_apply_source_info_from_remote(
|
||||
recipe: &CookRecipe,
|
||||
info: &SourceIdentifier,
|
||||
) -> Result<()> {
|
||||
let target_dir = create_target_dir(&recipe.dir, recipe.target)?;
|
||||
let source_toml_path = target_dir.join("source_info.toml");
|
||||
serialize_and_write(&source_toml_path, &info)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn fetch_get_source_info(recipe: &CookRecipe) -> Result<SourceIdentifier> {
|
||||
let target_dir = recipe.target_dir();
|
||||
let source_toml_path = target_dir.join("source_info.toml");
|
||||
let toml_content = fs::read_to_string(source_toml_path)
|
||||
.map_err(|e| format!("Unable to read source_info.toml: {:?}", e))?;
|
||||
let parsed = toml::from_str(&toml_content)
|
||||
.map_err(|e| format!("Unable to parse source_info.toml: {:?}", e))?;
|
||||
Ok(parsed)
|
||||
}
|
||||
@@ -0,0 +1,204 @@
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
io::{PipeWriter, Write},
|
||||
path::{Path, PathBuf},
|
||||
rc::Rc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use pkg::{
|
||||
PackageName, RemotePackage, RepoManager, Repository,
|
||||
callback::{Callback, SilentCallback},
|
||||
net_backend::{CurlBackend, DownloadBackend},
|
||||
};
|
||||
|
||||
// TODO: This is a workaround, but as long as whole
// fetch operation is in single thread, this is ok
thread_local! {
    // Lazily initialized per-thread cache of the remote binary repository
    // handle and its parsed index; see get_binary_repo().
    static BINARY_REPO: RefCell<Option<(RepoManager, Repository)>> = RefCell::new(None);
}
|
||||
|
||||
fn load_cached_repo(path: &Path) -> Option<Repository> {
|
||||
let metadata = std::fs::metadata(path).ok()?;
|
||||
|
||||
if !crate::config::get_config().cook.offline {
|
||||
let yesterday = std::time::SystemTime::now().checked_sub(Duration::from_secs(24 * 3600))?;
|
||||
if metadata.modified().ok()? < yesterday {
|
||||
// stale cache
|
||||
let _ = std::fs::remove_file(path);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
let toml_str = std::fs::read_to_string(path).ok()?;
|
||||
Repository::from_toml(&toml_str).ok()
|
||||
}
|
||||
|
||||
/// Build a RepoManager pointed at the configured remote binary package
/// source and obtain the repository index (from cache when fresh, otherwise
/// fetched from the remote).
///
/// NOTE(review): every fallible step here uses `expect`, so any network,
/// key-sync, or parse failure aborts the process — acceptable for one-shot
/// CLI startup, but worth confirming that is intended.
fn init_binary_repo() -> (RepoManager, Repository) {
    // Progress is deliberately silent here; download progress is attached
    // later by callers (see fetch_remote's PlainPtyCallback).
    let callback = Rc::new(RefCell::new(SilentCallback::new()));
    let download_backend = CurlBackend::new().expect("Curl not found");
    let mut repo = RepoManager::new(callback, Box::new(download_backend));

    repo.add_remote(crate::REMOTE_PKG_SOURCE, redoxer::target())
        .expect("Unable to add remote");

    // All downloads (keys, index, packages) land under build/remotes.
    let repo_path = PathBuf::from("build/remotes");
    repo.set_download_path(repo_path.clone());
    repo.sync_keys().expect("Unable to sync keys");

    // Use the cached repo.toml when fresh; otherwise fetch it anew.
    let repo_toml = load_cached_repo(&repo_path.join("repo.toml")).unwrap_or_else(|| {
        let (toml_str, _) = repo
            .get_package_toml(&PackageName::new("repo").unwrap())
            .expect("Failed to fetch repo.toml");
        Repository::from_toml(&toml_str).expect("Fetched repo.toml is invalid")
    });

    (repo, repo_toml)
}
|
||||
|
||||
pub fn get_binary_repo() -> (RepoManager, Repository) {
|
||||
BINARY_REPO.with(|cell| {
|
||||
let mut opt = cell.borrow_mut();
|
||||
if opt.is_none() {
|
||||
*opt = Some(init_binary_repo());
|
||||
}
|
||||
let (repo, repo_toml) = opt.as_ref().unwrap();
|
||||
((*repo).clone(), repo_toml.clone())
|
||||
})
|
||||
}
|
||||
|
||||
/// Download/fetch progress callback that renders plain, single-line status
/// updates into a PTY pipe.
pub struct PlainPtyCallback {
    // Total byte size of the current download (set in download_start).
    size: u64,
    // True when the server reported no content length; progress output is
    // then suppressed.
    unknown_size: bool,
    // Bytes received so far for the current download.
    pos: u64,
    // Packages fetched so far / total expected.
    fetch_processed: usize,
    fetch_total: usize,
    // Set if user require to agree on terminal.
    interactive: bool,
    // Name of the file currently downloading, if any.
    download_file: Option<String>,
    // Destination pipe all progress output is written to.
    pty: PipeWriter,
}

impl PlainPtyCallback {
    /// Create a callback that writes progress into `pty`.
    pub fn new(pty: PipeWriter) -> Self {
        Self {
            size: 0,
            unknown_size: false,
            pos: 0,
            fetch_processed: 0,
            fetch_total: 0,
            interactive: false,
            download_file: None,
            pty,
        }
    }

    /// Set if user require to agree on terminal
    pub fn set_interactive(&mut self, enabled: bool) {
        self.interactive = enabled;
    }

    fn flush(&self) {
        // BUGFIX: all output in this type is written to `self.pty`, so flush
        // that writer — the previous code flushed stderr, which never
        // receives any of this output.
        let _ = (&self.pty).flush();
    }

    /// Format a byte count with binary units, e.g. `1536` -> `"1.50 KiB"`.
    pub fn format_size(bytes: u64) -> String {
        if bytes == 0 {
            return "0 B".to_string();
        }
        const UNITS: [&str; 5] = ["B", "KiB", "MiB", "GiB", "TiB"];
        // Unit index = floor(log_1024(bytes)), clamped to the table —
        // BUGFIX: without the clamp, inputs >= 1 PiB indexed past UNITS and
        // panicked.
        let i = ((bytes as f64).log(1024.0).floor() as usize).min(UNITS.len() - 1);
        let size = bytes as f64 / 1024.0_f64.powi(i as i32);
        format!("{:.2} {}", size, UNITS[i])
    }

    fn downloading_str(&self) -> &'static str {
        "Downloading"
    }
}
|
||||
|
||||
/// Carriage return + ANSI clear-to-end-of-line: redraws progress in place.
const RESET_LINE: &str = "\r\x1b[2K";

impl Callback for PlainPtyCallback {
    fn fetch_start(&mut self, initial_count: usize) {
        // Reset counters, then draw the initial "[0/initial_count]" line.
        self.fetch_total = 0;
        self.fetch_processed = 0;
        self.fetch_package_increment(0, initial_count);
    }

    fn fetch_package_name(&mut self, pkg_name: &PackageName) {
        // resuming after fetch_package_increment
        let _ = write!(&self.pty, " {}", pkg_name.as_str());
        self.flush();
    }

    fn fetch_package_increment(&mut self, added_processed: usize, added_count: usize) {
        self.fetch_processed += added_processed;
        self.fetch_total += added_count;

        // Redraw the progress counter on the same line.
        let _ = write!(
            &self.pty,
            "{RESET_LINE}Fetching: [{}/{}]",
            self.fetch_processed, self.fetch_total
        );
        self.flush();
    }

    fn fetch_end(&mut self) {
        if self.fetch_processed == self.fetch_total {
            let _ = writeln!(&self.pty, "{RESET_LINE}Fetch complete.");
        } else {
            let _ = writeln!(&self.pty, "{RESET_LINE}Fetch incomplete.");
        }
    }

    fn download_start(&mut self, length: u64, file: &str) {
        self.size = length;
        // A length of 0 is treated as "size unknown": progress lines are
        // suppressed for this download (see download_increment).
        self.unknown_size = length == 0;
        self.pos = 0;
        if !self.unknown_size {
            let _ = write!(&self.pty, "{RESET_LINE}{} {file}", self.downloading_str());
            self.download_file = Some(file.to_string());
            self.flush();
        }
    }

    fn download_increment(&mut self, downloaded: u64) {
        self.pos += downloaded;
        if self.unknown_size {
            self.size += downloaded;
        }
        if self.unknown_size {
            // No total known: track bytes silently, print nothing.
            return;
        }

        // keep using MB for consistency
        let pos_mb = self.pos as f64 / 1_048_576.0;
        let size_mb = self.size as f64 / 1_048_576.0;
        let file_name = self
            .download_file
            .as_ref()
            .map(|s| s.as_str())
            .unwrap_or("");
        let _ = write!(
            &self.pty,
            "{RESET_LINE}{} {} [{:.2} MB / {:.2} MB]",
            self.downloading_str(),
            file_name,
            pos_mb,
            size_mb
        );
        self.flush();
    }

    fn download_end(&mut self) {
        if !self.unknown_size {
            // Terminate the in-place progress line and clear the file name.
            let _ = writeln!(&self.pty, "");
            self.download_file = None;
        }
    }

    fn install_extract(&mut self, remote_pkg: &RemotePackage) {
        let _ = writeln!(&self.pty, "Extracting {}...", remote_pkg.package.name);
        self.flush();
    }
}
|
||||
+456
@@ -0,0 +1,456 @@
|
||||
use serde::Serialize;
|
||||
use std::{
|
||||
collections::BTreeSet,
|
||||
fs,
|
||||
io::{self, Write},
|
||||
path::{Path, PathBuf},
|
||||
process::{self, Command, Stdio},
|
||||
time::SystemTime,
|
||||
};
|
||||
use walkdir::{DirEntry, WalkDir};
|
||||
|
||||
use crate::{
|
||||
Error, Result, bail_other_err,
|
||||
config::translate_mirror,
|
||||
cook::pty::{PtyOut, spawn_to_pipe},
|
||||
wrap_io_err, wrap_other_err,
|
||||
};
|
||||
|
||||
//TODO: pub(crate) for all of these functions
|
||||
|
||||
pub fn remove_all(path: &Path) -> Result<()> {
|
||||
if path.is_dir() {
|
||||
fs::remove_dir_all(path)
|
||||
} else {
|
||||
fs::remove_file(path)
|
||||
}
|
||||
.map_err(wrap_io_err!(path, "Removing all"))
|
||||
}
|
||||
|
||||
pub fn create_dir(dir: &Path) -> Result<()> {
|
||||
fs::create_dir_all(dir).map_err(wrap_io_err!(dir, "Recursively creating dir"))
|
||||
}
|
||||
|
||||
pub fn create_dir_clean(dir: &Path) -> Result<()> {
|
||||
if dir.is_dir() {
|
||||
remove_all(dir)?;
|
||||
}
|
||||
create_dir(dir)
|
||||
}
|
||||
|
||||
pub fn create_target_dir(recipe_dir: &Path, target: &'static str) -> Result<PathBuf> {
|
||||
let target_dir = recipe_dir.join("target").join(target);
|
||||
if !target_dir.is_dir() {
|
||||
create_dir(&target_dir)?;
|
||||
}
|
||||
Ok(target_dir)
|
||||
}
|
||||
|
||||
/// Recursively copy the contents of `src` into `dst`, creating `dst` (and
/// any intermediate directories) as needed.
pub fn copy_dir_all(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
    let dst = dst.as_ref();
    fs::create_dir_all(dst)?;
    for entry in fs::read_dir(src)? {
        let entry = entry?;
        let target = dst.join(entry.file_name());
        if entry.file_type()?.is_dir() {
            // Recurse into subdirectories.
            copy_dir_all(entry.path(), target)?;
        } else {
            fs::copy(entry.path(), target)?;
        }
    }
    Ok(())
}
|
||||
|
||||
/// Walk `src` recursively and move selected files out of it.
///
/// `mv` is called with each file's path relative to `src` and returns the
/// destination root to move it under, or `None` to leave the file in place.
pub fn move_dir_all_fn<'a>(
    src: impl AsRef<Path>,
    mv: &'a Box<impl Fn(PathBuf) -> Option<&'a Path>>,
) -> io::Result<()> {
    // `src` is passed twice: once as the scan root, once as the fixed base
    // for computing relative paths during recursion.
    move_dir_all_inner_fn(&src, &src, mv)
}
|
||||
|
||||
/// Recursive worker for `move_dir_all_fn`.
///
/// `src` is the directory currently being scanned; `srcrel` stays fixed at
/// the original root so `mv` always receives paths relative to that root.
fn move_dir_all_inner_fn<'a>(
    src: impl AsRef<Path>,
    srcrel: impl AsRef<Path>,
    mv: &'a Box<impl Fn(PathBuf) -> Option<&'a Path>>,
) -> io::Result<()> {
    // Collect (absolute, relative, destination-root) triples first, move
    // afterwards — so renames don't disturb the directory iteration.
    let mut files = Vec::new();
    for entry in fs::read_dir(&src)? {
        let entry = entry?;
        let ty = entry.file_type()?;
        if ty.is_dir() {
            // Descend, keeping the original root for relative paths.
            move_dir_all_inner_fn(entry.path(), srcrel.as_ref(), mv)?;
        } else {
            let path: PathBuf = entry.path();
            // Entries outside the root (should not happen) are skipped.
            let Ok(relpath) = path.strip_prefix(&srcrel) else {
                continue;
            };

            if let Some(dst) = mv(relpath.to_path_buf()) {
                files.push((entry.path(), relpath.to_path_buf(), dst.to_owned()));
            }
        }
    }
    for (src, srcrel, dst) in files {
        // Recreate the relative directory structure under the destination.
        let path = dst.join(&srcrel);
        fs::create_dir_all(&path.parent().unwrap())?;
        std::fs::rename(&src, &path)?;
    }
    Ok(())
}
|
||||
|
||||
pub fn symlink(original: impl AsRef<Path>, link: impl AsRef<Path>) -> Result<()> {
|
||||
std::os::unix::fs::symlink(&original, &link)
|
||||
.map_err(wrap_io_err!(link.as_ref(), "Creating symlink"))
|
||||
}
|
||||
|
||||
fn modified_inner(path: &Path, metadata: fs::Metadata) -> Result<SystemTime> {
|
||||
metadata
|
||||
.modified()
|
||||
.map_err(wrap_io_err!(path, "Reading modified time"))
|
||||
}
|
||||
|
||||
pub fn modified(path: &Path) -> Result<SystemTime> {
|
||||
let metadata = fs::metadata(path).map_err(wrap_io_err!(path, "Reading metadata"))?;
|
||||
modified_inner(path, metadata)
|
||||
}
|
||||
|
||||
pub fn modified_all(
|
||||
path: &Vec<PathBuf>,
|
||||
func: fn(path: &Path) -> Result<SystemTime>,
|
||||
) -> Result<SystemTime> {
|
||||
let mut newest = SystemTime::UNIX_EPOCH;
|
||||
for entry_res in path {
|
||||
let modified = func(entry_res)?;
|
||||
if modified > newest {
|
||||
newest = modified;
|
||||
}
|
||||
}
|
||||
Ok(newest)
|
||||
}
|
||||
|
||||
pub fn modified_all_btree<'a>(
|
||||
path: impl Iterator<Item = &'a Path>,
|
||||
func: fn(path: &Path) -> Result<SystemTime>,
|
||||
) -> Result<SystemTime> {
|
||||
let mut newest = SystemTime::UNIX_EPOCH;
|
||||
for entry_res in path {
|
||||
let modified = func(entry_res)?;
|
||||
if modified > newest {
|
||||
newest = modified;
|
||||
}
|
||||
}
|
||||
Ok(newest)
|
||||
}
|
||||
|
||||
fn modified_dir_inner<F: FnMut(&DirEntry) -> bool>(dir: &Path, filter: F) -> Result<SystemTime> {
|
||||
let mut newest = modified(dir)?;
|
||||
for entry_res in WalkDir::new(dir).into_iter().filter_entry(filter) {
|
||||
let entry = entry_res?;
|
||||
let modified = modified_inner(entry.path(), entry.metadata()?)?;
|
||||
if modified > newest {
|
||||
newest = modified;
|
||||
}
|
||||
}
|
||||
Ok(newest)
|
||||
}
|
||||
|
||||
pub fn modified_dir(dir: &Path) -> Result<SystemTime> {
|
||||
modified_dir_inner(dir, |_| true)
|
||||
}
|
||||
|
||||
pub fn modified_dir_ignore_git(dir: &Path) -> Result<SystemTime> {
|
||||
modified_dir_inner(dir, |entry| {
|
||||
entry
|
||||
.file_name()
|
||||
.to_str()
|
||||
.map(|s| s != ".git")
|
||||
.unwrap_or(true)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn check_files_present(dir: &Path, expected_files: &BTreeSet<&str>) -> Result<bool> {
|
||||
let entries = fs::read_dir(dir).map_err(wrap_io_err!(dir, "Reading list files"))?;
|
||||
|
||||
let mut matches = 0;
|
||||
for entry_res in entries {
|
||||
let entry = entry_res.map_err(wrap_io_err!(dir, "Reading file entry"))?;
|
||||
|
||||
let filename = entry.file_name();
|
||||
let Some(filename) = filename.to_str() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if expected_files.contains(&filename) {
|
||||
matches += 1
|
||||
} else if filename.starts_with('.') {
|
||||
continue;
|
||||
} else {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(matches == expected_files.len())
|
||||
}
|
||||
|
||||
pub fn rename(src: &Path, dst: &Path) -> Result<()> {
|
||||
fs::rename(src, dst).map_err(wrap_io_err!(src, dst, "Renaming"))
|
||||
}
|
||||
|
||||
pub fn run_command(mut command: process::Command, stdout_pipe: &PtyOut) -> Result<()> {
|
||||
let status = spawn_to_pipe(&mut command, stdout_pipe)?
|
||||
.wait()
|
||||
.map_err(wrap_io_err!("waiting to exit"))?;
|
||||
|
||||
if !status.success() {
|
||||
return Err(Error::Command(command, status));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run_command_stdin(
|
||||
mut command: process::Command,
|
||||
stdin_data: &[u8],
|
||||
stdout_pipe: &PtyOut,
|
||||
) -> Result<()> {
|
||||
command.stdin(Stdio::piped());
|
||||
let mut child = spawn_to_pipe(&mut command, stdout_pipe)?;
|
||||
|
||||
if let Some(ref mut stdin) = child.stdin {
|
||||
stdin
|
||||
.write_all(stdin_data)
|
||||
.map_err(wrap_io_err!("Writing to stdin"))?;
|
||||
} else {
|
||||
bail_other_err!("stdin is not captured");
|
||||
}
|
||||
|
||||
let status = child.wait().map_err(wrap_io_err!("Spawning"))?;
|
||||
|
||||
if !status.success() {
|
||||
return Err(Error::Command(command, status));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn serialize_and_write<T: Serialize>(file_path: &Path, content: &T) -> Result<()> {
|
||||
let toml_content = toml::to_string(content).map_err(|err| {
|
||||
wrap_other_err!(
|
||||
"Failed to serialize content for {:?}: {}",
|
||||
file_path.display(),
|
||||
err
|
||||
)()
|
||||
})?;
|
||||
|
||||
fs::write(file_path, toml_content).map_err(wrap_io_err!(file_path, "Writing to file"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn offline_check_exists(path: &PathBuf) -> Result<()> {
|
||||
if !path.exists() {
|
||||
bail_other_err!(
|
||||
"{path:?} is not exist and unable to continue in offline mode",
|
||||
path = path.display(),
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn download_wget(url: &str, dest: &PathBuf, logger: &PtyOut) -> Result<()> {
|
||||
if !dest.is_file() {
|
||||
let dest_tmp = PathBuf::from(format!("{}.tmp", dest.display()));
|
||||
let mut command = Command::new("wget");
|
||||
command.arg(translate_mirror(url));
|
||||
command.arg("--continue").arg("-O").arg(&dest_tmp);
|
||||
run_command(command, logger)?;
|
||||
rename(&dest_tmp, &dest)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn read_to_string(path: &Path) -> Result<String> {
|
||||
fs::read_to_string(path).map_err(wrap_io_err!(path, "Reading file"))
|
||||
}
|
||||
|
||||
/// get commit rev and return if it's detached or not
|
||||
pub fn get_git_head_rev(dir: &PathBuf) -> Result<(String, bool)> {
|
||||
let git_head = dir.join(".git/HEAD");
|
||||
let head_str = read_to_string(&git_head)?;
|
||||
if head_str.starts_with("ref: ") {
|
||||
let entry = head_str["ref: ".len()..].trim_end();
|
||||
let git_ref = dir.join(".git").join(entry);
|
||||
let ref_str = if git_ref.is_file() {
|
||||
read_to_string(&git_ref)?
|
||||
} else {
|
||||
get_git_ref_entry(dir, entry)?
|
||||
};
|
||||
Ok((ref_str.trim().to_string(), false))
|
||||
} else {
|
||||
Ok((head_str.trim().to_string(), true))
|
||||
}
|
||||
}
|
||||
|
||||
/// get commit from "rev" which either a full commit hash or a tag name
|
||||
pub fn get_git_tag_rev(dir: &PathBuf, tag: &str) -> Result<String> {
|
||||
if tag.len() == 40 && tag.chars().all(|f| f.is_ascii_hexdigit()) {
|
||||
return Ok(tag.to_string());
|
||||
}
|
||||
get_git_ref_entry(dir, &format!("refs/tags/{tag}"))
|
||||
}
|
||||
|
||||
pub fn get_git_ref_entry(dir: &PathBuf, entry: &str) -> Result<String> {
|
||||
// https://git-scm.com/book/en/v2/Git-Internals-Maintenance-and-Data-Recovery
|
||||
let git_refs = dir.join(".git/packed-refs");
|
||||
let refs_str = read_to_string(&git_refs)?;
|
||||
let mut lines = refs_str.lines();
|
||||
while let Some(line) = lines.next() {
|
||||
if line.contains(entry) {
|
||||
let mut sha = line
|
||||
.split_whitespace()
|
||||
.next()
|
||||
.ok_or_else(wrap_other_err!("Packed-refs line is malformed"))?;
|
||||
if let Some(next_line) = lines.next() {
|
||||
if next_line.starts_with('^') {
|
||||
sha = &next_line[1..];
|
||||
}
|
||||
}
|
||||
return Ok(sha.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
Err(wrap_other_err!("Could not find a rev for {}", entry)())
|
||||
}
|
||||
|
||||
/// get commit rev after fetch
|
||||
pub fn get_git_fetch_rev(dir: &PathBuf, remote_url: &str, remote_branch: &str) -> Result<String> {
|
||||
let git_fetch_head = dir.join(".git/FETCH_HEAD");
|
||||
|
||||
let fetch_head_content = read_to_string(&git_fetch_head)?;
|
||||
|
||||
let expected_comment_part = format!("branch '{}' of {}", remote_branch, remote_url);
|
||||
|
||||
for line in fetch_head_content.lines() {
|
||||
if line.contains(&expected_comment_part) && !line.contains("not-for-merge") {
|
||||
let sha = line
|
||||
.split_whitespace()
|
||||
.next()
|
||||
.ok_or_else(wrap_other_err!("FETCH_HEAD line is malformed"))?;
|
||||
|
||||
return Ok(sha.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
Err(wrap_other_err!(
|
||||
"Could not find a fetch target for tracking {}",
|
||||
expected_comment_part
|
||||
)())
|
||||
}
|
||||
|
||||
/// (local_branch_name, remote_branch, remote_name, remote_url)
/// -> ("fix_stuff", "master", "origin", "https://gitlab.redox-os.org/willnode/redox")
///
/// Determine what the checked-out branch tracks by parsing `.git/HEAD` and
/// `.git/config` directly (no git binary required). For a detached HEAD the
/// commit hash is returned in the first slot and the other three are empty.
pub fn get_git_remote_tracking(dir: &PathBuf) -> Result<(String, String, String, String)> {
    let git_head = dir.join(".git/HEAD");
    let git_config = dir.join(".git/config");

    let head_content = read_to_string(&git_head)?;

    if !head_content.starts_with("ref: ") {
        // Detached HEAD: there is no branch, so nothing can be tracked.
        let sha = head_content.trim_end().to_string();
        return Ok((sha, "".to_string(), "".to_string(), "".to_string()));
    }

    let local_branch_path = head_content["ref: ".len()..].trim_end();
    let local_branch_name = get_git_branch_name(local_branch_path)?;

    let config_content = read_to_string(&git_config)?;

    // First pass over .git/config: locate `[branch "<name>"]` and read its
    // `remote` and `merge` keys.
    let branch_section = format!("[branch \"{}\"]", local_branch_name);
    let mut remote_name: Option<String> = None;
    let mut remote_branch: Option<String> = None;
    let mut parsing_branch_section = false;

    for line in config_content.lines().map(|l| l.trim()) {
        if line.is_empty() {
            continue;
        }

        if line == branch_section {
            parsing_branch_section = true;
            continue;
        }

        if parsing_branch_section {
            if line.starts_with('[') {
                // The next section header ends our section of interest.
                break;
            }
            if line.starts_with("remote = ") {
                remote_name = Some(line["remote = ".len()..].trim().to_string());
            }
            if line.starts_with("merge = ") {
                remote_branch = Some(get_git_branch_name(line["merge = ".len()..].trim())?);
            }
        }
    }

    let remote_name_str = remote_name.ok_or_else(wrap_other_err!(
        "Branch {:?} is not tracking a remote",
        local_branch_name
    ))?;
    let remote_branch_str = remote_branch.unwrap_or("".into());

    // Second pass: locate `[remote "<name>"]` and read its `url` key.
    let remote_section = format!("[remote \"{}\"]", remote_name_str);
    let mut remote_url: Option<String> = None;
    let mut parsing_remote_section = false;

    for line in config_content.lines().map(|l| l.trim()) {
        if line.is_empty() {
            continue;
        }

        if line == remote_section {
            parsing_remote_section = true;
            continue;
        }

        if parsing_remote_section {
            if line.starts_with('[') {
                break;
            }
            if line.starts_with("url = ") {
                let mut url = line["url = ".len()..].trim();
                // Normalize away a trailing ".git" so URLs compare equal.
                url = chop_dot_git(url);
                remote_url = Some(url.to_string());
            }
        }
    }

    let remote_url_str = remote_url.ok_or_else(wrap_other_err!(
        "Could not find URL for remote {:?} in .git/config.",
        remote_name_str
    ))?;

    Ok((
        local_branch_name,
        remote_branch_str,
        remote_name_str,
        remote_url_str,
    ))
}
|
||||
|
||||
/// Strip a trailing `.git` from a repository URL, if present.
pub(crate) fn chop_dot_git(url: &str) -> &str {
    // Idiomatic replacement for the manual ends_with + slice arithmetic.
    url.strip_suffix(".git").unwrap_or(url)
}
|
||||
|
||||
fn get_git_branch_name(local_branch_path: &str) -> Result<String> {
|
||||
// TODO: incorrectly handle branch with slashes
|
||||
Ok(local_branch_path
|
||||
.split('/')
|
||||
.last()
|
||||
.ok_or_else(wrap_other_err!(
|
||||
"Failed to parse branch name of {:?}",
|
||||
local_branch_path
|
||||
))?
|
||||
.to_string())
|
||||
}
|
||||
@@ -0,0 +1,46 @@
|
||||
use std::{
|
||||
process::{Command, Stdio},
|
||||
sync::OnceLock,
|
||||
};
|
||||
|
||||
/// Build-identity metadata recorded for packages: the cookbook checkout's
/// HEAD commit and the time this process started.
#[derive(Debug, Default)]
pub struct IdentifierConfig {
    /// HEAD commit hash of the current working directory's git repo
    /// ("" when it could not be determined).
    pub commit: String,
    /// UTC timestamp in `%Y-%m-%dT%H:%M:%SZ` (ISO-8601) format.
    pub time: String,
}
|
||||
|
||||
impl IdentifierConfig {
    /// Capture the current HEAD commit and UTC timestamp.
    ///
    /// The commit falls back to "" when `$PWD` is not a usable git checkout.
    /// The timestamp is obtained by shelling out to `date -u`.
    fn new() -> Self {
        let (commit, _) = crate::cook::fs::get_git_head_rev(
            &std::env::current_dir().expect("unable to get $PWD"),
        )
        .unwrap_or(("".into(), false));
        // better than importing heavy deps like chrono
        let time = String::from_utf8_lossy(
            &Command::new("date")
                .arg("-u")
                .arg("+%Y-%m-%dT%H:%M:%SZ")
                .stdout(Stdio::piped())
                .output()
                .expect("Failed to get current ISO-formatted time")
                .stdout
                .trim_ascii(),
        )
        .into();
        IdentifierConfig { commit, time }
    }
}
|
||||
|
||||
static IDENTIFIER_CONFIG: OnceLock<IdentifierConfig> = OnceLock::new();
|
||||
|
||||
pub fn get_ident() -> &'static IdentifierConfig {
|
||||
IDENTIFIER_CONFIG
|
||||
.get()
|
||||
.expect("Identifier is not initialized")
|
||||
}
|
||||
|
||||
pub fn init_ident() {
|
||||
IDENTIFIER_CONFIG
|
||||
.set(IdentifierConfig::new())
|
||||
.expect("Identifier is initialized twice")
|
||||
}
|
||||
@@ -0,0 +1,310 @@
|
||||
use std::{
|
||||
collections::BTreeSet,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use pkg::{InstallState, Package, PackageName, PackagePrefix, PackageState};
|
||||
use pkgar::ext::PackageSrcExt;
|
||||
use pkgar_core::HeaderFlags;
|
||||
|
||||
use crate::{
|
||||
Error,
|
||||
config::CookConfig,
|
||||
cook::{cook_build::BuildResult, fetch, fs::*, pty::PtyOut},
|
||||
log_to_pty,
|
||||
recipe::{BuildKind, CookRecipe, OptionalPackageRecipe},
|
||||
};
|
||||
|
||||
/// Create the pkgar archive(s) and `.toml` metadata for a cooked recipe.
///
/// For `BuildKind::None` (metapackages) only a `stage.toml` is written. For
/// normal recipes, a signing keypair is created under `build/` on first use,
/// then each (sub)package of the recipe gets a `.pkgar` archive built from
/// its stage directory plus a `.toml` describing it.
pub fn package(
    recipe: &CookRecipe,
    build_result: &BuildResult,
    cook_config: &CookConfig,
    logger: &PtyOut,
) -> Result<(), String> {
    let name = &recipe.name;
    let target_dir = &recipe.target_dir();
    let auto_deps = &build_result.auto_deps;
    if recipe.recipe.build.kind == BuildKind::None {
        // metapackages don't have stage dir and optional packages
        package_toml(
            target_dir.join("stage.toml"),
            recipe,
            None,
            None,
            recipe.recipe.package.dependencies.clone(),
            &auto_deps,
        )?;
        return Ok(());
    }

    // Lazily create the ed25519 signing keypair used for all archives.
    let secret_path = "build/id_ed25519.toml";
    let public_path = "build/id_ed25519.pub.toml";
    if !Path::new(secret_path).is_file() || !Path::new(public_path).is_file() {
        if !Path::new("build").is_dir() {
            create_dir(Path::new("build"))?;
        }
        let (public_key, secret_key) = pkgar_keys::SecretKeyFile::new();
        public_key
            .save(public_path)
            .map_err(|err| format!("failed to save pkgar public key: {:?}", err))?;
        secret_key
            .save(secret_path)
            .map_err(|err| format!("failed to save pkgar secret key: {:?}", err))?;
    }

    let packages = recipe.recipe.get_packages_list();

    for package in packages {
        let (stage_dir, package_file, package_meta) = package_stage_paths(package, target_dir);
        // Rebuild package if stage is newer
        if package_file.is_file() && !build_result.cached {
            log_to_pty!(logger, "DEBUG: updating '{}'", package_file.display());
            remove_all(&package_file)?;
            if package_meta.is_file() {
                remove_all(&package_meta)?;
            }
        }

        if !package_file.is_file() {
            pkgar::create_with_flags(
                secret_path,
                package_file.to_str().unwrap(),
                stage_dir.to_str().unwrap(),
                HeaderFlags::latest(
                    pkgar_core::Architecture::Independent,
                    match cook_config.compressed {
                        true => pkgar_core::Packaging::LZMA2,
                        false => pkgar_core::Packaging::Uncompressed,
                    },
                ),
            )
            .map_err(|err| format!("failed to create pkgar archive: {:?}", err))?;
        }

        // An optional subpackage depends on its parent package; the main
        // package carries the automatically detected dependencies.
        let deps = if package.is_some() {
            BTreeSet::from([name.with_prefix(PackagePrefix::Any)])
        } else {
            auto_deps.clone()
        };

        if !package_meta.is_file() {
            // Subpackages are named "<parent>.<sub>".
            let name = match package {
                Some(p) => PackageName::new(format!("{}.{}", name.name(), p.name))
                    .map_err(|e| format!("{}", e))?,
                None => name.clone(),
            };
            let package_deps = match package {
                Some(p) => p
                    .dependencies
                    .iter()
                    .map(|dep| {
                        // An empty dependency name refers to this package
                        // itself, with the given suffix.
                        if dep.name().is_empty() {
                            name.with_suffix(dep.suffix())
                        } else {
                            dep.clone()
                        }
                    })
                    .collect(),
                None => recipe.recipe.package.dependencies.clone(),
            };
            package_toml(
                package_meta,
                recipe,
                Some((Path::new(public_path), &package_file)),
                package,
                package_deps,
                &deps,
            )?;
        }
    }

    Ok(())
}
|
||||
|
||||
/// Write the `.toml` metadata describing one package.
///
/// When `package_file` is provided (public key path + archive path), the
/// pkgar archive is opened and verified to record its blake3 hash, on-disk
/// (network) size, and unpacked storage size; otherwise those fields are
/// left empty/zero. `auto_deps` are merged into `package_deps` without
/// duplicates.
pub fn package_toml(
    toml_path: PathBuf,
    recipe: &CookRecipe,
    package_file: Option<(&Path, &PathBuf)>,
    package_suffix: Option<&OptionalPackageRecipe>,
    mut package_deps: Vec<PackageName>,
    auto_deps: &BTreeSet<PackageName>,
) -> Result<(), String> {
    for dep in auto_deps.iter() {
        if !package_deps.contains(dep) {
            package_deps.push(dep.clone());
        }
    }

    let (hash, network_size, storage_size) = if let Some((pkey_path, archive_path)) = package_file {
        use pkgar_core::PackageSrc;
        let pkey = pkgar_keys::PublicKeyFile::open(pkey_path)
            .map_err(|e| format!("Unable to read public key: {e:?}"))?
            .pkey;
        let mut package = pkgar::PackageFile::new(archive_path, &pkey).map_err(|e| {
            format!(
                "Unable to read packaged pkgar file {}: {e:?}",
                archive_path.display(),
            )
        })?;
        let mt = std::fs::metadata(archive_path).map_err(|e| {
            format!(
                "Unable to read packaged pkgar file {}: {e:?}",
                archive_path.display(),
            )
        })?;
        // On-disk (possibly compressed) size of the archive.
        let package_size = mt.len();
        let header = package.header();
        let storage_size = match header.flags.packaging() {
            pkgar_core::Packaging::LZMA2 => {
                // Compressed archive: installed size is the header/entry
                // table plus the sum of every entry's unpacked size.
                let mut size = header
                    .total_size()
                    .map_err(|e| Error::Pkgar(pkgar::Error::Core(e)))?
                    as u64;
                let entries = package
                    .read_entries()
                    .map_err(|e| format!("Unable to get lzma entry: {e}"))?;
                for entry in entries {
                    let data_reader = package
                        .data_reader(&entry)
                        .map_err(|e| format!("Unable to read lzma entry: {e}"))?;
                    size += data_reader.unpacked_size;
                    // Hand the underlying reader back so the next entry can
                    // be read from the same archive handle.
                    package
                        .restore_reader(data_reader.into_inner())
                        .map_err(|e| format!("Unable to put lzma entry: {e}"))?;
                }
                size
            }
            // Uncompressed: installed size equals the archive size.
            _ => package_size,
        };

        (
            blake3::Hash::from_bytes(package.header().blake3)
                .to_hex()
                .to_string(),
            package_size,
            storage_size,
        )
    } else {
        ("".into(), 0, 0)
    };

    let ident_source = fetch::fetch_get_source_info(recipe)?;

    let package = Package {
        name: PackageName::new(get_package_name(
            recipe.name.without_prefix(),
            package_suffix,
        ))
        .unwrap(),
        version: recipe.guess_version().unwrap_or("TODO".into()),
        target: recipe.target.to_string(),
        blake3: hash,
        network_size,
        storage_size,
        depends: package_deps,
        commit_identifier: ident_source.commit_identifier,
        source_identifier: ident_source.source_identifier,
        time_identifier: ident_source.time_identifier,
        ..Default::default()
    };

    serialize_and_write(&toml_path, &package)?;
    return Ok(());
}
|
||||
|
||||
pub fn package_target(name: &PackageName) -> &'static str {
|
||||
if name.is_host() {
|
||||
redoxer::host_target()
|
||||
} else {
|
||||
redoxer::target()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn package_stage_paths(
|
||||
package: Option<&OptionalPackageRecipe>,
|
||||
target_dir: &Path,
|
||||
) -> (PathBuf, PathBuf, PathBuf) {
|
||||
let mut target_dir = target_dir.to_path_buf();
|
||||
if let Some(cross_target) = crate::cross_target() {
|
||||
// TODO: automatically pass COOKBOOK_CROSS_GNU_TARGET?
|
||||
target_dir = target_dir.join(cross_target)
|
||||
}
|
||||
package_name_paths(package, &target_dir, "stage")
|
||||
}
|
||||
|
||||
pub fn package_source_paths(
|
||||
package: Option<&OptionalPackageRecipe>,
|
||||
target_dir: &Path,
|
||||
) -> (PathBuf, PathBuf, PathBuf) {
|
||||
package_name_paths(package, target_dir, "source")
|
||||
}
|
||||
|
||||
fn package_name_paths(
|
||||
package: Option<&OptionalPackageRecipe>,
|
||||
target_dir: &Path,
|
||||
name: &str,
|
||||
) -> (PathBuf, PathBuf, PathBuf) {
|
||||
let prefix_name = get_package_name(name, package);
|
||||
let package_stage = target_dir.join(&prefix_name);
|
||||
let package_file = package_stage.with_added_extension("pkgar");
|
||||
let package_meta = package_stage.with_added_extension("toml");
|
||||
(package_stage, package_file, package_meta)
|
||||
}
|
||||
|
||||
pub fn get_package_name(name: &str, package: Option<&OptionalPackageRecipe>) -> String {
|
||||
get_package_name_inner(name, package.map(|p| p.name.as_str()))
|
||||
}
|
||||
|
||||
/// Join `name` and an optional subpackage name with a dot.
fn get_package_name_inner(name: &str, package: Option<&str>) -> String {
    match package {
        Some(sub) => format!("{name}.{sub}"),
        None => name.to_string(),
    }
}
|
||||
|
||||
/// Extract `archive_path` into `sysroot_dir` and record the package in
/// `state`.
///
/// Returns `Ok(true)` when the exact same package (matching blake3) is
/// already installed and `reinstall` is false — i.e. nothing was done —
/// and `Ok(false)` after an actual install or upgrade.
pub fn package_handle_push(
    state: &mut PackageState,
    archive_path: &Path,
    sysroot_dir: &Path,
    reinstall: bool,
) -> crate::Result<bool> {
    // The package metadata lives next to the archive as "<name>.toml".
    let archive_toml = archive_path.with_extension("toml");
    let pkey_path = "build/id_ed25519.pub.toml";
    let pkg_toml = Package::from_file(&archive_toml)?;
    match state.installed.get(&pkg_toml.name) {
        // Already installed at the same content hash: skip the extraction.
        Some(s) if !reinstall && pkg_toml.blake3 == s.blake3 => Ok(true),
        Some(s) => {
            // "local" is what remote name from installer is hardcoded into
            let remote_name = "local".to_string();

            // Upgrade path: carry over the existing manual flag and dependents.
            let install_state =
                InstallState::from_package(&pkg_toml, remote_name, s.manual, s.dependents.clone());

            // TODO: use pkgar::replace unless forced reinstall
            pkgar::extract(pkey_path, &archive_path, sysroot_dir)?;

            state.installed.insert(pkg_toml.name.clone(), install_state);

            Ok(false)
        }
        None => {
            // "local" is what remote name from installer is hardcoded into
            let remote_name = "local".to_string();

            // TODO: Handle manual & depedents
            let install_state =
                InstallState::from_package(&pkg_toml, remote_name, true, BTreeSet::new());

            pkgar::extract(pkey_path, &archive_path, sysroot_dir)?;

            // TODO: Inject dependencies
            // TODO: Check if we need to inject remote key

            state.installed.insert(pkg_toml.name.clone(), install_state);

            Ok(false)
        }
    }
}
|
||||
+348
@@ -0,0 +1,348 @@
|
||||
use libc::{self, winsize};
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Write};
|
||||
use std::os::fd::FromRawFd;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::os::unix::process::CommandExt;
|
||||
use std::process::Child;
|
||||
use std::time::Duration;
|
||||
use std::{io, mem, ptr};
|
||||
use std::{
|
||||
io::{PipeReader, PipeWriter},
|
||||
process::Command,
|
||||
};
|
||||
|
||||
pub use std::os::unix::io::RawFd;
|
||||
|
||||
use crate::{Error, Result, wrap_io_err};
|
||||
|
||||
/// Log a formatted line to the active pty logger when `$logger` is `Some`,
/// falling back to stderr otherwise. A newline is appended automatically.
macro_rules! log_to_pty {
    ($logger:expr, $($arg:tt)+) => {
        if $logger.is_some() {
            use std::io::Write;
            // Clone the pipe-writer half so the logger itself stays borrowed
            // immutably; write errors are intentionally ignored (best-effort log).
            let mut logfd = $logger.as_ref().unwrap().1.try_clone().unwrap();
            let _ = logfd.write(format!($($arg)+).as_bytes());
            let _ = logfd.write(&[b'\n']);
        } else {
            eprintln!($($arg)+);
        }
    };
}

pub(crate) use log_to_pty;
|
||||
|
||||
pub type PtyOut<'a> = Option<(&'a mut UnixSlavePty, &'a mut PipeWriter)>;
|
||||
|
||||
pub fn setup_pty() -> (
|
||||
Box<dyn Read + Send>,
|
||||
PipeReader,
|
||||
(UnixSlavePty, std::io::PipeWriter),
|
||||
) {
|
||||
let pty_system = UnixPtySystem::default();
|
||||
let pair = pty_system
|
||||
.openpty(PtySize {
|
||||
rows: 24, // Standard terminal size
|
||||
cols: 80, // Standard terminal size
|
||||
..Default::default()
|
||||
})
|
||||
.expect("Unable to open pty");
|
||||
|
||||
// TODO: There's no way to handle stdin
|
||||
let pty_reader = pair
|
||||
.master
|
||||
.try_clone_reader()
|
||||
.expect("Unable to clone pty reader");
|
||||
|
||||
let (log_reader, log_writer) = std::io::pipe().expect("Failed to create log pipe");
|
||||
let pipes = (pair.slave, log_writer);
|
||||
(pty_reader, log_reader, pipes)
|
||||
}
|
||||
|
||||
pub fn flush_pty(logger: &mut PtyOut) {
|
||||
let Some((pty, file)) = logger else {
|
||||
return;
|
||||
};
|
||||
// Not sure if flush actually working
|
||||
let _ = pty.flush();
|
||||
std::thread::sleep(Duration::from_millis(10));
|
||||
let _ = file.flush();
|
||||
}
|
||||
|
||||
pub fn spawn_to_pipe(command: &mut Command, stdout_pipe: &PtyOut) -> Result<Child> {
|
||||
match stdout_pipe {
|
||||
Some(stdout) => stdout.0.spawn_command(command.into()),
|
||||
None => Ok(command.spawn().map_err(wrap_io_err!("Spawning"))?),
|
||||
}
|
||||
}
|
||||
|
||||
/// Write a single line of `text` through the logging macro (pty or stderr).
pub fn write_to_pty(pty: &PtyOut, text: &str) {
    log_to_pty!(pty, "{}", text);
}
|
||||
|
||||
//
|
||||
// based on portable-pty crate
|
||||
// copied here since it isn't flexible enough
|
||||
//
|
||||
|
||||
/// Factory for pty pairs (a vendored subset of the portable-pty API; see the
/// comment above about why it is copied here).
#[derive(Default)]
pub struct UnixPtySystem {}

/// Represents the size of the visible display area in the pty
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PtySize {
    /// The number of lines of text
    pub rows: u16,
    /// The number of columns of text
    pub cols: u16,
    /// The width of a cell in pixels. Note that some systems never
    /// fill this value and ignore it.
    pub pixel_width: u16,
    /// The height of a cell in pixels. Note that some systems never
    /// fill this value and ignore it.
    pub pixel_height: u16,
}
|
||||
|
||||
impl Default for PtySize {
|
||||
fn default() -> Self {
|
||||
PtySize {
|
||||
rows: 24,
|
||||
cols: 80,
|
||||
pixel_width: 0,
|
||||
pixel_height: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Open a master/slave pty pair of the given `size` via `libc::openpty`,
/// marking both descriptors close-on-exec.
fn openpty(size: PtySize) -> Result<(UnixMasterPty, UnixSlavePty)> {
    let mut master: RawFd = -1;
    let mut slave: RawFd = -1;

    let mut size = winsize {
        ws_row: size.rows,
        ws_col: size.cols,
        ws_xpixel: size.pixel_width,
        ws_ypixel: size.pixel_height,
    };

    let result = unsafe {
        // BSDish systems may require mut pointers to some args
        #[allow(clippy::unnecessary_mut_passed)]
        libc::openpty(
            &mut master,
            &mut slave,
            ptr::null_mut(),
            ptr::null_mut(),
            &mut size,
        )
    };

    if result != 0 {
        return Err(Error::from_last_io_error("Opening openpty"));
    }

    // SAFETY: on success openpty populated `master`/`slave` with valid, owned
    // descriptors; wrapping them in File transfers ownership (closed on drop).
    let master = UnixMasterPty {
        fd: PtyFd(unsafe { File::from_raw_fd(master) }),
    };
    let slave = UnixSlavePty {
        fd: PtyFd(unsafe { File::from_raw_fd(slave) }),
    };

    // Ensure that these descriptors will get closed when we execute
    // the child process. This is done after constructing the Pty
    // instances so that we ensure that the Ptys get drop()'d if
    // the cloexec() functions fail (unlikely!).
    cloexec(master.fd.as_raw_fd())?;
    cloexec(slave.fd.as_raw_fd())?;

    Ok((master, slave))
}
|
||||
|
||||
/// A connected master/slave pty pair.
pub struct PtyPair {
    // slave is listed first so that it is dropped first.
    // The drop order is stable and specified by rust rfc 1857
    pub slave: UnixSlavePty,
    pub master: UnixMasterPty,
}
|
||||
|
||||
impl UnixPtySystem {
|
||||
fn openpty(&self, size: PtySize) -> Result<PtyPair> {
|
||||
let (master, slave) = openpty(size)?;
|
||||
Ok(PtyPair {
|
||||
master: master,
|
||||
slave: slave,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Newtype over the pty's `File` so pty-specific behavior (EIO-as-EOF reads,
/// resize ioctls, spawning) can be attached; derefs to the inner `File`.
struct PtyFd(pub File);
impl std::ops::Deref for PtyFd {
    type Target = File;
    fn deref(&self) -> &File {
        &self.0
    }
}
|
||||
|
||||
impl Read for PtyFd {
    /// Read from the pty, translating EIO into EOF (`Ok(0)`).
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match self.0.read(buf) {
            Err(ref e) if e.raw_os_error() == Some(libc::EIO) => {
                // EIO indicates that the slave pty has been closed.
                // Treat this as EOF so that std::io::Read::read_to_string
                // and similar functions gracefully terminate when they
                // encounter this condition
                Ok(0)
            }
            x => x,
        }
    }
}
|
||||
|
||||
impl PtyFd {
    /// Set the pty window size via the TIOCSWINSZ ioctl.
    fn resize(&self, size: PtySize) -> Result<()> {
        let ws_size = winsize {
            ws_row: size.rows,
            ws_col: size.cols,
            ws_xpixel: size.pixel_width,
            ws_ypixel: size.pixel_height,
        };

        // SAFETY: TIOCSWINSZ only reads from the valid winsize we point at.
        if unsafe {
            libc::ioctl(
                self.0.as_raw_fd(),
                libc::TIOCSWINSZ as _,
                &ws_size as *const _,
            )
        } != 0
        {
            return Err(Error::from_last_io_error("ioctl resize (TIOCSWINSZ)"));
        }

        Ok(())
    }

    /// Query the pty window size via the TIOCGWINSZ ioctl.
    fn get_size(&self) -> Result<PtySize> {
        let mut size: winsize = unsafe { mem::zeroed() };
        // SAFETY: TIOCGWINSZ writes into the zero-initialized winsize.
        if unsafe {
            libc::ioctl(
                self.0.as_raw_fd(),
                libc::TIOCGWINSZ as _,
                &mut size as *mut _,
            )
        } != 0
        {
            return Err(Error::from_last_io_error("ioctl get size (TIOCGWINSZ)"));
        }
        Ok(PtySize {
            rows: size.ws_row,
            cols: size.ws_col,
            pixel_width: size.ws_xpixel,
            pixel_height: size.ws_ypixel,
        })
    }

    /// Spawn `cmd` with stdout/stderr attached to this pty, resetting signal
    /// handlers and starting a new session in the child before exec.
    fn spawn_command(&self, cmd: &mut Command) -> Result<std::process::Child> {
        unsafe {
            cmd
                // .stdin(self.as_stdio()?)
                .stdout(self.try_clone().map_err(wrap_io_err!("Cloning pty"))?)
                .stderr(self.try_clone().map_err(wrap_io_err!("Cloning pty"))?)
                .pre_exec(move || {
                    // Clean up a few things before we exec the program
                    // Clear out any potentially problematic signal
                    // dispositions that we might have inherited
                    for signo in &[
                        libc::SIGCHLD,
                        libc::SIGHUP,
                        libc::SIGINT,
                        libc::SIGQUIT,
                        libc::SIGTERM,
                        libc::SIGALRM,
                    ] {
                        libc::signal(*signo, libc::SIG_DFL);
                    }

                    // Reset the signal mask with an all-zero set.
                    // NOTE(review): a zeroed sigset_t is treated as empty
                    // here; sigemptyset() would be the portable way — confirm.
                    let empty_set: libc::sigset_t = std::mem::zeroed();
                    libc::sigprocmask(libc::SIG_SETMASK, &empty_set, std::ptr::null_mut());

                    // Establish ourselves as a session leader.
                    if libc::setsid() == -1 {
                        return Err(io::Error::last_os_error());
                    }

                    Ok(())
                })
        };

        let mut child = cmd.spawn().map_err(wrap_io_err!("Spawning cmd"))?;

        // Ensure that we close out the slave fds that Child retains;
        // they are not what we need (we need the master side to reference
        // them) and won't work in the usual way anyway.
        // In practice these are None, but it seems best to be move them
        // out in case the behavior of Command changes in the future.
        // child.stdin.take();
        child.stdout.take();
        child.stderr.take();

        Ok(child)
    }

    /// Flush buffered writes on the underlying file.
    fn flush(&mut self) -> Result<()> {
        self.0.flush().map_err(wrap_io_err!("Flushing pty"))
    }
}
|
||||
|
||||
/// Represents the master end of a pty.
/// The file descriptor will be closed when the Pty is dropped.
pub struct UnixMasterPty {
    // Owned pty fd; dropping the File closes it.
    fd: PtyFd,
}

/// Represents the slave end of a pty.
/// The file descriptor will be closed when the Pty is dropped.
pub struct UnixSlavePty {
    // Owned pty fd; dropping the File closes it.
    fd: PtyFd,
}
|
||||
|
||||
/// Helper function to set the close-on-exec flag for a raw descriptor
|
||||
fn cloexec(fd: RawFd) -> Result<()> {
|
||||
let flags = unsafe { libc::fcntl(fd, libc::F_GETFD) };
|
||||
if flags == -1 {
|
||||
return Err(Error::from_last_io_error("fcntl to read flags"));
|
||||
}
|
||||
let result = unsafe { libc::fcntl(fd, libc::F_SETFD, flags | libc::FD_CLOEXEC) };
|
||||
if result == -1 {
|
||||
return Err(Error::from_last_io_error("fcntl to set CLOEXEC"));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl UnixSlavePty {
|
||||
fn spawn_command(&self, builder: &mut Command) -> Result<std::process::Child> {
|
||||
Ok(self.fd.spawn_command(builder)?)
|
||||
}
|
||||
fn flush(&mut self) -> Result<()> {
|
||||
self.fd.flush()
|
||||
}
|
||||
}
|
||||
|
||||
impl UnixMasterPty {
|
||||
#[allow(unused)]
|
||||
fn resize(&self, size: PtySize) -> Result<()> {
|
||||
self.fd.resize(size)
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
fn get_size(&self) -> Result<PtySize> {
|
||||
self.fd.get_size()
|
||||
}
|
||||
|
||||
fn try_clone_reader(&self) -> Result<Box<dyn Read + Send>> {
|
||||
let fd = PtyFd(
|
||||
self.fd
|
||||
.try_clone()
|
||||
.map_err(wrap_io_err!("Cloning pty fd"))?,
|
||||
);
|
||||
Ok(Box::new(fd))
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,417 @@
|
||||
// Scripts here are executed using "cookbook_redoxer env", where CC, RUSTFLAGS, etc. are defined.
|
||||
// Look up redoxer env script if you want to see how they work.
|
||||
|
||||
/// Shell preamble shared between build scripts. Defines `DYNAMIC_INIT` /
/// `DYNAMIC_STATIC_INIT` (which switch the configure/cmake/meson flag arrays
/// and linker flags to shared or shared+static builds), an autotools
/// regeneration helper, and `GNU_CONFIG_GET` for fetching a fresh config.sub.
///
/// Fixes over the previous revision:
/// * the `type -t reexport_flags` check is now quoted — the unquoted form
///   degenerated to `[ function = ]` (a test error) when the function was
///   not defined;
/// * the `find` in `autotools_recursive_regenerate` groups its `-name`
///   alternatives so `-type f` applies to both, not just `configure.in`.
pub(crate) static SHARED_PRESCRIPT: &str = r#"
# Build dynamically
function DYNAMIC_INIT {
    case "${TARGET}" in
        "i586-unknown-redox" | "riscv64gc-unknown-redox")
            [ -z "${COOKBOOK_VERBOSE}" ] || echo "WARN: ${TARGET} does not support dynamic linking." >&2
            return
            ;;
    esac

    [ -z "${COOKBOOK_VERBOSE}" ] || echo "DEBUG: Program is being compiled dynamically."

    COOKBOOK_CONFIGURE_FLAGS=(
        --host="${GNU_TARGET}"
        --prefix="/usr"
        --enable-shared
        --disable-static
    )

    COOKBOOK_CMAKE_FLAGS=(
        -DBUILD_SHARED_LIBS=True
        -DENABLE_SHARED=True
        -DENABLE_STATIC=False
    )

    COOKBOOK_MESON_FLAGS=(
        --buildtype release
        --wrap-mode nofallback
        -Ddefault_library=shared
        -Dprefix=/usr
    )

    # TODO: check paths for spaces
    export LDFLAGS="${USER_LDFLAGS}-Wl,-rpath-link,${COOKBOOK_SYSROOT}/lib -L${COOKBOOK_SYSROOT}/lib"
    export RUSTFLAGS="-C target-feature=-crt-static -L native=${COOKBOOK_SYSROOT}/lib -C link-arg=-Wl,-rpath-link,${COOKBOOK_SYSROOT}/lib"
    export COOKBOOK_DYNAMIC=1

    # Quoted on both sides: `type -t` prints nothing when the name is
    # undefined, and the unquoted comparison was a test syntax error.
    if [ "$(type -t reexport_flags)" = "function" ]; then
        reexport_flags
    fi
}

COOKBOOK_AUTORECONF="autoreconf"
autotools_recursive_regenerate() {
    # Parenthesized so -type f constrains both -name alternatives.
    for f in $(find . \( -name configure.ac -o -name configure.in \) -type f | sort); do
        echo "* autotools regen in '$(dirname $f)'..."
        ( cd "$(dirname "$f")" && "${COOKBOOK_AUTORECONF}" -fvi "$@" -I${COOKBOOK_HOST_SYSROOT}/share/aclocal )
    done
}

# Build both dynamically and statically
function DYNAMIC_STATIC_INIT {
    DYNAMIC_INIT
    if [ "${COOKBOOK_DYNAMIC}" == "1" ]
    then
        COOKBOOK_CONFIGURE_FLAGS=(
            --host="${GNU_TARGET}"
            --prefix="/usr"
            --enable-shared
            --enable-static
        )

        COOKBOOK_CMAKE_FLAGS=(
            -DBUILD_SHARED_LIBS=True
            -DENABLE_SHARED=True
            -DENABLE_STATIC=True
        )

        COOKBOOK_MESON_FLAGS=(
            --buildtype release
            --wrap-mode nofallback
            -Ddefault_library=both
            -Dprefix=/usr
        )
    fi
}

function GNU_CONFIG_GET {
    wget -O "$1" "https://gitlab.redox-os.org/redox-os/gnu-config/-/raw/master/config.sub?inline=false"
}
"#;
|
||||
|
||||
/// Shell preamble prepended to every recipe build script: sets up PATH,
/// toolchain, cargo target dir, sysroot C/pkg-config flags, and defines the
/// `cookbook_cargo*`, `cookbook_configure`, `cookbook_cmake` and
/// `cookbook_meson` build templates.
///
/// Fix over the previous revision: the generated CMake toolchain file wrote
/// `set(CMAKE_PREFIX_PATH, ${sysroot})` — CMake argument lists are
/// whitespace-separated, so the stray comma set a variable literally named
/// `CMAKE_PREFIX_PATH,` and the real prefix path was never configured.
pub(crate) static BUILD_PRESCRIPT: &str = r#"
# Add cookbook bins to path
export PATH="${COOKBOOK_ROOT}/bin:${PATH}"

# Add toolchain dir to path if exists
if [ ! -z "${COOKBOOK_TOOLCHAIN}" ]
then
    export PATH="${COOKBOOK_TOOLCHAIN}/bin:${PATH}"
    export LD_LIBRARY_PATH="${COOKBOOK_TOOLCHAIN}/lib:${LD_LIBRARY_PATH}"
fi

# This puts cargo build artifacts in the build directory
export CARGO_TARGET_DIR="${COOKBOOK_BUILD}/target"

# This adds the sysroot includes for most C compilation
#TODO: check paths for spaces!
export CPPFLAGS="${CPPFLAGS:+$CPPFLAGS }-I${COOKBOOK_SYSROOT}/include"

# This adds the sysroot libraries and compiles binaries statically for most C compilation
#TODO: check paths for spaces!
USER_LDFLAGS="${LDFLAGS:+$LDFLAGS }"
export LDFLAGS="${USER_LDFLAGS}-L${COOKBOOK_SYSROOT}/lib --static"

# This reexport C variables into custom build script that can be consumed by cc crate
function reexport_flags {
    target=${TARGET//-/_}
    export CFLAGS_${target}="${CFLAGS:+$CFLAGS }${CPPFLAGS}"
    export CXXFLAGS_${target}="${CXXFLAGS:+$CXXFLAGS }${CPPFLAGS}"
    export LDFLAGS_${target}="${LDFLAGS}"
}

# These ensure that pkg-config gets the right flags from the sysroot
if [ "${TARGET}" != "${COOKBOOK_HOST_TARGET}" ]
then
    export PKG_CONFIG_ALLOW_CROSS=1
    export PKG_CONFIG_PATH=
    export PKG_CONFIG_LIBDIR="${COOKBOOK_SYSROOT}/lib/pkgconfig"
    export PKG_CONFIG_SYSROOT_DIR="${COOKBOOK_SYSROOT}"
fi

# To build the debug version of a Cargo program, add COOKBOOK_DEBUG=true, and
# to not strip symbols from the final package, add COOKBOOK_NOSTRIP=true to the recipe
# (or to your environment) before calling cookbook_cargo or cookbook_cargo_packages
build_type=release
install_flags=--no-track
build_flags=--release
if [ ! -z "${COOKBOOK_DEBUG}" ]
then
    install_flags+=" --debug"
    build_flags=
    build_type=debug
    export CPPFLAGS="${CPPFLAGS} -g"
fi

if [ ! -z "${COOKBOOK_OFFLINE}" ]
then
    build_flags+=" --offline"
    install_flags+=" --offline"
fi

reexport_flags

COOKBOOK_CARGO="${COOKBOOK_REDOXER}"
COOKBOOK_CARGO_FLAGS=(
    --locked
)
# cargo template using cargo install
function cookbook_cargo {
    "${COOKBOOK_CARGO}" install \
        --path "${COOKBOOK_SOURCE}${COOKBOOK_CARGO_PATH:+/$COOKBOOK_CARGO_PATH}" \
        --root "${COOKBOOK_STAGE}/usr" \
        -j "${COOKBOOK_MAKE_JOBS}" ${install_flags} \
        ${COOKBOOK_CARGO_FLAGS[@]} "$@"
}

# cargo template using cargo build (prefixed name)
function cookbook_cargo_build {
    recipe="${recipe:-$(basename "${COOKBOOK_RECIPE}")}"
    bin_dir="${bin_dir:-.}"
    bin_flags="${bin_flags:-}"
    bin_name="${bin_name:-$(basename "${COOKBOOK_CARGO_PATH}")}"
    bin_final_name="${bin_final_name:-${recipe}_${bin_name//_/-}}"
    mkdir -pv "${COOKBOOK_STAGE}/usr/bin"
    "${COOKBOOK_CARGO}" build \
        --manifest-path "${COOKBOOK_SOURCE}${COOKBOOK_CARGO_PATH:+/$COOKBOOK_CARGO_PATH}/Cargo.toml" \
        ${bin_flags} ${build_flags} -j "${COOKBOOK_MAKE_JOBS}" ${COOKBOOK_CARGO_FLAGS[@]}
    cp -v \
        "target/${TARGET}/${build_type}/${bin_dir}/${bin_name}" \
        "${COOKBOOK_STAGE}/usr/bin/${bin_final_name}"
    unset bin_name bin_flags bin_dir bin_final_name
}

# helper for installing binaries that are cargo examples
function cookbook_cargo_examples {
    recipe="$(basename "${COOKBOOK_RECIPE}")"
    for example in "$@"
    do
        bin_dir="examples" bin_name="${example}" bin_flags="--example ${example}" cookbook_cargo_build
    done
}

# helper for installing binaries that are cargo packages
function cookbook_cargo_packages {
    recipe="$(basename "${COOKBOOK_RECIPE}")"
    mkdir -pv "${COOKBOOK_STAGE}/usr/bin"
    for package in "$@"
    do
        bin_name="${package}" bin_flags="--package ${package}" bin_final_name="${package//_/-}" cookbook_cargo_build
    done
}

# configure template
COOKBOOK_CONFIGURE="${COOKBOOK_SOURCE}/configure"
COOKBOOK_CONFIGURE_FLAGS=(
    --host="${GNU_TARGET}"
    --prefix="/usr"
    --disable-shared
    --enable-static
)
COOKBOOK_MAKE="make"

function cookbook_configure {
    "${COOKBOOK_CONFIGURE}" "${COOKBOOK_CONFIGURE_FLAGS[@]}" "$@"
    "${COOKBOOK_MAKE}" -j "${COOKBOOK_MAKE_JOBS}"
    "${COOKBOOK_MAKE}" install DESTDIR="${COOKBOOK_STAGE}"
}

COOKBOOK_CMAKE="cmake"
COOKBOOK_NINJA="ninja"
COOKBOOK_CMAKE_FLAGS=(
    -DBUILD_SHARED_LIBS=False
    -DENABLE_SHARED=False
    -DENABLE_STATIC=True
)

function generate_cookbook_cmake_file {
    target=$1
    gcc_prefix=$2
    sysroot=$3
    file=$4
    arch=$(echo "$target" | cut -d - -f1)
    os=$(echo "$target" | cut -d - -f3)

    if [ "$os" = "linux" ]; then
        SYSTEM_NAME="Linux"
    else
        SYSTEM_NAME="UnixPaths"
    fi

    cat > $file <<EOF
set(CMAKE_AR ${gcc_prefix}ar)
set(CMAKE_CXX_COMPILER ${gcc_prefix}g++)
set(CMAKE_C_COMPILER ${gcc_prefix}gcc)
set(CMAKE_FIND_ROOT_PATH ${sysroot})
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_PLATFORM_USES_PATH_WHEN_NO_SONAME 1)
set(CMAKE_PREFIX_PATH ${sysroot})
set(CMAKE_RANLIB ${gcc_prefix}ranlib)
set(CMAKE_SHARED_LIBRARY_SONAME_C_FLAG "-Wl,-soname,")
set(CMAKE_SYSTEM_NAME ${SYSTEM_NAME})
set(CMAKE_SYSTEM_PROCESSOR ${arch})
EOF

    if [ "$target" = "$TARGET" ]
    then
        echo "set(CMAKE_C_FLAGS \"${CFLAGS} ${CPPFLAGS}\")" >> $file
        echo "set(CMAKE_CXX_FLAGS \"${CFLAGS} ${CPPFLAGS}\")" >> $file
    fi

    if [ -n "${CC_WRAPPER}" ]
    then
        echo "set(CMAKE_C_COMPILER_LAUNCHER ${CC_WRAPPER})" >> $file
        echo "set(CMAKE_CXX_COMPILER_LAUNCHER ${CC_WRAPPER})" >> $file
    fi
}

function cookbook_cmake {

    if [ "$TARGET" = "$COOKBOOK_HOST_TARGET" ]; then
        GCC_PREFIX=
    else
        GCC_PREFIX=$GNU_TARGET-
    fi
    generate_cookbook_cmake_file "$TARGET" "$GCC_PREFIX" "$COOKBOOK_SYSROOT" cross_file.cmake

    "${COOKBOOK_CMAKE}" "${COOKBOOK_SOURCE}" \
        -DCMAKE_BUILD_TYPE=Release \
        -DCMAKE_CROSSCOMPILING=True \
        -DCMAKE_INSTALL_INCLUDEDIR=include \
        -DCMAKE_INSTALL_LIBDIR=lib \
        -DCMAKE_INSTALL_OLDINCLUDEDIR=/include \
        -DCMAKE_INSTALL_PREFIX=/usr \
        -DCMAKE_INSTALL_SBINDIR=bin \
        -DCMAKE_TOOLCHAIN_FILE=cross_file.cmake \
        -GNinja \
        -Wno-dev \
        "${COOKBOOK_CMAKE_FLAGS[@]}" \
        "$@"

    "${COOKBOOK_NINJA}" -j"${COOKBOOK_MAKE_JOBS}"
    DESTDIR="${COOKBOOK_STAGE}" "${COOKBOOK_NINJA}" install -j"${COOKBOOK_MAKE_JOBS}"
}

COOKBOOK_MESON="meson"
COOKBOOK_MESON_FLAGS=(
    --buildtype release
    --wrap-mode nofallback
    -Ddefault_library=static
    -Dprefix=/usr
)
function cookbook_meson {
    # TODO: do this in rust, to handle path spaces as well
    function format_flags {
        local flags=($1)
        local formatted=""
        for i in "${!flags[@]}"; do
            formatted+="'${flags[$i]}'"
            if [ $i -lt $((${#flags[@]} - 1)) ]; then
                formatted+=", "
            fi
        done
        echo "$formatted"
    }

    echo "[binaries]" > cross_file.txt
    echo "c = [$(printf "'%s', " $CC | sed 's/, $//')]" >> cross_file.txt
    echo "cpp = [$(printf "'%s', " $CXX | sed 's/, $//')]" >> cross_file.txt
    echo "ar = '${AR}'" >> cross_file.txt
    echo "strip = '${STRIP}'" >> cross_file.txt
    echo "pkg-config = '${PKG_CONFIG}'" >> cross_file.txt
    echo "llvm-config = '${TARGET}-llvm-config'" >> cross_file.txt
    echo "glib-compile-resources = 'glib-compile-resources'" >> cross_file.txt
    echo "glib-compile-schemas = 'glib-compile-schemas'" >> cross_file.txt

    echo "[host_machine]" >> cross_file.txt
    echo "system = '$(echo "${TARGET}" | cut -d - -f3)'" >> cross_file.txt
    echo "cpu_family = '$(echo "${TARGET}" | cut -d - -f1)'" >> cross_file.txt
    echo "cpu = '$(echo "${TARGET}" | cut -d - -f1)'" >> cross_file.txt
    echo "endian = 'little'" >> cross_file.txt

    echo "[built-in options]" >> cross_file.txt
    echo "prefix = '/usr'" >> cross_file.txt
    echo "libdir = 'lib'" >> cross_file.txt
    echo "bindir = 'bin'" >> cross_file.txt
    echo "c_args = [$(format_flags "$CFLAGS $CPPFLAGS")]" >> cross_file.txt
    echo "cpp_args = [$(format_flags "$CXXFLAGS $CPPFLAGS")]" >> cross_file.txt
    echo "c_link_args = [$(format_flags "$LDFLAGS")]" >> cross_file.txt

    echo "[properties]" >> cross_file.txt
    echo "needs_exe_wrapper = true" >> cross_file.txt
    echo "sys_root = '${COOKBOOK_SYSROOT}'" >> cross_file.txt

    unset AR AS CC CXX LD NM OBJCOPY OBJDUMP PKG_CONFIG RANLIB READELF STRIP

    "${COOKBOOK_MESON}" setup \
        "${COOKBOOK_SOURCE}" \
        . \
        --cross-file cross_file.txt \
        "${COOKBOOK_MESON_FLAGS[@]}" \
        "$@"
    "${COOKBOOK_NINJA}" -j"${COOKBOOK_MAKE_JOBS}"
    DESTDIR="${COOKBOOK_STAGE}" "${COOKBOOK_NINJA}" install -j"${COOKBOOK_MAKE_JOBS}"
}
"#;
|
||||
|
||||
pub(crate) static BUILD_POSTSCRIPT: &str = r#"
|
||||
# Strip binaries
|
||||
for dir in "${COOKBOOK_STAGE}/bin" "${COOKBOOK_STAGE}/usr/bin" "${COOKBOOK_STAGE}/libexec" "${COOKBOOK_STAGE}/usr/libexec"
|
||||
do
|
||||
if [ -d "${dir}" ] && [ -z "${COOKBOOK_NOSTRIP}" ]
|
||||
then
|
||||
find "${dir}" -type f -exec "${GNU_TARGET}-strip" -v {} ';'
|
||||
fi
|
||||
done
|
||||
|
||||
# Remove libtool files
|
||||
for dir in "${COOKBOOK_STAGE}/lib" "${COOKBOOK_STAGE}/usr/lib"
|
||||
do
|
||||
if [ -d "${dir}" ]
|
||||
then
|
||||
find "${dir}" -type f -name '*.la' -exec rm -fv {} ';'
|
||||
fi
|
||||
done
|
||||
|
||||
# Remove cargo install files
|
||||
for file in .crates.toml .crates2.json
|
||||
do
|
||||
if [ -f "${COOKBOOK_STAGE}/${file}" ]
|
||||
then
|
||||
rm -v "${COOKBOOK_STAGE}/${file}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Add pkgname to appstream metadata
|
||||
for dir in "${COOKBOOK_STAGE}/share/metainfo" "${COOKBOOK_STAGE}/usr/share/metainfo"
|
||||
do
|
||||
if [ -d "${dir}" ]
|
||||
then
|
||||
find "${dir}" -type f -name '*.xml' -exec sed -i 's|</component>|<pkgname>'"${COOKBOOK_NAME}"'</pkgname></component>|g' {} ';'
|
||||
fi
|
||||
done
|
||||
"#;
|
||||
|
||||
pub(crate) static GIT_RESET_BRANCH: &str = r#"
|
||||
ORIGIN_BRANCH="$(git branch --remotes | grep '^ origin/HEAD -> ' | cut -d ' ' -f 5-)"
|
||||
if [ -n "$BRANCH" ]
|
||||
then
|
||||
ORIGIN_BRANCH="origin/$BRANCH"
|
||||
fi
|
||||
|
||||
if [ "$(git rev-parse HEAD)" != "$(git rev-parse $ORIGIN_BRANCH)" ]
|
||||
then
|
||||
git checkout -B "$(echo "$ORIGIN_BRANCH" | cut -d / -f 2-)" "$ORIGIN_BRANCH"
|
||||
fi"#;
|
||||
|
||||
pub static KILL_ALL_PID: &str = r#"
|
||||
THISPID=$$
|
||||
CHILDREN=$(ps -o pid= --ppid $PID | grep -v $THISPID);
|
||||
|
||||
ALL_DESCENDANTS='';
|
||||
|
||||
while [ -n "$CHILDREN" ]; do
|
||||
ALL_DESCENDANTS="$ALL_DESCENDANTS $CHILDREN";
|
||||
CHILDREN=$(ps -o pid= --ppid $(echo $CHILDREN) | tr '\n' ' ');
|
||||
done;
|
||||
|
||||
if [ -n "$ALL_DESCENDANTS" ]; then
|
||||
kill -9 $ALL_DESCENDANTS;
|
||||
fi
|
||||
"#;
|
||||
@@ -0,0 +1,196 @@
|
||||
use anyhow::Context;
|
||||
use pkg::{Package, PackageName};
|
||||
use std::fmt::Write as _;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fs::read_to_string,
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use crate::recipe::CookRecipe;
|
||||
|
||||
/// Status of one package encountered while walking a dependency tree.
pub enum WalkTreeEntry<'a> {
    /// A package archive exists on disk; carries its path and size in bytes.
    Built(&'a PathBuf, u64),
    /// No package archive was found at the expected stage path.
    NotBuilt,
    /// The package was already visited earlier in this walk.
    Deduped,
    /// The package is absent from the recipe map supplied to the walk.
    Missing,
}
|
||||
|
||||
/// Prints the dependency tree rooted at `package_name` to stdout.
///
/// Thin pass-through to [`walk_tree_entry`] with [`display_pkg_fn`] as the
/// per-node callback; see `walk_tree_entry` for the meaning of the shared
/// parameters (`is_build_tree`, `visited`, and the running totals).
pub fn display_tree_entry(
    package_name: &PackageName,
    recipe_map: &HashMap<&PackageName, &CookRecipe>,
    prefix: &str,
    is_last: bool,
    is_build_tree: bool,
    visited: &mut HashSet<PackageName>,
    total_size: &mut u64,
    total_count: &mut u64,
) -> anyhow::Result<()> {
    walk_tree_entry(
        package_name,
        recipe_map,
        prefix,
        is_last,
        is_build_tree,
        visited,
        total_size,
        total_count,
        display_pkg_fn,
    )
}
|
||||
|
||||
pub fn walk_tree_entry(
|
||||
package_name: &PackageName,
|
||||
recipe_map: &HashMap<&PackageName, &CookRecipe>,
|
||||
prefix: &str,
|
||||
is_last: bool,
|
||||
is_build_tree: bool,
|
||||
visited: &mut HashSet<PackageName>,
|
||||
total_size: &mut u64,
|
||||
total_count: &mut u64,
|
||||
op: fn(&PackageName, &str, bool, &WalkTreeEntry) -> anyhow::Result<bool>,
|
||||
) -> anyhow::Result<()> {
|
||||
let cook_recipe = match recipe_map.get(package_name) {
|
||||
Some(r) => r,
|
||||
None => {
|
||||
// Data not provided, will not be processed by the build system
|
||||
op(package_name, prefix, is_last, &WalkTreeEntry::Missing)?;
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let (_, pkg_path, pkg_toml) = cook_recipe.stage_paths();
|
||||
|
||||
let deduped = visited.contains(package_name);
|
||||
let entry = match (std::fs::metadata(&pkg_path), deduped) {
|
||||
(_, true) => WalkTreeEntry::Deduped,
|
||||
(Ok(meta), _) => WalkTreeEntry::Built(&pkg_path, meta.len()),
|
||||
(Err(_), _) => WalkTreeEntry::NotBuilt,
|
||||
};
|
||||
|
||||
let cached = op(package_name, prefix, is_last, &entry)?;
|
||||
|
||||
if deduped || cached {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
visited.insert(package_name.clone());
|
||||
if !cached {
|
||||
if is_build_tree {
|
||||
if matches!(entry, WalkTreeEntry::NotBuilt) {
|
||||
*total_size += 1;
|
||||
}
|
||||
} else {
|
||||
if let WalkTreeEntry::Built(_p, pkg_size) = &entry {
|
||||
*total_size += pkg_size;
|
||||
}
|
||||
}
|
||||
*total_count += 1;
|
||||
}
|
||||
let pkg_meta: Package;
|
||||
|
||||
let mut all_deps_set: HashSet<&PackageName> = HashSet::new();
|
||||
if is_build_tree {
|
||||
all_deps_set.extend(cook_recipe.recipe.build.dependencies.iter());
|
||||
all_deps_set.extend(cook_recipe.recipe.package.dependencies.iter());
|
||||
} else {
|
||||
if let Ok(pkg_toml_str) = read_to_string(&pkg_toml) {
|
||||
// more accurate with auto deps
|
||||
pkg_meta = toml::from_str(&pkg_toml_str)
|
||||
.context(format!("Unable to parse {}", pkg_toml.display()))?;
|
||||
all_deps_set.extend(pkg_meta.depends.iter());
|
||||
}
|
||||
}
|
||||
|
||||
if all_deps_set.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let sorted_deps: Vec<&PackageName> = all_deps_set.into_iter().collect();
|
||||
let deps_count = sorted_deps.len();
|
||||
let child_prefix = if is_last { " " } else { "│ " };
|
||||
for (i, dep_name) in sorted_deps.iter().enumerate() {
|
||||
walk_tree_entry(
|
||||
dep_name,
|
||||
recipe_map,
|
||||
&format!("{}{}", prefix, child_prefix),
|
||||
i == deps_count - 1,
|
||||
is_build_tree,
|
||||
visited,
|
||||
total_size,
|
||||
total_count,
|
||||
op,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn display_pkg_fn(
|
||||
package_name: &PackageName,
|
||||
prefix: &str,
|
||||
is_last: bool,
|
||||
entry: &WalkTreeEntry,
|
||||
) -> anyhow::Result<bool> {
|
||||
let size_str = match entry {
|
||||
WalkTreeEntry::Built(_path_buf, size) => format!("[{}]", format_size(*size)),
|
||||
WalkTreeEntry::NotBuilt => "(not built)".to_string(),
|
||||
WalkTreeEntry::Deduped => "".to_string(),
|
||||
WalkTreeEntry::Missing => "(omitted)".to_string(),
|
||||
};
|
||||
let line_prefix = if is_last { "└── " } else { "├── " };
|
||||
println!("{}{}{} {}", prefix, line_prefix, package_name, size_str);
|
||||
// TODO: check dirty build by checking source ident
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
pub fn walk_file_tree(dir: &PathBuf, prefix: &str, buffer: &mut String) -> std::io::Result<u64> {
|
||||
if !dir.is_dir() {
|
||||
return Ok(0);
|
||||
}
|
||||
let fmt_err = std::io::Error::other;
|
||||
let mut entries: Vec<_> = std::fs::read_dir(dir)?.filter_map(|e| e.ok()).collect();
|
||||
entries.sort_by(|a, b| a.file_name().cmp(&b.file_name()));
|
||||
let mut total_size = 0;
|
||||
for (index, entry) in entries.iter().enumerate() {
|
||||
let path = entry.path();
|
||||
let metadata = entry.metadata()?;
|
||||
let is_last = index == entries.len() - 1;
|
||||
|
||||
let line_prefix = if is_last { "└── " } else { "├── " };
|
||||
let file_name = path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("Unknown");
|
||||
|
||||
if path.is_dir() {
|
||||
writeln!(buffer, "{}{}{}/", prefix, line_prefix, file_name).map_err(fmt_err)?;
|
||||
let new_prefix = format!("{}{}", prefix, if is_last { " " } else { "│ " });
|
||||
walk_file_tree(&path, &new_prefix, buffer)?;
|
||||
} else {
|
||||
let size = metadata.len();
|
||||
total_size += size;
|
||||
writeln!(
|
||||
buffer,
|
||||
"{}{}{} ({})",
|
||||
prefix,
|
||||
line_prefix,
|
||||
file_name,
|
||||
format_size(size)
|
||||
)
|
||||
.map_err(fmt_err)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(total_size)
|
||||
}
|
||||
|
||||
/// Formats a byte count as a human-readable string using binary (1024-based)
/// units, e.g. `1536` -> `"1.50 KiB"`. Zero is special-cased as `"0 B"`.
pub fn format_size(bytes: u64) -> String {
    if bytes == 0 {
        return "0 B".to_string();
    }
    const UNITS: [&str; 5] = ["B", "KiB", "MiB", "GiB", "TiB"];
    // Clamp the exponent so values >= 1024 TiB still index a valid unit
    // instead of panicking (the unclamped index could reach past UNITS).
    let i = ((bytes as f64).log(1024.0).floor() as usize).min(UNITS.len() - 1);
    let size = bytes as f64 / 1024.0_f64.powi(i as i32);
    format!("{:.2} {}", size, UNITS[i])
}
|
||||
Reference in New Issue
Block a user