Files
RedBear-OS/sources/redbear-0.1.0/patches/P4-supplementary-groups.patch
T
vasilito 5851974b20 feat: build system transition to release fork + archive hardening
Release fork infrastructure:
- REDBEAR_RELEASE=0.1.1 with offline enforcement (fetch/distclean/unfetch blocked)
- 195 BLAKE3-verified source archives in standard format
- Atomic provisioning via provision-release.sh (staging + .complete sentinel file)
- 5-phase improvement plan: restore format auto-detection, source tree
  validation (validate-source-trees.py), archive-map.json, REPO_BINARY fallback

Archive normalization:
- Removed 87 duplicate/unversioned archives from shared pool
- Regenerated all archives in consistent format with source/ + recipe.toml
- BLAKE3SUMS and manifest.json generated from stable tarball set

Patch management:
- verify-patches.sh: pre-sync dry-run report (OK/REVERSED/CONFLICT)
- 121 upstream-absorbed patches moved to absorbed/ directories
- 43 active patches verified clean against rebased sources
- Stress test: base updated to upstream HEAD, relibc reset and patched

Compilation fixes:
- relibc: Vec imports in redox-rt (proc.rs, lib.rs, sys.rs)
- relibc: unsafe from_raw_parts in mod.rs (2024 edition)
- fetch.rs: rev comparison handles short/full hash prefixes
- kibi recipe: corrected rev mismatch

New scripts: restore-sources.sh, provision-release.sh, verify-sources-archived.sh,
check-upstream-releases.sh, validate-source-trees.py, verify-patches.sh,
repair-archive-format.sh, generate-manifest.py

Documentation: AGENTS.md, README.md, local/AGENTS.md updated for release fork model
2026-05-02 01:41:17 +01:00

138 lines
5.1 KiB
Diff

diff --git a/src/context/context.rs b/src/context/context.rs
index c97c516..6d723f4 100644
--- a/src/context/context.rs
+++ b/src/context/context.rs
@@ -148,6 +148,8 @@ pub struct Context {
pub euid: u32,
pub egid: u32,
pub pid: usize,
+ /// Supplementary group IDs for access control decisions.
+ pub groups: Vec<u32>,
// See [`PreemptGuard`]
//
@@ -204,6 +206,7 @@ impl Context {
euid: 0,
egid: 0,
pid: 0,
+ groups: Vec::new(),
#[cfg(feature = "syscall_debug")]
syscall_debug_info: crate::syscall::debug::SyscallDebugInfo::default(),
@@ -479,6 +482,7 @@ impl Context {
uid: self.euid,
gid: self.egid,
pid: self.pid,
+ groups: self.groups.clone(),
}
}
}
diff --git a/src/scheme/mod.rs b/src/scheme/mod.rs
index d30272c..9da2b28 100644
--- a/src/scheme/mod.rs
+++ b/src/scheme/mod.rs
@@ -777,6 +777,7 @@ pub struct CallerCtx {
pub pid: usize,
pub uid: u32,
pub gid: u32,
+ pub groups: alloc::vec::Vec<u32>,
}
impl CallerCtx {
pub fn filter_uid_gid(self, euid: u32, egid: u32) -> Self {
@@ -785,6 +786,7 @@ impl CallerCtx {
pid: self.pid,
uid: euid,
gid: egid,
+ groups: self.groups,
}
} else {
self
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
index 47588e1..6ffb256 100644
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -105,6 +105,7 @@ enum ContextHandle {
// Attr handles, to set ens/euid/egid/pid.
Authority,
Attr,
+ Groups,
Status {
privileged: bool,
@@ -261,6 +262,7 @@ impl ProcScheme {
let handle = match actual_name {
"attrs" => ContextHandle::Attr,
"status" => ContextHandle::Status { privileged: true },
+ "groups" => ContextHandle::Groups,
_ => return Err(Error::new(ENOENT)),
};
@@ -306,6 +308,11 @@ impl ProcScheme {
let id = NonZeroUsize::new(NEXT_ID.fetch_add(1, Ordering::Relaxed))
.ok_or(Error::new(EMFILE))?;
let context = context::spawn(true, Some(id), ret, token)?;
+ {
+ let parent_groups =
+ context::current().read(token.token()).groups.clone();
+ context.write(token.token()).groups = parent_groups;
+ }
HANDLES.write(token.token()).insert(
id.get(),
Handle {
@@ -1271,6 +1278,39 @@ impl ContextHandle {
guard.prio = (info.prio as usize).min(39);
Ok(size_of::<ProcSchemeAttrs>())
}
+ Self::Groups => {
+ const NGROUPS_MAX: usize = 65536;
+ if buf.len() % size_of::<u32>() != 0 {
+ return Err(Error::new(EINVAL));
+ }
+ let count = buf.len() / size_of::<u32>();
+ if count > NGROUPS_MAX {
+ return Err(Error::new(EINVAL));
+ }
+ let mut groups = Vec::with_capacity(count);
+ for chunk in buf.in_exact_chunks(size_of::<u32>()).take(count) {
+ groups.push(chunk.read_u32()?);
+ }
+ let proc_id = {
+ let guard = context.read(token.token());
+ guard.owner_proc_id
+ };
+ {
+ let mut guard = context.write(token.token());
+ guard.groups = groups.clone();
+ }
+ if let Some(pid) = proc_id {
+ let mut contexts = context::contexts(token.downgrade());
+ let (contexts, mut t) = contexts.token_split();
+ for context_ref in contexts.iter() {
+ let mut ctx = context_ref.write(t.token());
+ if ctx.owner_proc_id == Some(pid) {
+ ctx.groups = groups.clone();
+ }
+ }
+ }
+ Ok(count * size_of::<u32>())
+ }
ContextHandle::OpenViaDup => {
let mut args = buf.usizes();
@@ -1475,6 +1515,15 @@ impl ContextHandle {
debug_name,
})
}
+ Self::Groups => {
+ let c = &context.read(token.token());
+ let max = buf.len() / size_of::<u32>();
+ let count = c.groups.len().min(max);
+ for (chunk, gid) in buf.in_exact_chunks(size_of::<u32>()).zip(&c.groups).take(count) {
+ chunk.copy_from_slice(&gid.to_ne_bytes())?;
+ }
+ Ok(count * size_of::<u32>())
+ }
ContextHandle::Sighandler => {
let data = match context.read(token.token()).sig {
Some(ref sig) => SetSighandlerData {