use core::{
mem::offset_of,
ptr::NonNull,
sync::atomic::{AtomicU8, Ordering},
};
use syscall::{
data::{SigProcControl, Sigcontrol},
error::*,
RtSigInfo,
};
use crate::{
proc::{fork_inner, FdGuard},
signal::{get_sigaltstack, inner_c, PosixStackt, RtSigarea, SigStack, PROC_CONTROL_STRUCT},
Tcb,
};
// Set up a stack starting at the very end of the address space, growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 47;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
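// (1 << 47 is the first non-canonical address on x86_64 with 48-bit virtual addresses,
// i.e. one byte past the end of the lower, user-space half.)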
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub tmp_rip: usize,
pub tmp_rsp: usize,
pub tmp_rax: usize,
pub tmp_rdx: usize,
pub tmp_rdi: usize,
pub tmp_rsi: usize,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
pub altstack_top: usize,
pub altstack_bottom: usize,
pub disable_signals_depth: u64,
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
}
#[repr(C, align(16))]
#[derive(Debug, Default)]
pub struct ArchIntRegs {
pub ymm_upper: [u128; 16],
pub fxsave: [u128; 29],
pub r15: usize, // fxsave "available" +0
pub r14: usize, // available +8
pub r13: usize, // available +16
pub r12: usize, // available +24
pub rbp: usize, // available +32
pub rbx: usize, // available +40
pub r11: usize, // outside fxsave, and so on
pub r10: usize,
pub r9: usize,
pub r8: usize,
pub rax: usize,
pub rcx: usize,
pub rdx: usize,
pub rsi: usize,
pub rdi: usize,
pub rflags: usize,
pub rip: usize,
pub rsp: usize,
}
/// Deactivate TLS, used before exec() on Redox so the target executable is not tricked
/// into thinking TLS is already initialized as if it were a thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.fsbase = 0;
env.gsbase = 0;
let _ = syscall::write(*file, &env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "sysv64" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "sysv64" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
crate::child_hook_common(FdGuard::new(new_pid_fd));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
push rbp
mov rbp, rsp
push rbx
push rbp
push r12
push r13
push r14
push r15
sub rsp, 32
stmxcsr [rsp+16]
fnstcw [rsp+24]
mov rdi, rsp
call {fork_impl}
add rsp, 80
pop rbp
ret
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
mov rdi, [rsp]
mov rsi, [rsp + 8]
call {child_hook}
ldmxcsr [rsp + 16]
fldcw [rsp + 24]
xor rax, rax
add rsp, 32
pop r15
pop r14
pop r13
pop r12
pop rbp
pop rbx
pop rbp
ret
"] <= [child_hook = sym child_hook]);
asmfunction!(__relibc_internal_rlct_clone_ret: ["
# Load registers
pop rax
pop rdi
pop rsi
pop rdx
pop rcx
pop r8
pop r9
mov DWORD PTR [rsp - 8], 0x00001F80
ldmxcsr [rsp - 8]
mov WORD PTR [rsp - 8], 0x037F
fldcw [rsp - 8]
# Call entry point
call rax
ret
"] <= []);
asmfunction!(__relibc_internal_sigentry: ["
// Save some registers
mov fs:[{tcb_sa_off} + {sa_tmp_rsp}], rsp
mov fs:[{tcb_sa_off} + {sa_tmp_rax}], rax
mov fs:[{tcb_sa_off} + {sa_tmp_rdx}], rdx
mov fs:[{tcb_sa_off} + {sa_tmp_rdi}], rdi
mov fs:[{tcb_sa_off} + {sa_tmp_rsi}], rsi
// First, select signal, always pick first available bit
1:
// Read standard signal word - first targeting this thread
mov rax, fs:[{tcb_sc_off} + {sc_word}]
mov rdx, rax
shr rdx, 32
and eax, edx
bsf eax, eax
jnz 2f
// If no unblocked thread signal was found, check for process-wide signals.
// This is competitive; we need to atomically check if *we* cleared the process-wide pending
// bit, otherwise restart.
mov eax, [rip + {pctl} + {pctl_off_pending}]
and eax, edx
bsf eax, eax
jz 8f
lea rdi, [rip + {pctl} + {pctl_off_sender_infos}]
mov rdi, [rdi + rax * 8]
lock btr [rip + {pctl} + {pctl_off_pending}], eax
mov fs:[{tcb_sa_off} + {sa_tmp_id_inf}], rdi
jc 9f
8:
// Read second signal word - both process and thread simultaneously.
// This must be done since POSIX requires low-numbered realtime signals to be picked first.
mov edx, fs:[{tcb_sc_off} + {sc_word} + 8]
mov eax, [rip + {pctl} + {pctl_off_pending} + 4]
or eax, edx
and eax, fs:[{tcb_sc_off} + {sc_word} + 12]
bsf eax, eax
jz 7f
bt edx, eax // check if signal was sent to thread specifically
jc 2f // if so, continue as usual
// otherwise, try (competitively) dequeueing realtime signal
mov esi, eax
mov eax, {SYS_SIGDEQUEUE}
mov rdi, fs:[0]
add rdi, {tcb_sa_off} + {sa_tmp_rt_inf} // out pointer of dequeued realtime sig
syscall
test eax, eax
jnz 1b // assumes error can only be EAGAIN
lea eax, [esi + 32]
jmp 9f
2:
mov edx, eax
shr edx, 5
mov rdi, fs:[{tcb_sc_off} + {sc_sender_infos} + rax * 8]
lock btr fs:[{tcb_sc_off} + {sc_word} + rdx * 4], eax
mov fs:[{tcb_sa_off} + {sa_tmp_id_inf}], rdi
add eax, 64 // indicate signal was targeted at thread
9:
sub rsp, {REDZONE_SIZE}
and rsp, -{STACK_ALIGN}
// By now we have selected a signal, stored in eax (6-bit). We now need to choose whether or
// not to switch to the alternate signal stack. If SA_ONSTACK is clear for this signal, then
// skip the sigaltstack logic.
lea rdx, [rip + {pctl} + {pctl_off_actions}]
mov ecx, eax
and ecx, 63
// LEA doesn't support 16x, so just do two x8s.
lea rdx, [rdx + 8 * rcx]
lea rdx, [rdx + 8 * rcx]
bt qword ptr [rdx], {SA_ONSTACK_BIT}
jnc 4f
// Otherwise, switch to the altstack unless RSP already points into it (i.e. the altstack
// is already active). A disabled sigaltstack is equivalent to 'top' == usize::MAX and
// 'bottom' == 0, in which case neither check below will switch.
// If current RSP is above altstack region, switch to altstack
mov rdx, fs:[{tcb_sa_off} + {sa_altstack_top}]
cmp rsp, rdx
cmova rsp, rdx
// If current RSP is below altstack region, also switch to altstack
cmp rsp, fs:[{tcb_sa_off} + {sa_altstack_bottom}]
cmovbe rsp, rdx
.p2align 4
4:
// Now that we have a stack, we can finally start initializing the signal stack!
push fs:[{tcb_sa_off} + {sa_tmp_rsp}]
push fs:[{tcb_sc_off} + {sc_saved_rip}]
push fs:[{tcb_sc_off} + {sc_saved_rflags}]
push fs:[{tcb_sa_off} + {sa_tmp_rdi}]
push fs:[{tcb_sa_off} + {sa_tmp_rsi}]
push fs:[{tcb_sa_off} + {sa_tmp_rdx}]
push rcx
push fs:[{tcb_sa_off} + {sa_tmp_rax}]
push r8
push r9
push r10
push r11
push rbx
push rbp
push r12
push r13
push r14
push r15
sub rsp, (29 + 16) * 16 // fxsave region minus available bytes
fxsave64 [rsp + 16 * 16]
// TODO: self-modifying?
cmp byte ptr [rip + {supports_avx}], 0
je 5f
// Prefer vextractf128 over vextracti128 since the former only requires AVX version 1.
vextractf128 [rsp + 15 * 16], ymm0, 1
vextractf128 [rsp + 14 * 16], ymm1, 1
vextractf128 [rsp + 13 * 16], ymm2, 1
vextractf128 [rsp + 12 * 16], ymm3, 1
vextractf128 [rsp + 11 * 16], ymm4, 1
vextractf128 [rsp + 10 * 16], ymm5, 1
vextractf128 [rsp + 9 * 16], ymm6, 1
vextractf128 [rsp + 8 * 16], ymm7, 1
vextractf128 [rsp + 7 * 16], ymm8, 1
vextractf128 [rsp + 6 * 16], ymm9, 1
vextractf128 [rsp + 5 * 16], ymm10, 1
vextractf128 [rsp + 4 * 16], ymm11, 1
vextractf128 [rsp + 3 * 16], ymm12, 1
vextractf128 [rsp + 2 * 16], ymm13, 1
vextractf128 [rsp + 16], ymm14, 1
vextractf128 [rsp], ymm15, 1
5:
mov [rsp - 4], eax
sub rsp, 64 // alloc space for ucontext fields
mov rdi, rsp
call {inner}
add rsp, 64
fxrstor64 [rsp + 16 * 16]
cmp byte ptr [rip + {supports_avx}], 0
je 6f
vinsertf128 ymm0, ymm0, [rsp + 15 * 16], 1
vinsertf128 ymm1, ymm1, [rsp + 14 * 16], 1
vinsertf128 ymm2, ymm2, [rsp + 13 * 16], 1
vinsertf128 ymm3, ymm3, [rsp + 12 * 16], 1
vinsertf128 ymm4, ymm4, [rsp + 11 * 16], 1
vinsertf128 ymm5, ymm5, [rsp + 10 * 16], 1
vinsertf128 ymm6, ymm6, [rsp + 9 * 16], 1
vinsertf128 ymm7, ymm7, [rsp + 8 * 16], 1
vinsertf128 ymm8, ymm8, [rsp + 7 * 16], 1
vinsertf128 ymm9, ymm9, [rsp + 6 * 16], 1
vinsertf128 ymm10, ymm10, [rsp + 5 * 16], 1
vinsertf128 ymm11, ymm11, [rsp + 4 * 16], 1
vinsertf128 ymm12, ymm12, [rsp + 3 * 16], 1
vinsertf128 ymm13, ymm13, [rsp + 2 * 16], 1
vinsertf128 ymm14, ymm14, [rsp + 16], 1
vinsertf128 ymm15, ymm15, [rsp], 1
6:
add rsp, (29 + 16) * 16
pop r15
pop r14
pop r13
pop r12
pop rbp
pop rbx
pop r11
pop r10
pop r9
pop r8
pop rax
pop rcx
pop rdx
pop rsi
pop rdi
popfq
pop qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
// x86 lacks atomic instructions for setting both the stack and instruction pointer
// simultaneously, except the slow microcoded IRETQ instruction. Thus, we let the arch_pre
// function emulate atomicity between the pop rsp and indirect jump.
.globl __relibc_internal_sigentry_crit_first
__relibc_internal_sigentry_crit_first:
pop rsp
.globl __relibc_internal_sigentry_crit_second
__relibc_internal_sigentry_crit_second:
jmp qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
7:
// A spurious signal occurred. Signals are still disabled here, but will need to be re-enabled.
// restore flags
mov rax, fs:[0] // load FS base
// TODO: Use lahf/sahf rather than pushfq/popfq?
lea rsp, [rax + {tcb_sc_off} + {sc_saved_rflags}]
popfq
// restore stack
mov rsp, fs:[{tcb_sa_off} + {sa_tmp_rsp}]
// move saved RIP away from control, allowing arch_pre to save us if interrupted.
mov rax, fs:[{tcb_sc_off} + {sc_saved_rip}]
mov fs:[{tcb_sa_off} + {sa_tmp_rip}], rax
// restore regs
mov rax, fs:[{tcb_sa_off} + {sa_tmp_rax}]
mov rdx, fs:[{tcb_sa_off} + {sa_tmp_rdx}]
// Re-enable signals. This code can be interrupted once signals are re-enabled, so we need
// to define 'crit_third'.
and qword ptr fs:[{tcb_sc_off} + {sc_control}], ~1
.globl __relibc_internal_sigentry_crit_third
__relibc_internal_sigentry_crit_third:
jmp qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
"] <= [
inner = sym inner_c,
sa_tmp_rip = const offset_of!(SigArea, tmp_rip),
sa_tmp_rsp = const offset_of!(SigArea, tmp_rsp),
sa_tmp_rax = const offset_of!(SigArea, tmp_rax),
sa_tmp_rdx = const offset_of!(SigArea, tmp_rdx),
sa_tmp_rdi = const offset_of!(SigArea, tmp_rdi),
sa_tmp_rsi = const offset_of!(SigArea, tmp_rsi),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_altstack_top = const offset_of!(SigArea, altstack_top),
sa_altstack_bottom = const offset_of!(SigArea, altstack_bottom),
sc_saved_rflags = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_saved_rip = const offset_of!(Sigcontrol, saved_ip),
sc_word = const offset_of!(Sigcontrol, word),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
sc_control = const offset_of!(Sigcontrol, control_flags),
tcb_sa_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch),
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
pctl_off_actions = const offset_of!(SigProcControl, actions),
pctl_off_pending = const offset_of!(SigProcControl, pending),
pctl_off_sender_infos = const offset_of!(SigProcControl, sender_infos),
pctl = sym PROC_CONTROL_STRUCT,
supports_avx = sym SUPPORTS_AVX,
REDZONE_SIZE = const 128,
STACK_ALIGN = const 16,
SA_ONSTACK_BIT = const 58, // (1 << 58) >> 32 = 0x0400_0000
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
]);
extern "C" {
fn __relibc_internal_sigentry_crit_first();
fn __relibc_internal_sigentry_crit_second();
fn __relibc_internal_sigentry_crit_third();
}
/// Fixes some edge cases and calculates the value for uc_stack.
pub unsafe fn arch_pre(stack: &mut SigStack, area: &mut SigArea) -> PosixStackt {
// It is impossible to update RSP and RIP atomically on x86_64, without using IRETQ, which is
// almost as slow as calling a SIGRETURN syscall would be. Instead, we abuse the fact that
// signals are disabled in the prologue of the signal trampoline, which allows us to emulate
// atomicity inside the critical section, consisting of one instruction at 'crit_first', one at
// 'crit_second', and one at 'crit_third', see asm.
if stack.regs.rip == __relibc_internal_sigentry_crit_first as usize {
// Reexecute pop rsp and jump steps. This case needs to be different from the one below,
// since rsp has not been overwritten with the previous context's stack, just yet. At this
// point, we know [rsp+0] contains the saved RSP, and [rsp-8] contains the saved RIP.
let stack_ptr = stack.regs.rsp as *const usize;
stack.regs.rsp = stack_ptr.read();
stack.regs.rip = stack_ptr.sub(1).read();
} else if stack.regs.rip == __relibc_internal_sigentry_crit_second as usize
|| stack.regs.rip == __relibc_internal_sigentry_crit_third as usize
{
// Almost finished, just reexecute the jump before tmp_rip is overwritten by this
// deeper-level signal.
stack.regs.rip = area.tmp_rip;
}
get_sigaltstack(area, stack.regs.rsp).into()
}
pub(crate) static SUPPORTS_AVX: AtomicU8 = AtomicU8::new(0);
// __relibc will be prepended to the name, so no_mangle is fine
#[no_mangle]
pub unsafe fn manually_enter_trampoline() {
let c = &Tcb::current().unwrap().os_specific.control;
c.control_flags.store(
c.control_flags.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
c.saved_archdep_reg.set(0); // TODO: Just reset DF on x86?
core::arch::asm!("
lea rax, [rip + 2f]
mov fs:[{tcb_sc_off} + {sc_saved_rip}], rax
jmp __relibc_internal_sigentry
2:
",
out("rax") _,
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
sc_saved_rip = const offset_of!(Sigcontrol, saved_ip),
);
}
/// Get current stack pointer, weak granularity guarantees.
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!("mov {}, rsp", out(reg) sp);
}
sp
}
#![no_std]
#![feature(
asm_const,
array_chunks,
int_roundings,
let_chains,
slice_ptr_get,
sync_unsafe_cell
)]
#![forbid(unreachable_patterns)]
use core::cell::{SyncUnsafeCell, UnsafeCell};
use generic_rt::{ExpectTlsFree, GenericTcb};
use syscall::{Sigcontrol, O_CLOEXEC};
use self::proc::FdGuard;
extern crate alloc;
#[macro_export]
macro_rules! asmfunction(
($name:ident $(-> $ret:ty)? : [$($asmstmt:expr),*$(,)?] <= [$($decl:ident = $(sym $symname:ident)?$(const $constval:expr)?),*$(,)?]$(,)? ) => {
::core::arch::global_asm!(concat!("
.p2align 4
.section .text.", stringify!($name), ", \"ax\", @progbits
.globl ", stringify!($name), "
.type ", stringify!($name), ", @function
", stringify!($name), ":
", $($asmstmt, "\n",)* "
.size ", stringify!($name), ", . - ", stringify!($name), "
"), $($decl = $(sym $symname)?$(const $constval)?),*);
extern "C" {
pub fn $name() $(-> $ret)?;
}
}
);
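// Illustrative sketch (hypothetical symbol, x86_64 asm assumed): a minimal invocation of
// `asmfunction!`. It expands to a global_asm! block defining the symbol, plus a matching
// `extern "C"` declaration so the function is callable from Rust.
asmfunction!(__example_return_zero -> usize: ["
    xor rax, rax
    ret
"] <= []);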
pub mod arch;
pub mod proc;
// TODO: Replace auxvs with a non-stack-based interface, but keep getauxval for compatibility
#[path = "../../src/platform/auxv_defs.rs"]
pub mod auxv_defs;
pub mod signal;
pub mod sync;
pub mod sys;
pub mod thread;
#[derive(Debug, Default)]
pub struct RtTcb {
pub control: Sigcontrol,
pub arch: UnsafeCell<crate::arch::SigArea>,
pub thr_fd: UnsafeCell<Option<FdGuard>>,
}
impl RtTcb {
pub fn current() -> &'static Self {
unsafe { &Tcb::current().unwrap().os_specific }
}
pub fn thread_fd(&self) -> &FdGuard {
unsafe {
if (&*self.thr_fd.get()).is_none() {
self.thr_fd.get().write(Some(FdGuard::new(
syscall::open("/scheme/thisproc/current/open_via_dup", O_CLOEXEC).unwrap(),
)));
}
(&*self.thr_fd.get()).as_ref().unwrap()
}
}
}
pub type Tcb = GenericTcb<RtTcb>;
/// OS and architecture specific code to activate TLS - Redox aarch64
#[cfg(target_arch = "aarch64")]
pub unsafe fn tcb_activate(_tcb: &RtTcb, tls_end: usize, tls_len: usize) {
// Uses ABI page
let abi_ptr = tls_end - tls_len - 16;
core::ptr::write(abi_ptr as *mut usize, tls_end);
core::arch::asm!(
"msr tpidr_el0, {}",
in(reg) abi_ptr,
);
}
/// OS and architecture specific code to activate TLS - Redox x86
#[cfg(target_arch = "x86")]
pub unsafe fn tcb_activate(tcb: &RtTcb, tls_end: usize, _tls_len: usize) {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(
syscall::dup(**tcb.thread_fd(), b"regs/env")
.expect_notls("failed to open handle for process registers"),
);
let _ = syscall::read(*file, &mut env).expect_notls("failed to read gsbase");
env.gsbase = tls_end as u32;
let _ = syscall::write(*file, &env).expect_notls("failed to write gsbase");
}
/// OS and architecture specific code to activate TLS - Redox x86_64
#[cfg(target_arch = "x86_64")]
pub unsafe fn tcb_activate(tcb: &RtTcb, tls_end_and_tcb_start: usize, _tls_len: usize) {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(
syscall::dup(**tcb.thread_fd(), b"regs/env")
.expect_notls("failed to open handle for process registers"),
);
let _ = syscall::read(*file, &mut env).expect_notls("failed to read fsbase");
env.fsbase = tls_end_and_tcb_start as u64;
let _ = syscall::write(*file, &env).expect_notls("failed to write fsbase");
}
/// OS and architecture specific code to activate TLS - Redox riscv64
#[cfg(target_arch = "riscv64")]
pub unsafe fn tcb_activate(_tcb: &RtTcb, tls_end: usize, tls_len: usize) {
// tp points to static tls block
// FIXME limited to a single initial master
let tls_start = tls_end - tls_len;
let abi_ptr = tls_start - 8;
core::ptr::write(abi_ptr as *mut usize, tls_end);
core::arch::asm!(
"mv tp, {}",
in(reg) tls_start
);
}
/// Initialize redox-rt in situations where relibc is not used
pub unsafe fn initialize_freestanding() {
// TODO: This code is a hack! Integrate the ld_so TCB code into generic-rt, and then use that
// (this function will need pointers to the ELF structs normally passed in auxvs), so the TCB
// is initialized properly.
// TODO: TLS
let page = {
&mut *(syscall::fmap(
!0,
&syscall::Map {
offset: 0,
size: syscall::PAGE_SIZE,
flags: syscall::MapFlags::PROT_READ
| syscall::MapFlags::PROT_WRITE
| syscall::MapFlags::MAP_PRIVATE,
address: 0,
},
)
.unwrap() as *mut Tcb)
};
page.tcb_ptr = page;
page.tcb_len = syscall::PAGE_SIZE;
page.tls_end = (page as *mut Tcb).cast();
// Make sure to use ptr::write to prevent dropping the existing FdGuard
core::ptr::write(page.os_specific.thr_fd.get(), None);
#[cfg(not(any(target_arch = "aarch64", target_arch = "riscv64")))]
unsafe {
let tcb_addr = page as *mut Tcb as usize;
tcb_activate(&page.os_specific, tcb_addr, 0)
}
#[cfg(target_arch = "aarch64")]
unsafe {
let abi_ptr = core::ptr::addr_of_mut!(page.tcb_ptr);
core::arch::asm!("msr tpidr_el0, {}", in(reg) abi_ptr);
}
#[cfg(target_arch = "riscv64")]
unsafe {
let abi_ptr = core::ptr::addr_of_mut!(page.tcb_ptr) as usize;
core::arch::asm!("mv tp, {}", in(reg) (abi_ptr + 8));
}
initialize();
}
pub unsafe fn initialize() {
THIS_PID
.get()
.write(syscall::getpid().unwrap().try_into().unwrap());
}
static THIS_PID: SyncUnsafeCell<u32> = SyncUnsafeCell::new(0);
unsafe fn child_hook_common(new_pid_fd: FdGuard) {
// TODO: Currently pidfd == threadfd, but this will not be the case later.
RtTcb::current().thr_fd.get().write(Some(new_pid_fd));
THIS_PID
.get()
.write(syscall::getpid().unwrap().try_into().unwrap());
}
use core::{fmt::Debug, mem::size_of};
use crate::{arch::*, auxv_defs::*};
use alloc::{boxed::Box, collections::BTreeMap, vec};
//TODO: allow use of either 32-bit or 64-bit programs
#[cfg(target_pointer_width = "32")]
use goblin::elf32::{
header::Header,
program_header::program_header32::{ProgramHeader, PF_W, PF_X, PT_INTERP, PT_LOAD},
};
#[cfg(target_pointer_width = "64")]
use goblin::elf64::{
header::Header,
program_header::program_header64::{ProgramHeader, PF_W, PF_X, PT_INTERP, PT_LOAD},
};
use syscall::{
error::*,
flag::{MapFlags, SEEK_SET},
GrantDesc, GrantFlags, Map, SetSighandlerData, MAP_FIXED_NOREPLACE, MAP_SHARED, O_CLOEXEC,
PAGE_SIZE, PROT_EXEC, PROT_READ, PROT_WRITE,
};
pub enum FexecResult {
Normal {
addrspace_handle: FdGuard,
},
Interp {
path: Box<[u8]>,
image_file: FdGuard,
open_via_dup: FdGuard,
interp_override: InterpOverride,
},
}
pub struct InterpOverride {
phs: Box<[u8]>,
at_entry: usize,
at_phnum: usize,
at_phent: usize,
name: Box<[u8]>,
tree: BTreeMap<usize, usize>,
}
pub struct ExtraInfo<'a> {
pub cwd: Option<&'a [u8]>,
/// Default scheme for the process
pub default_scheme: Option<&'a [u8]>,
/// POSIX states that while sigactions are reset, ignored sigactions will remain ignored.
pub sigignmask: u64,
/// POSIX also states that the sigprocmask must be preserved across execs.
pub sigprocmask: u64,
/// File mode creation mask (POSIX)
pub umask: u32,
}
pub fn fexec_impl<A, E>(
image_file: FdGuard,
open_via_dup: FdGuard,
memory_scheme_fd: &FdGuard,
path: &[u8],
args: A,
envs: E,
total_args_envs_size: usize,
extrainfo: &ExtraInfo,
mut interp_override: Option<InterpOverride>,
) -> Result<FexecResult>
where
A: IntoIterator,
E: IntoIterator,
A::Item: AsRef<[u8]>,
E::Item: AsRef<[u8]>,
{
// Here, we do the minimum part of loading an application, which is what the kernel used to do.
// We load the executable into memory (albeit at different offsets in this executable), fix
// some misalignments, and then execute the SYS_EXEC syscall to replace the program memory
// entirely.
let mut header_bytes = [0_u8; size_of::<Header>()];
read_all(*image_file, Some(0), &mut header_bytes)?;
let header = Header::from_bytes(&header_bytes);
let grants_fd = {
let current_addrspace_fd = FdGuard::new(syscall::dup(*open_via_dup, b"addrspace")?);
FdGuard::new(syscall::dup(*current_addrspace_fd, b"empty")?)
};
// Never allow more than 1 MiB of program headers.
const MAX_PH_SIZE: usize = 1024 * 1024;
let phentsize = u64::from(header.e_phentsize) as usize;
let phnum = u64::from(header.e_phnum) as usize;
let pheaders_size = phentsize
.saturating_mul(phnum)
.saturating_add(size_of::<Header>());
if pheaders_size > MAX_PH_SIZE {
return Err(Error::new(E2BIG));
}
let mut phs_raw = vec![0_u8; pheaders_size];
phs_raw[..size_of::<Header>()].copy_from_slice(&header_bytes);
let phs = &mut phs_raw[size_of::<Header>()..];
// TODO: Remove clone, but this would require more as_refs and as_muts
let mut tree = interp_override.as_mut().map_or_else(
|| core::iter::once((0, PAGE_SIZE)).collect::<BTreeMap<_, _>>(),
|o| core::mem::take(&mut o.tree),
);
read_all(*image_file as usize, Some(header.e_phoff as u64), phs)
.map_err(|_| Error::new(EIO))?;
for ph_idx in 0..phnum {
let ph_bytes = &phs[ph_idx * phentsize..(ph_idx + 1) * phentsize];
let segment: &ProgramHeader =
plain::from_bytes(ph_bytes).map_err(|_| Error::new(EINVAL))?;
let mut flags = syscall::PROT_READ;
// W ^ X. If it is executable, do not allow it to be writable, even if requested
if segment.p_flags & PF_X == PF_X {
flags |= syscall::PROT_EXEC;
} else if segment.p_flags & PF_W == PF_W {
flags |= syscall::PROT_WRITE;
}
match segment.p_type {
// PT_INTERP must come before any PT_LOAD, so we don't have to iterate twice.
PT_INTERP => {
let mut interp = vec![0_u8; segment.p_filesz as usize];
read_all(
*image_file as usize,
Some(segment.p_offset as u64),
&mut interp,
)?;
return Ok(FexecResult::Interp {
path: interp.into_boxed_slice(),
image_file,
open_via_dup,
interp_override: InterpOverride {
at_entry: header.e_entry as usize,
at_phnum: phnum,
at_phent: phentsize,
phs: phs_raw.into_boxed_slice(),
name: path.into(),
tree,
},
});
}
PT_LOAD => {
let voff = segment.p_vaddr as usize % PAGE_SIZE;
let vaddr = segment.p_vaddr as usize - voff;
let filesz = segment.p_filesz as usize;
let total_page_count = (segment.p_memsz as usize + voff).div_ceil(PAGE_SIZE);
// The case where segments overlap so that they share one page is not handled.
// TODO: Should it be?
if segment.p_filesz > segment.p_memsz {
return Err(Error::new(ENOEXEC));
}
allocate_remote(
&grants_fd,
memory_scheme_fd,
vaddr,
total_page_count * PAGE_SIZE,
flags,
)?;
syscall::lseek(*image_file, segment.p_offset as isize, SEEK_SET)
.map_err(|_| Error::new(EIO))?;
// If unaligned, read the head page separately.
let (first_aligned_page, remaining_filesz) = if voff > 0 {
let bytes_to_next_page = PAGE_SIZE - voff;
let (_guard, dst_page) =
unsafe { MmapGuard::map_mut_anywhere(*grants_fd, vaddr, PAGE_SIZE)? };
let length = core::cmp::min(bytes_to_next_page, filesz);
read_all(*image_file, None, &mut dst_page[voff..][..length])?;
(vaddr + PAGE_SIZE, filesz - length)
} else {
(vaddr, filesz)
};
let remaining_page_count = remaining_filesz.div_floor(PAGE_SIZE);
let tail_bytes = remaining_filesz % PAGE_SIZE;
// TODO: Unless the calling process is *very* memory-constrained, the max amount of
// pages per iteration has no limit other than the time it takes to setup page
// tables.
//
// TODO: Reserve PAGES_PER_ITER "scratch pages" of virtual memory for that type of
// situation?
const PAGES_PER_ITER: usize = 64;
// TODO: Before this loop, attempt to mmap with MAP_PRIVATE directly from the image
// file.
for page_idx in (0..remaining_page_count).step_by(PAGES_PER_ITER) {
// Use commented out lines to trigger kernel bug (FIXME).
//let pages_in_this_group = core::cmp::min(PAGES_PER_ITER, file_page_count - page_idx * PAGES_PER_ITER);
let pages_in_this_group =
core::cmp::min(PAGES_PER_ITER, remaining_page_count - page_idx);
if pages_in_this_group == 0 {
break;
}
// TODO: MAP_FIXED to optimize away funmap?
let (_guard, dst_memory) = unsafe {
MmapGuard::map_mut_anywhere(
*grants_fd,
first_aligned_page + page_idx * PAGE_SIZE, // offset
pages_in_this_group * PAGE_SIZE, // size
)?
};
// TODO: Are &mut [u8] and &mut [[u8; PAGE_SIZE]] interchangeable (if the
// lengths are aligned, obviously)?
read_all(*image_file, None, dst_memory)?;
}
if tail_bytes > 0 {
let (_guard, dst_page) = unsafe {
MmapGuard::map_mut_anywhere(
*grants_fd,
first_aligned_page + remaining_page_count * PAGE_SIZE,
PAGE_SIZE,
)?
};
read_all(*image_file, None, &mut dst_page[..tail_bytes])?;
}
// file_page_count..file_page_count + zero_page_count are already zero-initialized
// by the kernel.
if !tree
.range(..=vaddr)
.next_back()
.filter(|(start, size)| **start + **size > vaddr)
.is_some()
{
tree.insert(vaddr, total_page_count * PAGE_SIZE);
}
}
_ => continue,
}
}
allocate_remote(
&grants_fd,
memory_scheme_fd,
STACK_TOP - STACK_SIZE,
STACK_SIZE,
MapFlags::PROT_READ | MapFlags::PROT_WRITE,
)?;
tree.insert(STACK_TOP - STACK_SIZE, STACK_SIZE);
let mut sp = STACK_TOP;
let mut stack_page = Option::<MmapGuard>::None;
let mut push = |word: usize| {
let old_page_no = sp / PAGE_SIZE;
sp -= size_of::<usize>();
let new_page_no = sp / PAGE_SIZE;
let new_page_off = sp % PAGE_SIZE;
let page = if let Some(ref mut page) = stack_page
&& old_page_no == new_page_no
{
page
} else if let Some(ref mut stack_page) = stack_page {
stack_page.remap(new_page_no * PAGE_SIZE, PROT_WRITE)?;
stack_page
} else {
let new = MmapGuard::map(
*grants_fd,
&Map {
offset: new_page_no * PAGE_SIZE,
size: PAGE_SIZE,
flags: PROT_WRITE,
address: 0, // let kernel decide
},
)?;
stack_page.insert(new)
};
unsafe {
page.as_mut_ptr_slice()
.as_mut_ptr()
.add(new_page_off)
.cast::<usize>()
.write(word);
}
Ok(())
};
let pheaders_to_convey = if let Some(ref r#override) = interp_override {
&*r#override.phs
} else {
&*phs_raw
};
let pheaders_size_aligned = pheaders_to_convey.len().next_multiple_of(PAGE_SIZE);
let pheaders = find_free_target_addr(&tree, pheaders_size_aligned).ok_or(Error::new(ENOMEM))?;
tree.insert(pheaders, pheaders_size_aligned);
allocate_remote(
&grants_fd,
memory_scheme_fd,
pheaders,
pheaders_size_aligned,
MapFlags::PROT_READ | MapFlags::PROT_WRITE,
)?;
unsafe {
let (_guard, memory) =
MmapGuard::map_mut_anywhere(*grants_fd, pheaders, pheaders_size_aligned)?;
memory[..pheaders_to_convey.len()].copy_from_slice(pheaders_to_convey);
}
mprotect_remote(
&grants_fd,
pheaders,
pheaders_size_aligned,
MapFlags::PROT_READ,
)?;
push(0)?;
push(AT_NULL)?;
push(header.e_entry as usize)?;
if let Some(ref r#override) = interp_override {
push(AT_BASE)?;
push(r#override.at_entry)?;
}
push(AT_ENTRY)?;
push(pheaders + size_of::<Header>())?;
push(AT_PHDR)?;
push(
interp_override
.as_ref()
.map_or(header.e_phnum as usize, |o| o.at_phnum),
)?;
push(AT_PHNUM)?;
push(
interp_override
.as_ref()
.map_or(header.e_phentsize as usize, |o| o.at_phent),
)?;
push(AT_PHENT)?;
let total_args_envs_auxvpointee_size = total_args_envs_size
+ extrainfo.cwd.map_or(0, |s| s.len() + 1)
+ extrainfo.default_scheme.map_or(0, |s| s.len() + 1);
let args_envs_size_aligned = total_args_envs_auxvpointee_size.next_multiple_of(PAGE_SIZE);
let target_args_env_address =
find_free_target_addr(&tree, args_envs_size_aligned).ok_or(Error::new(ENOMEM))?;
allocate_remote(
&grants_fd,
memory_scheme_fd,
target_args_env_address,
args_envs_size_aligned,
MapFlags::PROT_READ | MapFlags::PROT_WRITE,
)?;
tree.insert(target_args_env_address, args_envs_size_aligned);
let mut offset = 0;
let mut argc = 0;
{
let mut append = |source_slice: &[u8]| {
// TODO
let address = target_args_env_address + offset;
if !source_slice.is_empty() {
let containing_page = address.div_floor(PAGE_SIZE) * PAGE_SIZE;
let displacement = address - containing_page;
let size = source_slice.len() + displacement;
let aligned_size = size.next_multiple_of(PAGE_SIZE);
let (_guard, memory) = unsafe {
MmapGuard::map_mut_anywhere(*grants_fd, containing_page, aligned_size)?
};
memory[displacement..][..source_slice.len()].copy_from_slice(source_slice);
}
offset += source_slice.len() + 1;
Ok(address)
};
if let Some(cwd) = extrainfo.cwd {
push(append(cwd)?)?;
push(AT_REDOX_INITIAL_CWD_PTR)?;
push(cwd.len())?;
push(AT_REDOX_INITIAL_CWD_LEN)?;
}
if let Some(default_scheme) = extrainfo.default_scheme {
push(append(default_scheme)?)?;
push(AT_REDOX_INITIAL_DEFAULT_SCHEME_PTR)?;
push(default_scheme.len())?;
push(AT_REDOX_INITIAL_DEFAULT_SCHEME_LEN)?;
}
#[cfg(target_pointer_width = "32")]
{
push((extrainfo.sigignmask >> 32) as usize)?;
push(AT_REDOX_INHERITED_SIGIGNMASK_HI)?;
}
push(extrainfo.sigignmask as usize)?;
push(AT_REDOX_INHERITED_SIGIGNMASK)?;
#[cfg(target_pointer_width = "32")]
{
push((extrainfo.sigprocmask >> 32) as usize)?;
push(AT_REDOX_INHERITED_SIGPROCMASK_HI)?;
}
push(extrainfo.sigprocmask as usize)?;
push(AT_REDOX_INHERITED_SIGPROCMASK)?;
push(extrainfo.umask as usize)?;
push(AT_REDOX_UMASK)?;
push(0)?;
for env in envs {
push(append(env.as_ref())?)?;
}
push(0)?;
for arg in args {
push(append(arg.as_ref())?)?;
argc += 1;
}
}
push(argc)?;
if let Ok(sighandler_fd) = syscall::dup(*open_via_dup, b"sighandler").map(FdGuard::new) {
let _ = syscall::write(
*sighandler_fd,
&SetSighandlerData {
user_handler: 0,
excp_handler: 0,
thread_control_addr: 0,
proc_control_addr: 0,
},
);
}
unsafe {
deactivate_tcb(*open_via_dup)?;
}
// TODO: Restore old name if exec failed?
if let Ok(name_fd) = syscall::dup(*open_via_dup, b"name").map(FdGuard::new) {
let _ = syscall::write(*name_fd, interp_override.as_ref().map_or(path, |o| &o.name));
}
if interp_override.is_some() {
let mmap_min_fd = FdGuard::new(syscall::dup(*grants_fd, b"mmap-min-addr")?);
let last_addr = tree.iter().rev().nth(1).map_or(0, |(off, len)| *off + *len);
let aligned_last_addr = last_addr.next_multiple_of(PAGE_SIZE);
let _ = syscall::write(*mmap_min_fd, &usize::to_ne_bytes(aligned_last_addr));
}
let addrspace_selection_fd = FdGuard::new(syscall::dup(*open_via_dup, b"current-addrspace")?);
let _ = syscall::write(
*addrspace_selection_fd,
&create_set_addr_space_buf(*grants_fd, header.e_entry as usize, sp),
);
Ok(FexecResult::Normal {
addrspace_handle: addrspace_selection_fd,
})
}
fn write_usizes<const N: usize>(fd: &FdGuard, usizes: [usize; N]) -> Result<()> {
let _ = syscall::write(**fd, unsafe { plain::as_bytes(&usizes) });
Ok(())
}
fn allocate_remote(
addrspace_fd: &FdGuard,
memory_scheme_fd: &FdGuard,
dst_addr: usize,
len: usize,
flags: MapFlags,
) -> Result<()> {
mmap_remote(addrspace_fd, memory_scheme_fd, 0, dst_addr, len, flags)
}
pub fn mmap_remote(
addrspace_fd: &FdGuard,
fd: &FdGuard,
offset: usize,
dst_addr: usize,
len: usize,
flags: MapFlags,
) -> Result<()> {
write_usizes(
addrspace_fd,
[
// op
syscall::flag::ADDRSPACE_OP_MMAP,
// fd
**fd,
// "offset"
offset,
// address
dst_addr,
// size
len,
// flags
(flags | MapFlags::MAP_FIXED_NOREPLACE).bits(),
],
)
}
pub fn mprotect_remote(
addrspace_fd: &FdGuard,
addr: usize,
len: usize,
flags: MapFlags,
) -> Result<()> {
write_usizes(
addrspace_fd,
[
// op
syscall::flag::ADDRSPACE_OP_MPROTECT,
// address
addr,
// size
len,
// flags
flags.bits(),
],
)
}
pub fn munmap_remote(addrspace_fd: &FdGuard, addr: usize, len: usize) -> Result<()> {
write_usizes(
addrspace_fd,
[
// op
syscall::flag::ADDRSPACE_OP_MUNMAP,
// address
addr,
// size
len,
],
)
}
pub fn munmap_transfer(
src: &FdGuard,
dst: &FdGuard,
src_addr: usize,
dst_addr: usize,
len: usize,
flags: MapFlags,
) -> Result<()> {
write_usizes(
dst,
[
// op
syscall::flag::ADDRSPACE_OP_TRANSFER,
// fd
**src,
// "offset" (source address)
src_addr,
// address
dst_addr,
// size
len,
// flags
(flags | MapFlags::MAP_FIXED_NOREPLACE).bits(),
],
)
}
fn read_all(fd: usize, offset: Option<u64>, buf: &mut [u8]) -> Result<()> {
if let Some(offset) = offset {
syscall::lseek(fd, offset as isize, SEEK_SET)?;
}
let mut total_bytes_read = 0;
while total_bytes_read < buf.len() {
total_bytes_read += match syscall::read(fd, &mut buf[total_bytes_read..])? {
0 => return Err(Error::new(ENOEXEC)),
bytes_read => bytes_read,
}
}
Ok(())
}
// TODO: With the introduction of remote mmaps, remove this and let the kernel handle address
// allocation.
fn find_free_target_addr(tree: &BTreeMap<usize, usize>, size: usize) -> Option<usize> {
let mut iterator = tree.iter().peekable();
// Ignore the space between zero and the first region, to avoid null pointers.
while let Some((cur_address, entry_size)) = iterator.next() {
let end = *cur_address + entry_size;
if let Some((next_address, _)) = iterator.peek() {
if **next_address - end > size {
return Some(end);
}
}
// No need to check last entry, since the stack will always be put at the highest
// possible address.
}
None
}
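// Worked example (hypothetical values): with tree = {0x1000: 0x1000, 0x10000: 0x2000} and
// size = 0x3000, the first region ends at 0x2000 and the gap up to 0x10000 is 0xe000 > 0x3000,
// so Some(0x2000) is returned.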
pub struct MmapGuard {
fd: usize,
base: usize,
size: usize,
}
impl MmapGuard {
pub fn map(fd: usize, map: &Map) -> Result<Self> {
Ok(Self {
fd,
size: map.size,
base: unsafe { syscall::fmap(fd, map)? },
})
}
pub fn remap(&mut self, offset: usize, mut flags: MapFlags) -> Result<()> {
flags.remove(MapFlags::MAP_FIXED_NOREPLACE);
flags.insert(MapFlags::MAP_FIXED);
let _new_base = unsafe {
syscall::fmap(
self.fd,
&Map {
offset,
size: self.size,
flags,
address: self.base,
},
)?
};
Ok(())
}
pub unsafe fn map_mut_anywhere<'a>(
fd: usize,
offset: usize,
size: usize,
) -> Result<(Self, &'a mut [u8])> {
let mut this = Self::map(
fd,
&Map {
size,
offset,
address: 0,
flags: PROT_WRITE,
},
)?;
let slice = &mut *this.as_mut_ptr_slice();
Ok((this, slice))
}
pub fn addr(&self) -> usize {
self.base
}
pub fn len(&self) -> usize {
self.size
}
pub fn as_mut_ptr_slice(&mut self) -> *mut [u8] {
core::ptr::slice_from_raw_parts_mut(self.base as *mut u8, self.size)
}
pub fn take(mut self) {
self.size = 0;
}
}
impl Drop for MmapGuard {
fn drop(&mut self) {
if self.size != 0 {
let _ = unsafe { syscall::funmap(self.base, self.size) };
}
}
}
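// Sketch of typical RAII usage (hypothetical helper; the fd and address are illustrative).
// The local mapping is funmap'd when the guard is dropped, unless `take` is called first.
#[allow(dead_code)]
fn peek_remote_byte(grants_fd: usize, page_addr: usize) -> Result<u8> {
    let mut guard = MmapGuard::map(
        grants_fd,
        &Map {
            offset: page_addr, // offset within the remote address space
            size: PAGE_SIZE,
            flags: PROT_READ,
            address: 0, // let the kernel pick a local address
        },
    )?;
    // as_mut_ptr_slice() exposes the locally mapped page; read its first byte.
    let byte = unsafe { (*guard.as_mut_ptr_slice())[0] };
    Ok(byte) // `guard` is dropped here, unmapping the page
}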
pub struct FdGuard {
fd: usize,
taken: bool,
}
impl FdGuard {
pub fn new(fd: usize) -> Self {
Self { fd, taken: false }
}
pub fn take(&mut self) -> usize {
self.taken = true;
self.fd
}
}
impl core::ops::Deref for FdGuard {
type Target = usize;
fn deref(&self) -> &Self::Target {
&self.fd
}
}
impl Drop for FdGuard {
fn drop(&mut self) {
if !self.taken {
let _ = syscall::close(self.fd);
}
}
}
impl Debug for FdGuard {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "[fd {}]", self.fd)
}
}
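// Sketch (hypothetical helper): FdGuard closes its fd on drop unless ownership is released
// with `take`, which is how fds are handed off without being closed.
#[allow(dead_code)]
fn dup_and_release(fd: usize) -> Result<usize> {
    let mut guard = FdGuard::new(syscall::dup(fd, b"")?);
    // ... *guard can be used like a raw fd here; dropping it early would close it ...
    Ok(guard.take()) // the caller now owns the fd; Drop will not close it
}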
pub fn create_set_addr_space_buf(
space: usize,
ip: usize,
sp: usize,
) -> [u8; size_of::<usize>() * 3] {
let mut buf = [0_u8; 3 * size_of::<usize>()];
let mut chunks = buf.array_chunks_mut::<{ size_of::<usize>() }>();
*chunks.next().unwrap() = usize::to_ne_bytes(space);
*chunks.next().unwrap() = usize::to_ne_bytes(sp);
*chunks.next().unwrap() = usize::to_ne_bytes(ip);
buf
}
/// Spawns a new context which will not share the same address space as the current one. File
/// descriptors from other schemes are reobtained with `dup`, and grants referencing such file
/// descriptors are reobtained through `fmap`. Other mappings are kept but duplicated using CoW.
pub fn fork_impl() -> Result<usize> {
let old_mask = crate::signal::get_sigmask()?;
let pid = unsafe { Error::demux(__relibc_internal_fork_wrapper())? };
if pid == 0 {
crate::signal::set_sigmask(Some(old_mask), None)?;
}
Ok(pid)
}
pub fn fork_inner(initial_rsp: *mut usize) -> Result<usize> {
let (cur_filetable_fd, new_pid_fd, new_pid);
{
let cur_pid_fd = FdGuard::new(syscall::open(
"/scheme/thisproc/current/open_via_dup",
O_CLOEXEC,
)?);
(new_pid_fd, new_pid) = new_child_process()?;
copy_str(*cur_pid_fd, *new_pid_fd, "name")?;
// Copy existing files into new file table, but do not reuse the same file table (i.e. new
// parent FDs will not show up for the child).
{
cur_filetable_fd = FdGuard::new(syscall::dup(*cur_pid_fd, b"filetable")?);
// This must be done before the address space is copied.
unsafe {
initial_rsp.write(*cur_filetable_fd);
initial_rsp.add(1).write(*new_pid_fd);
}
}
// CoW-duplicate address space.
{
let new_addr_space_sel_fd =
FdGuard::new(syscall::dup(*new_pid_fd, b"current-addrspace")?);
let cur_addr_space_fd = FdGuard::new(syscall::dup(*cur_pid_fd, b"addrspace")?);
let new_addr_space_fd = FdGuard::new(syscall::dup(*cur_addr_space_fd, b"exclusive")?);
let mut grant_desc_buf = [GrantDesc::default(); 16];
loop {
let bytes_read = {
let buf = unsafe {
core::slice::from_raw_parts_mut(
grant_desc_buf.as_mut_ptr().cast(),
grant_desc_buf.len() * size_of::<GrantDesc>(),
)
};
syscall::read(*cur_addr_space_fd, buf)?
};
if bytes_read == 0 {
break;
}
let grants = &grant_desc_buf[..bytes_read / size_of::<GrantDesc>()];
for grant in grants {
if !grant.flags.contains(GrantFlags::GRANT_SCHEME)
|| !grant.flags.contains(GrantFlags::GRANT_SHARED)
{
continue;
}
let buf;
// TODO: write! using some #![no_std] Cursor type (tracking the length)?
#[cfg(target_pointer_width = "64")]
{
//buf = *b"grant-fd-AAAABBBBCCCCDDDD";
//write!(&mut buf, "grant-fd-{:>016x}", grant.base).unwrap();
buf = alloc::format!("grant-fd-{:>016x}", grant.base).into_bytes();
}
#[cfg(target_pointer_width = "32")]
{
//buf = *b"grant-fd-AAAABBBB";
//write!(&mut buf[..], "grant-fd-{:>08x}", grant.base).unwrap();
buf = alloc::format!("grant-fd-{:>08x}", grant.base).into_bytes();
}
let grant_fd = FdGuard::new(syscall::dup(*cur_addr_space_fd, &buf)?);
let mut flags = MAP_SHARED | MAP_FIXED_NOREPLACE;
flags.set(PROT_READ, grant.flags.contains(GrantFlags::GRANT_READ));
flags.set(PROT_WRITE, grant.flags.contains(GrantFlags::GRANT_WRITE));
flags.set(PROT_EXEC, grant.flags.contains(GrantFlags::GRANT_EXEC));
mmap_remote(
&new_addr_space_fd,
&grant_fd,
grant.offset as usize,
grant.base,
grant.size,
flags,
)?;
}
}
let buf = create_set_addr_space_buf(
*new_addr_space_fd,
__relibc_internal_fork_ret as usize,
initial_rsp as usize,
);
let _ = syscall::write(*new_addr_space_sel_fd, &buf)?;
}
{
// Reuse the same sigaltstack and signal entry (all memory will be re-mapped CoW later).
//
// Do this after the address space is cloned, since the kernel will get a shared
// reference to the TCB and whatever pages store the signal proc control struct.
{
let new_sighandler_fd = FdGuard::new(syscall::dup(*new_pid_fd, b"sighandler")?);
let _ = syscall::write(
*new_sighandler_fd,
&crate::signal::current_setsighandler_struct(),
)?;
}
}
copy_env_regs(*cur_pid_fd, *new_pid_fd)?;
}
// Copy the file table. We do this last to ensure that all previously used file descriptors are
// closed. The only exception -- the filetable selection fd and the current filetable fd --
// will be closed by the child process.
{
// TODO: Use file descriptor forwarding or something similar to avoid copying the file
// table in the kernel.
let new_filetable_fd = FdGuard::new(syscall::dup(*cur_filetable_fd, b"copy")?);
let new_filetable_sel_fd = FdGuard::new(syscall::dup(*new_pid_fd, b"current-filetable")?);
let _ = syscall::write(
*new_filetable_sel_fd,
&usize::to_ne_bytes(*new_filetable_fd),
)?;
}
let start_fd = FdGuard::new(syscall::dup(*new_pid_fd, b"start")?);
let _ = syscall::write(*start_fd, &[0])?;
Ok(new_pid)
}
pub fn new_child_process() -> Result<(FdGuard, usize)> {
// Create a new context (fields such as uid/gid will be inherited from the current context).
let fd = FdGuard::new(syscall::open(
"/scheme/thisproc/new/open_via_dup",
O_CLOEXEC,
)?);
// Extract pid.
let mut buffer = [0_u8; 64];
let len = syscall::fpath(*fd, &mut buffer)?;
let buffer = buffer.get(..len).ok_or(Error::new(ENAMETOOLONG))?;
let colon_idx = buffer
.iter()
.position(|c| *c == b':')
.ok_or(Error::new(EINVAL))?;
let slash_idx = buffer
.iter()
.skip(colon_idx)
.position(|c| *c == b'/')
.ok_or(Error::new(EINVAL))?
+ colon_idx;
let pid_bytes = buffer
.get(colon_idx + 1..slash_idx)
.ok_or(Error::new(EINVAL))?;
let pid_str = core::str::from_utf8(pid_bytes).map_err(|_| Error::new(EINVAL))?;
let pid = pid_str.parse::<usize>().map_err(|_| Error::new(EINVAL))?;
Ok((fd, pid))
}
pub fn copy_str(cur_pid_fd: usize, new_pid_fd: usize, key: &str) -> Result<()> {
let cur_name_fd = FdGuard::new(syscall::dup(cur_pid_fd, key.as_bytes())?);
let new_name_fd = FdGuard::new(syscall::dup(new_pid_fd, key.as_bytes())?);
// TODO: Max path size?
let mut buf = [0_u8; 256];
let len = syscall::read(*cur_name_fd, &mut buf)?;
let buf = buf.get(..len).ok_or(Error::new(ENAMETOOLONG))?;
syscall::write(*new_name_fd, &buf)?;
Ok(())
}
use core::{ffi::c_int, mem::MaybeUninit, ptr::NonNull, sync::atomic::Ordering};
use syscall::{
data::AtomicU64, Error, RawAction, Result, RtSigInfo, SenderInfo, SetSighandlerData,
SigProcControl, Sigcontrol, SigcontrolFlags, TimeSpec, EAGAIN, EINTR, EINVAL, ENOMEM, EPERM,
SIGCHLD, SIGKILL, SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGWINCH,
};
use crate::{arch::*, proc::FdGuard, sync::Mutex, RtTcb, Tcb};
#[cfg(target_arch = "x86_64")]
static CPUID_EAX1_ECX: core::sync::atomic::AtomicU32 = core::sync::atomic::AtomicU32::new(0);
pub fn sighandler_function() -> usize {
// TODO: HWCAP?
__relibc_internal_sigentry as usize
}
/// ucontext_t representation
#[repr(C)]
pub struct SigStack {
#[cfg(any(
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "riscv64"
))]
_pad: [usize; 1], // pad from 7*8 to 64
#[cfg(target_arch = "x86")]
_pad: [usize; 3], // pad from 9*4 to 12*4
pub link: *mut SigStack,
pub old_stack: PosixStackt,
pub old_mask: u64,
pub(crate) sival: usize,
pub(crate) sig_code: u32,
pub(crate) sig_num: u32,
// x86_64: 864 bytes
// i686: 512 bytes
// aarch64: 272 bytes (SIMD TODO)
// riscv64: 520 bytes (vector extensions TODO)
pub regs: ArchIntRegs,
}
#[repr(C)]
pub struct PosixStackt {
pub sp: *mut (),
pub flags: i32,
pub size: usize,
}
pub const SS_ONSTACK: usize = 1;
pub const SS_DISABLE: usize = 2;
impl From<Sigaltstack> for PosixStackt {
fn from(value: Sigaltstack) -> Self {
match value {
Sigaltstack::Disabled => PosixStackt {
sp: core::ptr::null_mut(),
size: 0,
flags: SS_DISABLE.try_into().unwrap(),
},
Sigaltstack::Enabled {
onstack,
base,
size,
} => PosixStackt {
sp: base.cast(),
size,
flags: if onstack {
SS_ONSTACK.try_into().unwrap()
} else {
0
},
},
}
}
}
#[repr(C)]
// TODO: This struct is for practical reasons locked to Linux's ABI, but avoid redefining
// it here. Alternatively, check at compile time that the structs are equivalent.
pub struct SiginfoAbi {
pub si_signo: i32,
pub si_errno: i32,
pub si_code: i32,
pub si_pid: i32, // pid_t
pub si_uid: i32, // uid_t
pub si_addr: *mut (), // *mut c_void
pub si_status: i32,
pub si_value: usize, // sigval
}
#[inline(always)]
unsafe fn inner(stack: &mut SigStack) {
let os = &Tcb::current().unwrap().os_specific;
let stack_ptr = NonNull::from(&mut *stack);
stack.link = core::mem::replace(&mut (*os.arch.get()).last_sigstack, Some(stack_ptr))
.map_or_else(core::ptr::null_mut, |x| x.as_ptr());
let signals_were_disabled = (*os.arch.get()).disable_signals_depth > 0;
let targeted_thread_not_process = stack.sig_num >= 64;
stack.sig_num %= 64;
// asm counts from 0
stack.sig_num += 1;
let (sender_pid, sender_uid) = {
let area = &mut *os.arch.get();
// Undefined if the signal was not realtime
stack.sival = area.tmp_rt_inf.arg;
stack.old_stack = arch_pre(stack, area);
if (stack.sig_num - 1) / 32 == 1 && !targeted_thread_not_process {
stack.sig_code = area.tmp_rt_inf.code as u32;
(area.tmp_rt_inf.pid, area.tmp_rt_inf.uid)
} else {
stack.sig_code = 0; // TODO: SI_USER constant?
// TODO: Handle SIGCHLD. Maybe that should always be queued though?
let inf = SenderInfo::from_raw(area.tmp_id_inf);
(inf.pid, inf.ruid)
}
};
let sigaction = {
let guard = SIGACTIONS_LOCK.lock();
let action = convert_old(&PROC_CONTROL_STRUCT.actions[stack.sig_num as usize - 1]);
if action.flags.contains(SigactionFlags::RESETHAND) {
drop(guard);
sigaction(
stack.sig_num as u8,
Some(&Sigaction {
kind: SigactionKind::Default,
mask: 0,
flags: SigactionFlags::empty(),
}),
None,
);
}
action
};
let shall_restart = sigaction.flags.contains(SigactionFlags::RESTART);
let handler = match sigaction.kind {
SigactionKind::Ignore => {
panic!("ctl {:x?} signal {}", os.control, stack.sig_num)
}
SigactionKind::Default => {
syscall::exit(stack.sig_num as usize);
unreachable!();
}
SigactionKind::Handled { handler } => handler,
};
// Set sigmask to sa_mask and unmark the signal as pending.
let prev_sigallow = get_allowset_raw(&os.control.word);
let mut sigallow_inside = !sigaction.mask & prev_sigallow;
if !sigaction.flags.contains(SigactionFlags::NODEFER) {
sigallow_inside &= !sig_bit(stack.sig_num);
}
let _pending_when_sa_mask = set_allowset_raw(&os.control.word, prev_sigallow, sigallow_inside);
// TODO: If sa_mask caused signals to be unblocked, deliver one or all of those first?
// Re-enable signals again.
let control_flags = &os.control.control_flags;
control_flags.store(
control_flags.load(Ordering::Relaxed) & !SigcontrolFlags::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
core::sync::atomic::compiler_fence(Ordering::Acquire);
stack.old_mask = prev_sigallow;
// Call handler, either sa_handler or sa_siginfo depending on flag.
if sigaction.flags.contains(SigactionFlags::SIGINFO)
&& let Some(sigaction) = handler.sigaction
{
let info = SiginfoAbi {
si_signo: stack.sig_num as c_int,
si_addr: core::ptr::null_mut(),
si_code: stack.sig_code as i32,
si_errno: 0,
si_pid: sender_pid as i32,
si_status: 0,
si_uid: sender_uid as i32,
si_value: stack.sival,
};
sigaction(
stack.sig_num as c_int,
core::ptr::addr_of!(info).cast(),
stack as *mut SigStack as *mut (),
);
} else if let Some(handler) = handler.handler {
handler(stack.sig_num as c_int);
}
// Disable signals while we modify the sigmask again
control_flags.store(
control_flags.load(Ordering::Relaxed) | SigcontrolFlags::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
core::sync::atomic::compiler_fence(Ordering::Acquire);
// Update allowset again.
let new_mask = stack.old_mask;
let old_mask = get_allowset_raw(&os.control.word);
let _pending_when_restored_mask = set_allowset_raw(&os.control.word, old_mask, new_mask);
// TODO: If resetting the sigmask caused signals to be unblocked, then should they be delivered
// here? And would it be possible to tail-call-optimize that?
(*os.arch.get()).last_sig_was_restart = shall_restart;
// TODO: Support setting uc_link to jump back to a different context?
(*os.arch.get()).last_sigstack = NonNull::new(stack.link);
// TODO: Support restoring uc_stack?
// And re-enable them again
if !signals_were_disabled {
core::sync::atomic::compiler_fence(Ordering::Release);
control_flags.store(
control_flags.load(Ordering::Relaxed) & !SigcontrolFlags::INHIBIT_DELIVERY.bits(),
Ordering::Relaxed,
);
}
}
#[cfg(not(target_arch = "x86"))]
pub(crate) unsafe extern "C" fn inner_c(stack: usize) {
inner(&mut *(stack as *mut SigStack))
}
#[cfg(target_arch = "x86")]
pub(crate) unsafe extern "fastcall" fn inner_fastcall(stack: usize) {
inner(&mut *(stack as *mut SigStack))
}
pub fn get_sigmask() -> Result<u64> {
let mut mask = 0;
modify_sigmask(Some(&mut mask), Option::<fn(u64) -> u64>::None)?;
Ok(mask)
}
pub fn set_sigmask(new: Option<u64>, old: Option<&mut u64>) -> Result<()> {
modify_sigmask(old, new.map(move |newmask| move |_| newmask))
}
pub fn or_sigmask(new: Option<u64>, old: Option<&mut u64>) -> Result<()> {
// Parsing nightmare... :)
modify_sigmask(
old,
new.map(move |newmask| move |oldmask| oldmask | newmask),
)
}
pub fn andn_sigmask(new: Option<u64>, old: Option<&mut u64>) -> Result<()> {
modify_sigmask(
old,
new.map(move |newmask| move |oldmask| oldmask & !newmask),
)
}
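// Sketch (hypothetical wrapper, with assumed libc constants 0/1/2): how the helpers above
// line up with pthread_sigmask's SIG_BLOCK, SIG_UNBLOCK and SIG_SETMASK, given a u64 sigset.
#[allow(dead_code)]
fn sigprocmask_like(how: c_int, new: Option<u64>, old: Option<&mut u64>) -> Result<()> {
    match how {
        0 => or_sigmask(new, old),   // SIG_BLOCK: add the given bits to the mask
        1 => andn_sigmask(new, old), // SIG_UNBLOCK: clear the given bits
        2 => set_sigmask(new, old),  // SIG_SETMASK: replace the mask outright
        _ => Err(Error::new(EINVAL)),
    }
}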
fn get_allowset_raw(words: &[AtomicU64; 2]) -> u64 {
(words[0].load(Ordering::Relaxed) >> 32) | ((words[1].load(Ordering::Relaxed) >> 32) << 32)
}
/// Sets mask from old to new, returning what was pending at the time.
fn set_allowset_raw(words: &[AtomicU64; 2], old: u64, new: u64) -> u64 {
// This assumes *only this thread* can change the allowset. If this rule is broken, the use of
// fetch_add will corrupt the words entirely. fetch_add is very efficient on x86, being
// generated as LOCK XADD which is the fastest RMW instruction AFAIK.
let prev_w0 = words[0].fetch_add(
((new & 0xffff_ffff) << 32).wrapping_sub((old & 0xffff_ffff) << 32),
Ordering::Relaxed,
) & 0xffff_ffff;
let prev_w1 = words[1].fetch_add(
((new >> 32) << 32).wrapping_sub((old >> 32) << 32),
Ordering::Relaxed,
) & 0xffff_ffff;
prev_w0 | (prev_w1 << 32)
}
fn modify_sigmask(old: Option<&mut u64>, op: Option<impl FnOnce(u64) -> u64>) -> Result<()> {
let _guard = tmp_disable_signals();
let ctl = current_sigctl();
let prev = get_allowset_raw(&ctl.word);
if let Some(old) = old {
*old = !prev;
}
let Some(op) = op else {
return Ok(());
};
let next = !op(!prev);
let pending = set_allowset_raw(&ctl.word, prev, next);
// POSIX requires that at least one pending unblocked signal be delivered before
// pthread_sigmask returns, if there is one.
if pending != 0 {
unsafe {
manually_enter_trampoline();
}
}
Ok(())
}
#[derive(Clone, Copy, Default)]
pub enum SigactionKind {
#[default]
Default,
Ignore,
Handled {
handler: SignalHandler,
},
}
#[derive(Clone, Copy, Default)]
pub struct Sigaction {
pub kind: SigactionKind,
pub mask: u64,
pub flags: SigactionFlags,
}
impl Sigaction {
fn ip(&self) -> usize {
unsafe {
match self.kind {
SigactionKind::Handled { handler } => {
if self.flags.contains(SigactionFlags::SIGINFO) {
handler.sigaction.map_or(0, |a| a as usize)
} else {
handler.handler.map_or(0, |a| a as usize)
}
}
_ => 0,
}
}
}
}
const MASK_DONTCARE: u64 = !0;
fn convert_old(action: &RawAction) -> Sigaction {
let old_first = action.first.load(Ordering::Relaxed);
let old_mask = action.user_data.load(Ordering::Relaxed);
let handler = (old_first & !(u64::from(STORED_FLAGS) << 32)) as usize;
let flags = SigactionFlags::from_bits_retain(((old_first >> 32) as u32) & STORED_FLAGS);
let kind = if handler == default_handler as usize {
SigactionKind::Default
} else if flags.contains(SigactionFlags::IGNORED) {
SigactionKind::Ignore
} else {
SigactionKind::Handled {
handler: unsafe { core::mem::transmute(handler) },
}
};
Sigaction {
mask: old_mask,
flags,
kind,
}
}
pub fn sigaction(signal: u8, new: Option<&Sigaction>, old: Option<&mut Sigaction>) -> Result<()> {
if matches!(usize::from(signal), 0 | 32 | SIGKILL | SIGSTOP | 65..) {
return Err(Error::new(EINVAL));
}
let _sigguard = tmp_disable_signals();
let ctl = current_sigctl();
let _guard = SIGACTIONS_LOCK.lock();
let action = &PROC_CONTROL_STRUCT.actions[usize::from(signal) - 1];
if let Some(old) = old {
*old = convert_old(action);
}
let Some(new) = new else {
return Ok(());
};
let explicit_handler = new.ip();
let (mask, flags, handler) = match (usize::from(signal), new.kind) {
(_, SigactionKind::Ignore) | (SIGURG | SIGWINCH, SigactionKind::Default) => {
// TODO: POSIX specifies that pending signals shall be discarded if set to SIG_IGN by
// sigaction.
// TODO: handle tmp_disable_signals
(
MASK_DONTCARE,
SigactionFlags::IGNORED,
if matches!(new.kind, SigactionKind::Default) {
default_handler as usize
} else {
0
},
)
}
// TODO: Handle pending signals before these flags are set.
(SIGTSTP | SIGTTOU | SIGTTIN, SigactionKind::Default) => (
MASK_DONTCARE,
SigactionFlags::SIG_SPECIFIC,
default_handler as usize,
),
(SIGCHLD, SigactionKind::Default) => {
let nocldstop_bit = new.flags & SigactionFlags::SIG_SPECIFIC;
(
MASK_DONTCARE,
SigactionFlags::IGNORED | nocldstop_bit,
default_handler as usize,
)
}
(_, SigactionKind::Default) => (new.mask, new.flags, default_handler as usize),
(_, SigactionKind::Handled { .. }) => (new.mask, new.flags, explicit_handler),
};
let new_first = (handler as u64) | (u64::from(flags.bits() & STORED_FLAGS) << 32);
action.first.store(new_first, Ordering::Relaxed);
action.user_data.store(mask, Ordering::Relaxed);
Ok(())
}
fn current_sigctl() -> &'static Sigcontrol {
&unsafe { Tcb::current() }.unwrap().os_specific.control
}
pub struct TmpDisableSignalsGuard {
_inner: (),
}
pub fn tmp_disable_signals() -> TmpDisableSignalsGuard {
unsafe {
let ctl = &current_sigctl().control_flags;
ctl.store(
ctl.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
core::sync::atomic::compiler_fence(Ordering::Acquire);
// TODO: fence?
(*Tcb::current().unwrap().os_specific.arch.get()).disable_signals_depth += 1;
}
TmpDisableSignalsGuard { _inner: () }
}
impl Drop for TmpDisableSignalsGuard {
fn drop(&mut self) {
unsafe {
let depth =
&mut (*Tcb::current().unwrap().os_specific.arch.get()).disable_signals_depth;
*depth -= 1;
if *depth == 0 {
let ctl = &current_sigctl().control_flags;
ctl.store(
ctl.load(Ordering::Relaxed) & !syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
core::sync::atomic::compiler_fence(Ordering::Acquire);
}
}
}
}
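// Sketch (hypothetical helper): the guard is used RAII-style; delivery stays inhibited for
// its lifetime, and nests correctly via disable_signals_depth.
#[allow(dead_code)]
fn with_signals_disabled<T>(f: impl FnOnce() -> T) -> T {
    let _guard = tmp_disable_signals();
    // Signal delivery is inhibited here, even if `f` itself takes another guard.
    f()
    // `_guard` is dropped on return; delivery resumes once the outermost guard is gone.
}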
bitflags::bitflags! {
// Some flags are ignored by the rt, but they match relibc's 1:1 to simplify conversion.
#[derive(Clone, Copy, Default)]
pub struct SigactionFlags: u32 {
const NOCLDWAIT = 2;
const RESTORER = 4;
const SIGINFO = 0x0200_0000;
const ONSTACK = 0x0400_0000;
const RESTART = 0x0800_0000;
const NODEFER = 0x1000_0000;
const RESETHAND = 0x2000_0000;
const SIG_SPECIFIC = 0x4000_0000;
const IGNORED = 0x8000_0000;
}
}
const STORED_FLAGS: u32 = 0xfe00_0000;
fn default_handler(sig: c_int) {
syscall::exit(sig as usize);
}
#[derive(Clone, Copy)]
pub union SignalHandler {
pub handler: Option<extern "C" fn(c_int)>,
pub sigaction: Option<unsafe extern "C" fn(c_int, *const (), *mut ())>,
}
static SIGACTIONS_LOCK: Mutex<()> = Mutex::new(());
pub(crate) static PROC_CONTROL_STRUCT: SigProcControl = SigProcControl {
pending: AtomicU64::new(0),
actions: [const {
RawAction {
first: AtomicU64::new(0),
user_data: AtomicU64::new(0),
}
}; 64],
sender_infos: [const { AtomicU64::new(0) }; 32],
};
fn combine_allowset([lo, hi]: [u64; 2]) -> u64 {
(lo >> 32) | ((hi >> 32) << 32)
}
const fn sig_bit(sig: u32) -> u64 {
//assert_ne!(sig, 32);
//assert_ne!(sig, 0);
1 << (sig - 1)
}
pub fn setup_sighandler(tcb: &RtTcb) {
{
let _guard = SIGACTIONS_LOCK.lock();
for (sig_idx, action) in PROC_CONTROL_STRUCT.actions.iter().enumerate() {
let sig = sig_idx + 1;
let bits = if matches!(sig, SIGTSTP | SIGTTIN | SIGTTOU) {
SigactionFlags::SIG_SPECIFIC
} else if matches!(sig, SIGCHLD | SIGURG | SIGWINCH) {
SigactionFlags::IGNORED
} else {
SigactionFlags::empty()
};
action.first.store(
(u64::from(bits.bits()) << 32) | default_handler as u64,
Ordering::Relaxed,
);
}
}
let arch = unsafe { &mut *tcb.arch.get() };
{
// The asm decides whether to use the altstack, based on whether the saved stack pointer
// was already on that stack. Thus, setting the altstack to the entire address space is
// equivalent to not using any altstack at all (the default).
arch.altstack_top = usize::MAX;
arch.altstack_bottom = 0;
// TODO
#[cfg(any(target_arch = "x86", target_arch = "aarch64", target_arch = "riscv64"))]
{
arch.pctl = core::ptr::addr_of!(PROC_CONTROL_STRUCT) as usize;
}
}
#[cfg(target_arch = "x86_64")]
{
let cpuid_eax1_ecx = unsafe { core::arch::x86_64::__cpuid(1) }.ecx;
CPUID_EAX1_ECX.store(cpuid_eax1_ecx, core::sync::atomic::Ordering::Relaxed);
SUPPORTS_AVX.store(u8::from(cpuid_eax1_ecx & 1 << 28 != 0), Ordering::Relaxed);
}
let data = current_setsighandler_struct();
let fd = FdGuard::new(
syscall::dup(**tcb.thread_fd(), b"sighandler").expect("failed to open sighandler fd"),
);
let _ = syscall::write(*fd, &data).expect("failed to write to sighandler fd");
// TODO: Inherited set of ignored signals
set_sigmask(Some(0), None);
}
pub type RtSigarea = RtTcb; // TODO
pub fn current_setsighandler_struct() -> SetSighandlerData {
SetSighandlerData {
user_handler: sighandler_function(),
excp_handler: 0, // TODO
thread_control_addr: core::ptr::addr_of!(
unsafe { Tcb::current() }.unwrap().os_specific.control
) as usize,
proc_control_addr: &PROC_CONTROL_STRUCT as *const SigProcControl as usize,
}
}
#[derive(Clone, Copy, Default, PartialEq)]
pub enum Sigaltstack {
#[default]
Disabled,
Enabled {
onstack: bool,
base: *mut (),
size: usize,
},
}
pub(crate) fn get_sigaltstack(tcb: &SigArea, sp: usize) -> Sigaltstack {
if tcb.altstack_bottom == 0 && tcb.altstack_top == usize::MAX {
Sigaltstack::Disabled
} else {
Sigaltstack::Enabled {
base: tcb.altstack_bottom as *mut (),
size: tcb.altstack_top - tcb.altstack_bottom,
onstack: (tcb.altstack_bottom..tcb.altstack_top).contains(&sp),
}
}
}
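// Sketch (not part of the original source): a hypothetical test showing how the boundary
// encoding maps to the enum. `Disabled` is the full address range, and `onstack` is derived
// purely from the saved stack pointer.
#[cfg(test)]
#[test]
fn sigaltstack_encoding_sketch() {
    let mut area = SigArea::default();
    // Default encoding for "no altstack": the entire address space.
    area.altstack_bottom = 0;
    area.altstack_top = usize::MAX;
    assert!(matches!(get_sigaltstack(&area, 0x1234), Sigaltstack::Disabled));
    // A hypothetical 64 KiB altstack; a stack pointer inside it reports onstack = true.
    area.altstack_bottom = 0x10000;
    area.altstack_top = 0x20000;
    assert!(matches!(
        get_sigaltstack(&area, 0x18000),
        Sigaltstack::Enabled { onstack: true, .. }
    ));
}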
pub unsafe fn sigaltstack(
new: Option<&Sigaltstack>,
old_out: Option<&mut Sigaltstack>,
) -> Result<()> {
let _g = tmp_disable_signals();
let tcb = &mut *Tcb::current().unwrap().os_specific.arch.get();
let old = get_sigaltstack(tcb, crate::arch::current_sp());
if matches!(old, Sigaltstack::Enabled { onstack: true, .. }) && new != Some(&old) {
return Err(Error::new(EPERM));
}
if let Some(old_out) = old_out {
*old_out = old;
}
if let Some(new) = new {
match *new {
Sigaltstack::Disabled => {
tcb.altstack_bottom = 0;
tcb.altstack_top = usize::MAX;
}
Sigaltstack::Enabled { onstack: true, .. } => return Err(Error::new(EINVAL)),
Sigaltstack::Enabled {
base,
size,
onstack: false,
} => {
if size < MIN_SIGALTSTACK_SIZE {
return Err(Error::new(ENOMEM));
}
tcb.altstack_bottom = base as usize;
tcb.altstack_top = base as usize + size;
}
}
}
Ok(())
}
pub const MIN_SIGALTSTACK_SIZE: usize = 2048;
pub fn currently_pending_blocked() -> u64 {
let control = &unsafe { Tcb::current().unwrap() }.os_specific.control;
let w0 = control.word[0].load(Ordering::Relaxed);
let w1 = control.word[1].load(Ordering::Relaxed);
let allow = (w0 >> 32) | ((w1 >> 32) << 32);
let thread_pending = (w0 & 0xffff_ffff) | ((w1 & 0xffff_ffff) << 32);
let proc_pending = PROC_CONTROL_STRUCT.pending.load(Ordering::Relaxed);
core::sync::atomic::fence(Ordering::Acquire); // TODO: Correct ordering?
(thread_pending | proc_pending) & !allow
}
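// Sketch (not part of the original source): each Sigcontrol word packs 32 pending bits in its
// low half and 32 "allow" (unblocked) bits in its high half, which is what the shifts above
// extract. For example, with signal 1 pending and only signal 3 unblocked:
#[cfg(test)]
#[test]
fn control_word_layout_sketch() {
    let w0: u64 = 1 | (0b100 << 32); // pending: bit 0 (signal 1); allow: bit 2 (signal 3)
    let w1: u64 = 0;
    let allow = (w0 >> 32) | ((w1 >> 32) << 32);
    let pending = (w0 & 0xffff_ffff) | ((w1 & 0xffff_ffff) << 32);
    assert_eq!(allow, 1 << 2);
    assert_eq!(pending, 1 << 0);
    // Signal 1 is pending but still blocked, so it shows up in the result:
    assert_eq!(pending & !allow, 1 << 0);
}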
pub enum Unreachable {}
pub fn await_signal_async(inner_allowset: u64) -> Result<Unreachable> {
let _guard = tmp_disable_signals();
let control = &unsafe { Tcb::current().unwrap() }.os_specific.control;
let old_allowset = get_allowset_raw(&control.word);
set_allowset_raw(&control.word, old_allowset, inner_allowset);
let res = syscall::nanosleep(
&TimeSpec {
tv_sec: i64::MAX,
tv_nsec: 0,
},
&mut TimeSpec::default(),
);
set_allowset_raw(&control.word, inner_allowset, old_allowset);
if res == Err(Error::new(EINTR)) {
unsafe {
manually_enter_trampoline();
}
}
res?;
unreachable!()
}
// TODO: deadline-based API
pub fn await_signal_sync(inner_allowset: u64, timeout: Option<&TimeSpec>) -> Result<SiginfoAbi> {
let _guard = tmp_disable_signals();
let control = &unsafe { Tcb::current().unwrap() }.os_specific.control;
let old_allowset = get_allowset_raw(&control.word);
let proc_pending = PROC_CONTROL_STRUCT.pending.load(Ordering::Acquire);
let thread_pending = set_allowset_raw(&control.word, old_allowset, inner_allowset);
// Check if there are already signals matching the requested set before waiting.
if let Some(info) = try_claim_multiple(proc_pending, thread_pending, inner_allowset, control) {
// TODO: RAII
set_allowset_raw(&control.word, inner_allowset, old_allowset);
return Ok(info);
}
let res = match timeout {
Some(t) => syscall::nanosleep(t, &mut TimeSpec::default()),
None => syscall::nanosleep(
&TimeSpec {
tv_sec: i64::MAX,
tv_nsec: 0,
},
&mut TimeSpec::default(),
),
};
let thread_pending = set_allowset_raw(&control.word, inner_allowset, old_allowset);
let proc_pending = PROC_CONTROL_STRUCT.pending.load(Ordering::Acquire);
if let Err(error) = res
&& error.errno != EINTR
{
return Err(error);
}
// Then check if there were any signals left after waiting.
try_claim_multiple(proc_pending, thread_pending, inner_allowset, control)
// Normally ETIMEDOUT but not for sigtimedwait.
.ok_or(Error::new(EAGAIN))
}
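// Usage sketch (not part of the original source): a sigtimedwait(2)-style caller passes the
// signals of interest as a bitmask (bit N-1 for signal N), e.g. waiting for SIGUSR1 with no
// timeout (SIGUSR1 is assumed to be in scope here):
//
//     let info = await_signal_sync(sig_bit(SIGUSR1 as u32), None)?;
//     assert_eq!(info.si_signo, SIGUSR1 as i32);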
fn try_claim_multiple(
mut proc_pending: u64,
mut thread_pending: u64,
allowset: u64,
control: &Sigcontrol,
) -> Option<SiginfoAbi> {
while (proc_pending | thread_pending) & allowset != 0 {
let sig_idx = ((proc_pending | thread_pending) & allowset).trailing_zeros();
if thread_pending & allowset & (1 << sig_idx) != 0
&& let Some(res) = try_claim_single(sig_idx, Some(control))
{
return Some(res);
}
thread_pending &= !(1 << sig_idx);
if proc_pending & allowset & (1 << sig_idx) != 0
&& let Some(res) = try_claim_single(sig_idx, None)
{
return Some(res);
}
proc_pending &= !(1 << sig_idx);
}
None
}
fn try_claim_single(sig_idx: u32, thread_control: Option<&Sigcontrol>) -> Option<SiginfoAbi> {
let sig_group = sig_idx / 32;
if sig_group == 1 && thread_control.is_none() {
// Queued (realtime) signal
let mut ret = MaybeUninit::<RtSigInfo>::uninit();
let rt_inf = unsafe {
syscall::syscall2(
syscall::SYS_SIGDEQUEUE,
ret.as_mut_ptr() as usize,
sig_idx as usize - 32,
)
.ok()?;
ret.assume_init()
};
Some(SiginfoAbi {
si_signo: sig_idx as i32 + 1,
si_errno: 0,
si_code: rt_inf.code,
si_pid: rt_inf.pid as i32,
si_uid: rt_inf.uid as i32,
si_status: 0,
si_value: rt_inf.arg,
si_addr: core::ptr::null_mut(),
})
} else {
// Idempotent (standard or thread realtime) signal
let info = SenderInfo::from_raw(match thread_control {
Some(ctl) => {
// Only this thread can clear pending bits, so this will always succeed.
let info = ctl.sender_infos[sig_idx as usize].load(Ordering::Acquire);
// TODO: Ordering
ctl.word[sig_group as usize].fetch_and(!(1 << (sig_idx % 32)), Ordering::Release);
info
}
None => {
let info =
PROC_CONTROL_STRUCT.sender_infos[sig_idx as usize].load(Ordering::Acquire);
if PROC_CONTROL_STRUCT
.pending
.fetch_and(!(1 << sig_idx), Ordering::Release)
& (1 << sig_idx)
== 0
{
// already claimed
return None;
}
info
}
});
Some(SiginfoAbi {
si_signo: sig_idx as i32 + 1,
si_errno: 0,
si_code: 0, // TODO: SI_USER const?
si_pid: info.pid as i32,
si_uid: info.ruid as i32,
si_status: 0,
si_value: 0, // undefined
si_addr: core::ptr::null_mut(),
})
}
}
// TODO: Share code for simple futex-based mutex between relibc's Mutex<()> and this.
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
sync::atomic::{AtomicU32, Ordering},
};
pub struct Mutex<T> {
pub lockword: AtomicU32,
pub inner: UnsafeCell<T>,
}
const UNLOCKED: u32 = 0;
const LOCKED: u32 = 1;
const WAITING: u32 = 2;
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
impl<T> Mutex<T> {
pub const fn new(t: T) -> Self {
Self {
lockword: AtomicU32::new(0),
inner: UnsafeCell::new(t),
}
}
pub fn lock(&self) -> MutexGuard<'_, T> {
while self
.lockword
.compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
core::hint::spin_loop();
}
MutexGuard { lock: self }
}
}
pub struct MutexGuard<'l, T> {
lock: &'l Mutex<T>,
}
impl<T> Deref for MutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.lock.inner.get() }
}
}
impl<T> DerefMut for MutexGuard<'_, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.lock.inner.get() }
}
}
impl<T> Drop for MutexGuard<'_, T> {
fn drop(&mut self) {
self.lock.lockword.store(UNLOCKED, Ordering::Release);
}
}
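// Usage sketch (not part of the original source): the guard unlocks on drop, so a critical
// section is just the guard's scope.
#[cfg(test)]
#[test]
fn mutex_guard_sketch() {
    static COUNTER: Mutex<u32> = Mutex::new(0);
    {
        let mut guard = COUNTER.lock();
        *guard += 1;
    } // Dropping the guard stores UNLOCKED with Release ordering.
    assert_eq!(COUNTER.lockword.load(Ordering::Relaxed), UNLOCKED);
    assert_eq!(*COUNTER.lock(), 1);
}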
use core::{
ptr::addr_of,
sync::atomic::{AtomicU32, Ordering},
};
use syscall::{
error::{Error, Result, EINTR},
RtSigInfo, TimeSpec,
};
use crate::{arch::manually_enter_trampoline, proc::FdGuard, signal::tmp_disable_signals, Tcb};
#[inline]
fn wrapper<T>(restart: bool, mut f: impl FnMut() -> Result<T>) -> Result<T> {
loop {
let _guard = tmp_disable_signals();
let rt_sigarea = unsafe { &Tcb::current().unwrap().os_specific };
let res = f();
if let Err(err) = res
&& err == Error::new(EINTR)
{
unsafe {
manually_enter_trampoline();
}
if restart && unsafe { (*rt_sigarea.arch.get()).last_sig_was_restart } {
continue;
}
}
return res;
}
}
// TODO: uninitialized memory?
#[inline]
pub fn posix_read(fd: usize, buf: &mut [u8]) -> Result<usize> {
wrapper(true, || syscall::read(fd, buf))
}
#[inline]
pub fn posix_write(fd: usize, buf: &[u8]) -> Result<usize> {
wrapper(true, || syscall::write(fd, buf))
}
#[inline]
pub fn posix_kill(pid: usize, sig: usize) -> Result<()> {
match wrapper(false, || syscall::kill(pid, sig)) {
Ok(_) | Err(Error { errno: EINTR }) => Ok(()),
Err(error) => Err(error),
}
}
#[inline]
pub fn posix_sigqueue(pid: usize, sig: usize, arg: usize) -> Result<()> {
let siginf = RtSigInfo {
arg,
code: -1, // TODO: SI_QUEUE constant
uid: 0, // TODO
pid: posix_getpid(),
};
match wrapper(false, || unsafe {
syscall::syscall3(syscall::SYS_SIGENQUEUE, pid, sig, addr_of!(siginf) as usize)
}) {
Ok(_) | Err(Error { errno: EINTR }) => Ok(()),
Err(error) => Err(error),
}
}
#[inline]
pub fn posix_getpid() -> u32 {
// SAFETY: read-only except during program/fork child initialization
unsafe { crate::THIS_PID.get().read() }
}
#[inline]
pub fn posix_killpg(pgrp: usize, sig: usize) -> Result<()> {
match wrapper(false, || syscall::kill(usize::wrapping_neg(pgrp), sig)) {
Ok(_) | Err(Error { errno: EINTR }) => Ok(()),
Err(error) => Err(error),
}
}
#[inline]
pub unsafe fn sys_futex_wait(addr: *mut u32, val: u32, deadline: Option<&TimeSpec>) -> Result<()> {
wrapper(true, || {
syscall::syscall5(
syscall::SYS_FUTEX,
addr as usize,
syscall::FUTEX_WAIT,
val as usize,
deadline.map_or(0, |d| d as *const _ as usize),
0,
)
.map(|_| ())
})
}
#[inline]
pub unsafe fn sys_futex_wake(addr: *mut u32, num: u32) -> Result<u32> {
syscall::syscall5(
syscall::SYS_FUTEX,
addr as usize,
syscall::FUTEX_WAKE,
num as usize,
0,
0,
)
.map(|awoken| awoken as u32)
}
pub fn sys_waitpid(pid: usize, status: &mut usize, flags: usize) -> Result<usize> {
wrapper(true, || {
syscall::waitpid(
pid,
status,
syscall::WaitFlags::from_bits(flags).expect("waitpid: invalid bit pattern"),
)
})
}
pub fn posix_kill_thread(thread_fd: usize, signal: u32) -> Result<()> {
let killfd = FdGuard::new(syscall::dup(thread_fd, b"signal")?);
match wrapper(false, || syscall::write(*killfd, &signal.to_ne_bytes())) {
Ok(_) | Err(Error { errno: EINTR }) => Ok(()),
Err(error) => Err(error),
}
}
static UMASK: AtomicU32 = AtomicU32::new(0o022);
/// Controls the set of permission bits removed from the `mode` argument when new files are
/// created.
///
/// Must be validated by the caller.
//
// TODO: validate here?
#[inline]
pub fn swap_umask(mask: u32) -> u32 {
UMASK.swap(mask, Ordering::AcqRel)
}
#[inline]
pub fn get_umask() -> u32 {
UMASK.load(Ordering::Acquire)
}
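// Sketch (not part of the original source): a file-creation path applies the mask by clearing
// the masked permission bits from the requested mode.
#[cfg(test)]
#[test]
fn umask_application_sketch() {
    // With the default mask 0o022, a requested mode of 0o666 becomes 0o644.
    let effective = 0o666 & !get_umask();
    assert_eq!(effective, 0o644);
}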
use core::mem::size_of;
use syscall::{Result, O_CLOEXEC};
use crate::{arch::*, proc::*, signal::tmp_disable_signals, RtTcb};
/// Spawns a new context sharing the same address space as the current one (i.e. a new thread).
pub unsafe fn rlct_clone_impl(stack: *mut usize) -> Result<FdGuard> {
let cur_thr_fd = RtTcb::current().thread_fd();
let new_thr_fd = FdGuard::new(syscall::open(
"/scheme/thisproc/new-thread/open_via_dup",
O_CLOEXEC,
)?);
copy_str(**cur_thr_fd, *new_thr_fd, "name")?;
// Inherit existing address space
{
let cur_addr_space_fd = FdGuard::new(syscall::dup(**cur_thr_fd, b"addrspace")?);
let new_addr_space_sel_fd = FdGuard::new(syscall::dup(*new_thr_fd, b"current-addrspace")?);
let buf = create_set_addr_space_buf(
*cur_addr_space_fd,
__relibc_internal_rlct_clone_ret as usize,
stack as usize,
);
let _ = syscall::write(*new_addr_space_sel_fd, &buf)?;
}
// Inherit reference to file table
{
let cur_filetable_fd = FdGuard::new(syscall::dup(**cur_thr_fd, b"filetable")?);
let new_filetable_sel_fd = FdGuard::new(syscall::dup(*new_thr_fd, b"current-filetable")?);
let _ = syscall::write(
*new_filetable_sel_fd,
&usize::to_ne_bytes(*cur_filetable_fd),
)?;
}
// Since the signal handler is not yet initialized, signals specifically targeting the thread
// (relibc is only required to implement thread-specific signals that already originate from
// the same process) will be discarded. Process-specific signals will ignore this new thread
// until it has initialized its own signal handler.
// Unblock context.
let start_fd = FdGuard::new(syscall::dup(*new_thr_fd, b"start")?);
let _ = syscall::write(*start_fd, &[0])?;
Ok(new_thr_fd)
}
pub unsafe fn exit_this_thread(stack_base: *mut (), stack_size: usize) -> ! {
let _guard = tmp_disable_signals();
let tcb = RtTcb::current();
let thread_fd = tcb.thread_fd();
let _ = syscall::funmap(tcb as *const RtTcb as usize, syscall::PAGE_SIZE);
// TODO: modify interface so it writes directly to the thread fd?
let status_fd = syscall::dup(**thread_fd, b"status").unwrap();
let mut buf = [0; size_of::<usize>() * 3];
plain::slice_from_mut_bytes(&mut buf)
.unwrap()
.copy_from_slice(&[usize::MAX, stack_base as usize, stack_size]);
syscall::write(status_fd, &buf).unwrap();
unreachable!()
}
@@ -2,7 +2,7 @@
set -e
if [[ "$(cargo install --list | grep '^redoxer ')" != "redoxer "* ]]
if ! which redoxer
then
cargo install redoxer
fi
@@ -12,6 +12,7 @@ then
redoxer toolchain
fi
export CARGOFLAGS=""
export CARGO_TEST="redoxer"
export TEST_RUNNER="redoxer exec --folder . -- sh --"
export TEST_RUNNER="redoxer exec --folder . --"
redoxer env make "$@"
#!/bin/bash
set -e
target=$1
deps_dir=$2
if [ -z "$target" ] || [ -z "$deps_dir" ]; then
echo "Usage:\n\t./renamesyms.sh TARGET DEPS_DIR"
exit 1
fi
if [ ! -f "$target" ]; then
echo "Target file '$target' does not exist"
exit 1
fi
if [ ! -d "$deps_dir" ] ; then
echo "Deps dir '$deps_dir' does not exist or not a directory"
exit 1
fi
symbols_file=`mktemp`
special_syms=(
__rdl_oom
__rg_alloc
__rg_alloc_zeroed
__rg_dealloc
__rg_oom
__rg_realloc
__rust_alloc
__rust_alloc_error_handler
__rust_alloc_error_handler_should_panic
__rust_alloc_zeroed
__rust_dealloc
__rust_no_alloc_shim_is_unstable
__rust_realloc
)
for dep in `find $deps_dir -type f -name "*.rlib"`; do
"${NM}" --format=posix -g "$dep" 2>/dev/null | sed 's/.*:.*//g' | awk '{if ($2 == "T") print $1}' | sed 's/^\(.*\)$/\1 __relibc_\1/g' >> $symbols_file
done
for special_sym in "${special_syms[@]}"; do
echo "$special_sym __relibc_$special_sym" >> $symbols_file
done
sorted_file=`mktemp`
sort -u "$symbols_file" > "$sorted_file"
rm -f "$symbols_file"
"${OBJCOPY}" --redefine-syms="$sorted_file" "$target"
rm -f "$sorted_file"
nightly-2019-11-25
[toolchain]
channel = "nightly-2025-01-12"
components = ["rust-src"]
max_width = 100
hard_tabs = false
tab_spaces = 4
newline_style = "Unix"
indent_style = "Block"
format_strings = false
blank_lines_lower_bound = 0
blank_lines_upper_bound = 1
brace_style = "SameLineWhere"
disable_all_formatting = false
edition = "2018"
empty_item_single_line = true
fn_single_line = false
where_single_line = false
force_explicit_abi = true
format_strings = false
hard_tabs = false
hide_parse_errors = false
imports_granularity = "Crate"
imports_indent = "Block"
imports_layout = "Mixed"
fn_args_density = "Tall"
brace_style = "SameLineWhere"
trailing_comma = "Vertical"
blank_lines_upper_bound = 1
blank_lines_lower_bound = 0
force_explicit_abi = true
disable_all_formatting = false
indent_style = "Block"
max_width = 100
newline_style = "Unix"
skip_children = false
hide_parse_errors = false
report_todo = "Never"
report_fixme = "Never"
edition = "2018"
merge_imports = true
tab_spaces = 4
trailing_comma = "Vertical"
where_single_line = false
#include <stdarg.h>
#include <sys/types_internal.h>
// TODO: Can be implemented in rust when cbindgen supports "..." syntax
int sys_open(const char* filename, int flags, mode_t mode);
int open(const char* filename, int flags, ...) {
mode_t mode = 0;
va_list ap;
va_start(ap, flags);
mode = va_arg(ap, mode_t);
va_end(ap);
return sys_open(filename, flags, mode);
}
int sys_fcntl(int fildes, int cmd, int args);
int fcntl(int fildes, int cmd, ...) {
int args = 0;
va_list ap;
va_start(ap, cmd);
args = va_arg(ap, int);
va_end(ap);
return sys_fcntl(fildes, cmd, args);
}
// TODO: Can be implemented in rust when cbindgen supports "..." syntax
#include <stdarg.h>
int sys_ptrace(int request, va_list ap);
int ptrace(int request, ...) {
va_list ap;
va_start(ap, request);
int ret = sys_ptrace(request, ap);
va_end(ap);
return ret;
}
#include <stdint.h>
void abort();
uintptr_t __stack_chk_guard = 0xd048c37519fcadfe;
__attribute__((noreturn))
void __stack_chk_fail(void) {
abort();
}
#include <stdarg.h>
#include <stddef.h>
typedef struct FILE FILE;
// TODO: Can be implemented in rust when cbindgen supports "..." syntax
int vasprintf(char ** strp, const char * fmt, va_list ap);
int asprintf(char ** strp, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vasprintf(strp, fmt, ap);
va_end(ap);
return ret;
}
int vfprintf(FILE * stream, const char * fmt, va_list ap);
int fprintf(FILE * stream, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vfprintf(stream, fmt, ap);
va_end(ap);
return ret;
}
int vprintf(const char * fmt, va_list ap);
int printf(const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vprintf(fmt, ap);
va_end(ap);
return ret;
}
int vsnprintf(char * s, size_t n, const char * fmt, va_list ap);
int snprintf(char * s, size_t n, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vsnprintf(s, n, fmt, ap);
va_end(ap);
return ret;
}
int vsprintf(char * s, const char * fmt, va_list ap);
int sprintf(char *s, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vsprintf(s, fmt, ap);
va_end(ap);
return ret;
}
int vfscanf(FILE * stream, const char * fmt, va_list ap);
int fscanf(FILE * stream, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vfscanf(stream, fmt, ap);
va_end(ap);
return ret;
}
int vscanf(const char * fmt, va_list ap);
int scanf(const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vscanf(fmt, ap);
va_end(ap);
return ret;
}
int vsscanf(const char * input, const char * fmt, va_list ap);
int sscanf(const char * input, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vsscanf(input, fmt, ap);
va_end(ap);
return ret;
}
#include <stdarg.h>
#include <stddef.h>
// TODO: Can be implemented in rust when cbindgen supports "..." syntax
int execv(const char *path, char *const *argv);
int execl(const char *path, const char* argv0, ...)
{
int argc;
va_list ap;
va_start(ap, argv0);
for (argc = 1; va_arg(ap, const char*); argc++);
va_end(ap);
{
int i;
char *argv[argc+1];
va_start(ap, argv0);
argv[0] = (char *)argv0;
for (i = 1; i < argc; i++) {
argv[i] = va_arg(ap, char *);
}
argv[i] = NULL;
va_end(ap);
return execv(path, argv);
}
}
int execve(const char *path, char *const *argv, char *const *envp);
int execle(const char *path, const char* argv0, ...)
{
int argc;
va_list ap;
va_start(ap, argv0);
for (argc = 1; va_arg(ap, const char *); argc++);
va_end(ap);
{
int i;
char *argv[argc+1];
char **envp;
va_start(ap, argv0);
argv[0] = (char *)argv0;
for (i = 1; i <= argc; i++) {
argv[i] = va_arg(ap, char *);
}
envp = va_arg(ap, char **);
va_end(ap);
return execve(path, argv, envp);
}
}
int execvp(const char *file, char *const *argv);
int execlp(const char *file, const char* argv0, ...)
{
int argc;
va_list ap;
va_start(ap, argv0);
for (argc = 1; va_arg(ap, const char*); argc++);
va_end(ap);
{
int i;
char *argv[argc+1];
va_start(ap, argv0);
argv[0] = (char *)argv0;
for (i = 1; i < argc; i++) {
argv[i] = va_arg(ap, char *);
}
argv[i] = NULL;
va_end(ap);
return execvp(file, argv);
}
}
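/* Usage sketch (not part of the original file): the variadic argument list passed to
 * execl()/execle()/execlp() must end with a null pointer, which the counting loops above rely
 * on; for execle(), the envp array follows the terminator:
 *
 *     execl("/bin/sh", "sh", "-c", "echo hello", (char *)NULL);
 *     execle("/bin/sh", "sh", "-c", "echo hello", (char *)NULL, environ);
 */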
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Nul-terminated byte strings.
use core::{marker::PhantomData, ptr::NonNull, str::Utf8Error};
use alloc::{
borrow::{Borrow, Cow, ToOwned},
boxed::Box,
rc::Rc,
borrow::{Cow, ToOwned},
string::String,
sync::Arc,
vec::Vec,
};
use core::{
ascii,
cmp::Ordering,
fmt::{self, Write},
mem, ops, ptr, slice,
str::{self, Utf8Error},
};
use crate::{header::string::strlen, platform::types::*};
pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
use crate::header::string;
let p = unsafe {
string::memchr(
haystack.as_ptr() as *const c_void,
needle as c_int,
haystack.len(),
)
};
if p.is_null() {
None
} else {
Some(p as usize - (haystack.as_ptr() as usize))
}
}
/// A type representing an owned, C-compatible, nul-terminated string with no nul bytes in the
/// middle.
///
/// This type serves the purpose of being able to safely generate a
/// C-compatible string from a Rust byte slice or vector. An instance of this
/// type is a static guarantee that the underlying bytes contain no interior 0
/// bytes ("nul characters") and that the final byte is 0 ("nul terminator").
///
/// `CString` is to [`CStr`] as [`String`] is to [`&str`]: the former
/// in each pair are owned strings; the latter are borrowed
/// references.
///
/// # Creating a `CString`
///
/// A `CString` is created from either a byte slice or a byte vector,
/// or anything that implements [`Into`]`<`[`Vec`]`<`[`u8`]`>>` (for
/// example, you can build a `CString` straight out of a [`String`] or
/// a [`&str`], since both implement that trait).
///
/// The [`new`] method will actually check that the provided `&[u8]`
/// does not have 0 bytes in the middle, and return an error if it
/// finds one.
///
/// # Extracting a raw pointer to the whole C string
///
/// `CString` implements a [`as_ptr`] method through the [`Deref`]
/// trait. This method will give you a `*const c_char` which you can
/// feed directly to extern functions that expect a nul-terminated
/// string, like C's `strdup()`.
///
/// # Extracting a slice of the whole C string
///
/// Alternatively, you can obtain a `&[`[`u8`]`]` slice from a
/// `CString` with the [`as_bytes`] method. Slices produced in this
/// way do *not* contain the trailing nul terminator. This is useful
/// when you will be calling an extern function that takes a `*const
/// u8` argument which is not necessarily nul-terminated, plus another
/// argument with the length of the string — like C's `strndup()`.
/// You can of course get the slice's length with its
/// [`len`][slice.len] method.
///
/// If you need a `&[`[`u8`]`]` slice *with* the nul terminator, you
/// can use [`as_bytes_with_nul`] instead.
///
/// Once you have the kind of slice you need (with or without a nul
/// terminator), you can call the slice's own
/// [`as_ptr`][slice.as_ptr] method to get a raw pointer to pass to
/// extern functions. See the documentation for that function for a
/// discussion on ensuring the lifetime of the raw pointer.
///
/// [`Into`]: ../convert/trait.Into.html
/// [`Vec`]: ../vec/struct.Vec.html
/// [`String`]: ../string/struct.String.html
/// [`&str`]: ../primitive.str.html
/// [`u8`]: ../primitive.u8.html
/// [`new`]: #method.new
/// [`as_bytes`]: #method.as_bytes
/// [`as_bytes_with_nul`]: #method.as_bytes_with_nul
/// [`as_ptr`]: #method.as_ptr
/// [slice.as_ptr]: ../primitive.slice.html#method.as_ptr
/// [slice.len]: ../primitive.slice.html#method.len
/// [`Deref`]: ../ops/trait.Deref.html
/// [`CStr`]: struct.CStr.html
///
/// # Examples
///
/// ```ignore (extern-declaration)
/// # fn main() {
/// use std::ffi::CString;
/// use std::os::raw::c_char;
///
/// extern {
/// fn my_printer(s: *const c_char);
/// }
///
/// // We are certain that our string doesn't have 0 bytes in the middle,
/// // so we can .unwrap()
/// let c_to_print = CString::new("Hello, world!").unwrap();
/// unsafe {
/// my_printer(c_to_print.as_ptr());
/// }
/// # }
/// ```
///
/// # Safety
///
/// `CString` is intended for working with traditional C-style strings
/// (a sequence of non-nul bytes terminated by a single nul byte); the
/// primary use case for these kinds of strings is interoperating with C-like
/// code. Often you will need to transfer ownership to/from that external
/// code. It is strongly recommended that you thoroughly read through the
/// documentation of `CString` before use, as improper ownership management
/// of `CString` instances can lead to invalid memory accesses, memory leaks,
/// and other memory errors.
#[derive(PartialEq, PartialOrd, Eq, Ord, Hash, Clone)]
pub struct CString {
// Invariant 1: the slice ends with a zero byte and has a length of at least one.
// Invariant 2: the slice contains only one zero byte.
// Improper usage of an unsafe function can break Invariant 2, but not Invariant 1.
inner: Box<[u8]>,
}
/// Representation of a borrowed C string.
///
/// This type represents a borrowed reference to a nul-terminated
/// array of bytes. It can be constructed safely from a `&[`[`u8`]`]`
/// slice, or unsafely from a raw `*const c_char`. It can then be
/// converted to a Rust [`&str`] by performing UTF-8 validation, or
/// into an owned [`CString`].
///
/// `CStr` is to [`CString`] as [`&str`] is to [`String`]: the former
/// in each pair are borrowed references; the latter are owned
/// strings.
///
/// Note that this structure is **not** `repr(C)` and is not recommended to be
/// placed in the signatures of FFI functions. Instead, safe wrappers of FFI
/// functions may leverage the unsafe [`from_ptr`] constructor to provide a safe
/// interface to other consumers.
///
/// # Examples
///
/// Inspecting a foreign C string:
///
/// ```ignore (extern-declaration)
/// use std::ffi::CStr;
/// use std::os::raw::c_char;
///
/// extern { fn my_string() -> *const c_char; }
///
/// unsafe {
/// let slice = CStr::from_ptr(my_string());
/// println!("string buffer size without nul terminator: {}", slice.to_bytes().len());
/// }
/// ```
///
/// Passing a Rust-originating C string:
///
/// ```ignore (extern-declaration)
/// use std::ffi::{CString, CStr};
/// use std::os::raw::c_char;
///
/// fn work(data: &CStr) {
/// extern { fn work_with(data: *const c_char); }
///
/// unsafe { work_with(data.as_ptr()) }
/// }
///
/// let s = CString::new("data data data data").unwrap();
/// work(&s);
/// ```
///
/// Converting a foreign C string into a Rust [`String`]:
///
/// ```ignore (extern-declaration)
/// use std::ffi::CStr;
/// use std::os::raw::c_char;
///
/// extern { fn my_string() -> *const c_char; }
///
/// fn my_string_safe() -> String {
/// unsafe {
/// CStr::from_ptr(my_string()).to_string_lossy().into_owned()
/// }
/// }
///
/// println!("string: {}", my_string_safe());
/// ```
///
/// [`u8`]: ../primitive.u8.html
/// [`&str`]: ../primitive.str.html
/// [`String`]: ../string/struct.String.html
/// [`CString`]: struct.CString.html
/// [`from_ptr`]: #method.from_ptr
#[derive(Hash)]
pub struct CStr {
// FIXME: this should not be represented with a DST slice but rather with
// just a raw `c_char` along with some form of marker to make
// this an unsized type. Essentially `sizeof(&CStr)` should be the
// same as `sizeof(&c_char)` but `CStr` should be an unsized type.
inner: [c_char],
}
/// An error indicating that an interior nul byte was found.
///
/// While Rust strings may contain nul bytes in the middle, C strings
/// can't, as that byte would effectively truncate the string.
///
/// This error is created by the [`new`][`CString::new`] method on
/// [`CString`]. See its documentation for more.
///
/// [`CString`]: struct.CString.html
/// [`CString::new`]: struct.CString.html#method.new
///
/// # Examples
///
/// ```
/// use std::ffi::{CString, NulError};
///
/// let _: NulError = CString::new(b"f\0oo".to_vec()).unwrap_err();
/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NulError(usize, Vec<u8>);
use crate::{header::string::strlen, platform::types::c_char};
/// An error indicating that a nul byte was not in the expected position.
///
/// The slice used to create a [`CStr`] must have one and only one nul
/// byte at the end of the slice.
///
/// This error is created by the
/// [`from_bytes_with_nul`][`CStr::from_bytes_with_nul`] method on
/// [`CStr`]. See its documentation for more.
///
/// [`CStr`]: struct.CStr.html
/// [`CStr::from_bytes_with_nul`]: struct.CStr.html#method.from_bytes_with_nul
///
/// # Examples
///
/// ```
/// use std::ffi::{CStr, FromBytesWithNulError};
///
/// let _: FromBytesWithNulError = CStr::from_bytes_with_nul(b"f\0oo").unwrap_err();
/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FromBytesWithNulError {
kind: FromBytesWithNulErrorKind,
/// C string wrapper, guaranteed to be nul-terminated.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct CStr<'a> {
ptr: NonNull<c_char>,
_marker: PhantomData<&'a [u8]>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
enum FromBytesWithNulErrorKind {
InteriorNul(usize),
NotNulTerminated,
}
impl FromBytesWithNulError {
fn interior_nul(pos: usize) -> FromBytesWithNulError {
FromBytesWithNulError {
kind: FromBytesWithNulErrorKind::InteriorNul(pos),
}
}
fn not_nul_terminated() -> FromBytesWithNulError {
FromBytesWithNulError {
kind: FromBytesWithNulErrorKind::NotNulTerminated,
}
}
fn description(&self) -> &str {
match self.kind {
FromBytesWithNulErrorKind::InteriorNul(..) => {
"data provided contains an interior nul byte"
}
FromBytesWithNulErrorKind::NotNulTerminated => "data provided is not nul terminated",
}
}
}
/// An error indicating invalid UTF-8 when converting a [`CString`] into a [`String`].
///
/// `CString` is just a wrapper over a buffer of bytes with a nul
/// terminator; [`into_string`][`CString::into_string`] performs UTF-8
/// validation on those bytes and may return this error.
///
/// This `struct` is created by the
/// [`into_string`][`CString::into_string`] method on [`CString`]. See
/// its documentation for more.
///
/// [`String`]: ../string/struct.String.html
/// [`CString`]: struct.CString.html
/// [`CString::into_string`]: struct.CString.html#method.into_string
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct IntoStringError {
inner: CString,
error: Utf8Error,
}
impl CString {
/// Creates a new C-compatible string from a container of bytes.
///
/// This function will consume the provided data and use the
/// underlying bytes to construct a new string, ensuring that
/// there is a trailing 0 byte. This trailing 0 byte will be
/// appended by this function; the provided data should *not*
/// contain any 0 bytes in it.
///
/// # Examples
///
/// ```ignore (extern-declaration)
/// use std::ffi::CString;
/// use std::os::raw::c_char;
///
/// extern { fn puts(s: *const c_char); }
///
/// let to_print = CString::new("Hello!").unwrap();
/// unsafe {
/// puts(to_print.as_ptr());
/// }
/// ```
///
/// # Errors
///
/// This function will return an error if the supplied bytes contain an
/// internal 0 byte. The [`NulError`] returned will contain the bytes as well as
/// the position of the nul byte.
///
/// [`NulError`]: struct.NulError.html
pub fn new<T: Into<Vec<u8>>>(t: T) -> Result<CString, NulError> {
Self::_new(t.into())
}
fn _new(bytes: Vec<u8>) -> Result<CString, NulError> {
match memchr(0, &bytes) {
Some(i) => Err(NulError(i, bytes)),
None => Ok(unsafe { CString::from_vec_unchecked(bytes) }),
}
}
/// Creates a C-compatible string by consuming a byte vector,
/// without checking for interior 0 bytes.
///
/// This method is equivalent to [`new`] except that no runtime assertion
/// is made that `v` contains no 0 bytes, and it requires an actual
/// byte vector, not anything that can be converted to one with Into.
///
/// [`new`]: #method.new
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let raw = b"foo".to_vec();
/// unsafe {
/// let c_string = CString::from_vec_unchecked(raw);
/// }
/// ```
pub unsafe fn from_vec_unchecked(mut v: Vec<u8>) -> CString {
v.reserve_exact(1);
v.push(0);
CString {
inner: v.into_boxed_slice(),
}
}
/// Retakes ownership of a `CString` that was transferred to C via [`into_raw`].
///
/// Additionally, the length of the string will be recalculated from the pointer.
///
/// # Safety
///
/// This should only ever be called with a pointer that was earlier
/// obtained by calling [`into_raw`] on a `CString`. Other usage (e.g. trying to take
/// ownership of a string that was allocated by foreign code) is likely to lead
/// to undefined behavior or allocator corruption.
impl<'a> CStr<'a> {
/// Safety
///
/// > **Note:** If you need to borrow a string that was allocated by
/// > foreign code, use [`CStr`]. If you need to take ownership of
/// > a string that was allocated by foreign code, you will need to
/// > make your own provisions for freeing it appropriately, likely
/// > with the foreign code's API to do that.
///
/// [`into_raw`]: #method.into_raw
/// [`CStr`]: struct.CStr.html
///
/// # Examples
///
/// Create a `CString`, pass ownership to an `extern` function (via raw pointer), then retake
/// ownership with `from_raw`:
///
/// ```ignore (extern-declaration)
/// use std::ffi::CString;
/// use std::os::raw::c_char;
///
/// extern {
/// fn some_extern_function(s: *mut c_char);
/// }
///
/// let c_string = CString::new("Hello!").unwrap();
/// let raw = c_string.into_raw();
/// unsafe {
/// some_extern_function(raw);
/// let c_string = CString::from_raw(raw);
/// }
/// ```
pub unsafe fn from_raw(ptr: *mut c_char) -> CString {
let len = strlen(ptr) + 1; // Including the NUL byte
let slice = slice::from_raw_parts_mut(ptr, len as usize);
CString {
inner: Box::from_raw(slice as *mut [c_char] as *mut [u8]),
/// The ptr must be valid up to and including the first NUL byte from the base ptr.
pub const unsafe fn from_ptr(ptr: *const c_char) -> Self {
Self {
ptr: NonNull::new_unchecked(ptr as *mut c_char),
_marker: PhantomData,
}
}
/// Consumes the `CString` and transfers ownership of the string to a C caller.
///
/// The pointer which this function returns must be returned to Rust and reconstituted using
/// [`from_raw`] to be properly deallocated. Specifically, one
/// should *not* use the standard C `free()` function to deallocate
/// this string.
///
/// Failure to call [`from_raw`] will lead to a memory leak.
///
/// [`from_raw`]: #method.from_raw
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let c_string = CString::new("foo").unwrap();
///
/// let ptr = c_string.into_raw();
///
/// unsafe {
/// assert_eq!(b'f', *ptr as u8);
/// assert_eq!(b'o', *ptr.offset(1) as u8);
/// assert_eq!(b'o', *ptr.offset(2) as u8);
/// assert_eq!(b'\0', *ptr.offset(3) as u8);
///
/// // retake pointer to free memory
/// let _ = CString::from_raw(ptr);
/// }
/// ```
#[inline]
pub fn into_raw(self) -> *mut c_char {
Box::into_raw(self.into_inner()) as *mut c_char
}
/// Converts the `CString` into a [`String`] if it contains valid UTF-8 data.
///
/// On failure, ownership of the original `CString` is returned.
///
/// [`String`]: ../string/struct.String.html
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let valid_utf8 = vec![b'f', b'o', b'o'];
/// let cstring = CString::new(valid_utf8).unwrap();
/// assert_eq!(cstring.into_string().unwrap(), "foo");
///
/// let invalid_utf8 = vec![b'f', 0xff, b'o', b'o'];
/// let cstring = CString::new(invalid_utf8).unwrap();
/// let err = cstring.into_string().err().unwrap();
/// assert_eq!(err.utf8_error().valid_up_to(), 1);
/// ```
pub fn into_string(self) -> Result<String, IntoStringError> {
String::from_utf8(self.into_bytes()).map_err(|e| IntoStringError {
error: e.utf8_error(),
inner: unsafe { CString::from_vec_unchecked(e.into_bytes()) },
})
}
/// Consumes the `CString` and returns the underlying byte buffer.
///
/// The returned buffer does **not** contain the trailing nul
/// terminator, and it is guaranteed to not have any interior nul
/// bytes.
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let c_string = CString::new("foo").unwrap();
/// let bytes = c_string.into_bytes();
/// assert_eq!(bytes, vec![b'f', b'o', b'o']);
/// ```
pub fn into_bytes(self) -> Vec<u8> {
let mut vec = self.into_inner().into_vec();
let _nul = vec.pop();
debug_assert_eq!(_nul, Some(0u8));
vec
}
/// Equivalent to the [`into_bytes`] function except that the returned vector
/// includes the trailing nul terminator.
///
/// [`into_bytes`]: #method.into_bytes
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let c_string = CString::new("foo").unwrap();
/// let bytes = c_string.into_bytes_with_nul();
/// assert_eq!(bytes, vec![b'f', b'o', b'o', b'\0']);
/// ```
pub fn into_bytes_with_nul(self) -> Vec<u8> {
self.into_inner().into_vec()
}
/// Returns the contents of this `CString` as a slice of bytes.
///
/// The returned slice does **not** contain the trailing nul
/// terminator, and it is guaranteed to not have any interior nul
/// bytes. If you need the nul terminator, use
/// [`as_bytes_with_nul`] instead.
///
/// [`as_bytes_with_nul`]: #method.as_bytes_with_nul
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let c_string = CString::new("foo").unwrap();
/// let bytes = c_string.as_bytes();
/// assert_eq!(bytes, &[b'f', b'o', b'o']);
/// ```
#[inline]
pub fn as_bytes(&self) -> &[u8] {
&self.inner[..self.inner.len() - 1]
}
/// Equivalent to the [`as_bytes`] function except that the returned slice
/// includes the trailing nul terminator.
///
/// [`as_bytes`]: #method.as_bytes
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let c_string = CString::new("foo").unwrap();
/// let bytes = c_string.as_bytes_with_nul();
/// assert_eq!(bytes, &[b'f', b'o', b'o', b'\0']);
/// ```
#[inline]
pub fn as_bytes_with_nul(&self) -> &[u8] {
&self.inner
}
/// Extracts a [`CStr`] slice containing the entire string.
///
/// [`CStr`]: struct.CStr.html
///
/// # Examples
///
/// ```
/// use std::ffi::{CString, CStr};
///
/// let c_string = CString::new(b"foo".to_vec()).unwrap();
/// let c_str = c_string.as_c_str();
/// assert_eq!(c_str, CStr::from_bytes_with_nul(b"foo\0").unwrap());
/// ```
#[inline]
pub fn as_c_str(&self) -> &CStr {
&*self
}
/// Converts this `CString` into a boxed [`CStr`].
///
/// [`CStr`]: struct.CStr.html
///
/// # Examples
///
/// ```
/// use std::ffi::{CString, CStr};
///
/// let c_string = CString::new(b"foo".to_vec()).unwrap();
/// let boxed = c_string.into_boxed_c_str();
/// assert_eq!(&*boxed, CStr::from_bytes_with_nul(b"foo\0").unwrap());
/// ```
pub fn into_boxed_c_str(self) -> Box<CStr> {
unsafe { Box::from_raw(Box::into_raw(self.into_inner()) as *mut CStr) }
}
// Bypass "move out of struct which implements [`Drop`] trait" restriction.
///
/// [`Drop`]: ../ops/trait.Drop.html
fn into_inner(self) -> Box<[u8]> {
unsafe {
let result = ptr::read(&self.inner);
mem::forget(self);
result
pub unsafe fn from_nullable_ptr(ptr: *const c_char) -> Option<Self> {
if ptr.is_null() {
None
} else {
Some(Self::from_ptr(ptr))
}
}
}
// Turns this `CString` into an empty string to prevent
// memory unsafe code from working by accident. Inline
// to prevent LLVM from optimizing it away in debug builds.
impl Drop for CString {
#[inline]
fn drop(&mut self) {
pub fn to_bytes_with_nul(self) -> &'a [u8] {
unsafe {
*self.inner.get_unchecked_mut(0) = 0;
// SAFETY: The string must be valid at least until (and including) the NUL byte.
let len = strlen(self.ptr.as_ptr());
core::slice::from_raw_parts(self.ptr.as_ptr().cast(), len + 1)
}
}
}
impl ops::Deref for CString {
type Target = CStr;
#[inline]
fn deref(&self) -> &CStr {
unsafe { CStr::from_bytes_with_nul_unchecked(self.as_bytes_with_nul()) }
}
}
impl fmt::Debug for CString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl From<CString> for Vec<u8> {
/// Converts a [`CString`] into a [`Vec`]`<u8>`.
///
/// The conversion consumes the [`CString`], and removes the terminating NUL byte.
///
/// [`Vec`]: ../vec/struct.Vec.html
/// [`CString`]: ../ffi/struct.CString.html
#[inline]
fn from(s: CString) -> Vec<u8> {
s.into_bytes()
}
}
impl fmt::Debug for CStr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "\"")?;
for byte in self
.to_bytes()
.iter()
.flat_map(|&b| ascii::escape_default(b))
{
f.write_char(byte as char)?;
}
write!(f, "\"")
pub fn to_bytes(self) -> &'a [u8] {
let s = self.to_bytes_with_nul();
&s[..s.len() - 1]
}
}
impl<'a> Default for &'a CStr {
fn default() -> &'a CStr {
const SLICE: &[c_char] = &[0];
unsafe { CStr::from_ptr(SLICE.as_ptr()) }
pub fn to_str(self) -> Result<&'a str, Utf8Error> {
core::str::from_utf8(self.to_bytes())
}
}
impl Default for CString {
/// Creates an empty `CString`.
fn default() -> CString {
let a: &CStr = Default::default();
a.to_owned()
}
}
impl Borrow<CStr> for CString {
#[inline]
fn borrow(&self) -> &CStr {
self
}
}
impl<'a> From<Cow<'a, CStr>> for CString {
#[inline]
fn from(s: Cow<'a, CStr>) -> Self {
s.into_owned()
}
}
impl<'a> From<&'a CStr> for Box<CStr> {
fn from(s: &'a CStr) -> Box<CStr> {
let boxed: Box<[u8]> = Box::from(s.to_bytes_with_nul());
unsafe { Box::from_raw(Box::into_raw(boxed) as *mut CStr) }
}
}
impl From<Box<CStr>> for CString {
/// Converts a [`Box`]`<CStr>` into a [`CString`] without copying or allocating.
///
/// [`Box`]: ../boxed/struct.Box.html
/// [`CString`]: ../ffi/struct.CString.html
#[inline]
fn from(s: Box<CStr>) -> CString {
s.into_c_string()
}
}
impl Clone for Box<CStr> {
#[inline]
fn clone(&self) -> Self {
(**self).into()
}
}
impl From<CString> for Box<CStr> {
/// Converts a [`CString`] into a [`Box`]`<CStr>` without copying or allocating.
///
/// [`CString`]: ../ffi/struct.CString.html
/// [`Box`]: ../boxed/struct.Box.html
#[inline]
fn from(s: CString) -> Box<CStr> {
s.into_boxed_c_str()
}
}
impl<'a> From<CString> for Cow<'a, CStr> {
#[inline]
fn from(s: CString) -> Cow<'a, CStr> {
Cow::Owned(s)
}
}
impl<'a> From<&'a CStr> for Cow<'a, CStr> {
#[inline]
fn from(s: &'a CStr) -> Cow<'a, CStr> {
Cow::Borrowed(s)
}
}
impl<'a> From<&'a CString> for Cow<'a, CStr> {
#[inline]
fn from(s: &'a CString) -> Cow<'a, CStr> {
Cow::Borrowed(s.as_c_str())
}
}
impl From<CString> for Arc<CStr> {
/// Converts a [`CString`] into a [`Arc`]`<CStr>` without copying or allocating.
///
/// [`CString`]: ../ffi/struct.CString.html
/// [`Arc`]: ../sync/struct.Arc.html
#[inline]
fn from(s: CString) -> Arc<CStr> {
let arc: Arc<[u8]> = Arc::from(s.into_inner());
unsafe { Arc::from_raw(Arc::into_raw(arc) as *const CStr) }
}
}
impl<'a> From<&'a CStr> for Arc<CStr> {
#[inline]
fn from(s: &CStr) -> Arc<CStr> {
let arc: Arc<[u8]> = Arc::from(s.to_bytes_with_nul());
unsafe { Arc::from_raw(Arc::into_raw(arc) as *const CStr) }
}
}
impl From<CString> for Rc<CStr> {
/// Converts a [`CString`] into a [`Rc`]`<CStr>` without copying or allocating.
///
/// [`CString`]: ../ffi/struct.CString.html
/// [`Rc`]: ../rc/struct.Rc.html
#[inline]
fn from(s: CString) -> Rc<CStr> {
let rc: Rc<[u8]> = Rc::from(s.into_inner());
unsafe { Rc::from_raw(Rc::into_raw(rc) as *const CStr) }
}
}
impl<'a> From<&'a CStr> for Rc<CStr> {
#[inline]
fn from(s: &CStr) -> Rc<CStr> {
let rc: Rc<[u8]> = Rc::from(s.to_bytes_with_nul());
unsafe { Rc::from_raw(Rc::into_raw(rc) as *const CStr) }
}
}
impl Default for Box<CStr> {
fn default() -> Box<CStr> {
let boxed: Box<[u8]> = Box::from([0]);
unsafe { Box::from_raw(Box::into_raw(boxed) as *mut CStr) }
}
}
impl NulError {
/// Returns the position of the nul byte in the slice that caused
/// [`CString::new`] to fail.
///
/// [`CString::new`]: struct.CString.html#method.new
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let nul_error = CString::new("foo\0bar").unwrap_err();
/// assert_eq!(nul_error.nul_position(), 3);
///
/// let nul_error = CString::new("foo bar\0").unwrap_err();
/// assert_eq!(nul_error.nul_position(), 7);
/// ```
pub fn nul_position(&self) -> usize {
self.0
pub fn to_string_lossy(self) -> Cow<'a, str> {
String::from_utf8_lossy(self.to_bytes())
}
/// Consumes this error, returning the underlying vector of bytes which
/// generated the error in the first place.
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let nul_error = CString::new("foo\0bar").unwrap_err();
/// assert_eq!(nul_error.into_vec(), b"foo\0bar");
/// ```
pub fn into_vec(self) -> Vec<u8> {
self.1
pub const fn as_ptr(self) -> *const c_char {
self.ptr.as_ptr()
}
}
impl fmt::Display for NulError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "nul byte found in provided data at position: {}", self.0)
pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &'a [u8]) -> Self {
Self::from_ptr(bytes.as_ptr().cast())
}
}
impl fmt::Display for FromBytesWithNulError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(self.description())?;
if let FromBytesWithNulErrorKind::InteriorNul(pos) = self.kind {
write!(f, " at byte pos {}", pos)?;
pub fn from_bytes_with_nul(bytes: &'a [u8]) -> Result<Self, FromBytesWithNulError> {
if bytes.last() != Some(&b'\0') || bytes[..bytes.len() - 1].contains(&b'\0') {
return Err(FromBytesWithNulError);
}
Ok(())
}
}
impl IntoStringError {
/// Consumes this error, returning original [`CString`] which generated the
/// error.
///
/// [`CString`]: struct.CString.html
pub fn into_cstring(self) -> CString {
self.inner
}
/// Access the underlying UTF-8 error that was the cause of this error.
pub fn utf8_error(&self) -> Utf8Error {
self.error
Ok(unsafe { Self::from_bytes_with_nul_unchecked(bytes) })
}
fn description(&self) -> &str {
"C string contained non-utf8 bytes"
}
}
impl fmt::Display for IntoStringError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.description().fmt(f)
}
}
impl CStr {
/// Wraps a raw C string with a safe C string wrapper.
///
/// This function will wrap the provided `ptr` with a `CStr` wrapper, which
/// allows inspection and interoperation of non-owned C strings. This method
/// is unsafe for a number of reasons:
///
/// * There is no guarantee to the validity of `ptr`.
/// * The returned lifetime is not guaranteed to be the actual lifetime of
/// `ptr`.
/// * There is no guarantee that the memory pointed to by `ptr` contains a
/// valid nul terminator byte at the end of the string.
/// * It is not guaranteed that the memory pointed by `ptr` won't change
/// before the `CStr` has been destroyed.
///
/// > **Note**: This operation is intended to be a 0-cost cast but it is
/// > currently implemented with an up-front calculation of the length of
/// > the string. This is not guaranteed to always be the case.
///
/// # Examples
///
/// ```ignore (extern-declaration)
/// # fn main() {
/// use std::ffi::CStr;
/// use std::os::raw::c_char;
///
/// extern {
/// fn my_string() -> *const c_char;
/// }
///
/// unsafe {
/// let slice = CStr::from_ptr(my_string());
/// println!("string returned: {}", slice.to_str().unwrap());
/// }
/// # }
/// ```
pub unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
let len = strlen(ptr);
let ptr = ptr as *const u8;
CStr::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr, len as usize + 1))
}
/// Creates a C string wrapper from a byte slice.
///
/// This function will cast the provided `bytes` to a `CStr`
/// wrapper after ensuring that the byte slice is nul-terminated
/// and does not contain any interior nul bytes.
///
/// # Examples
///
/// ```
/// use std::ffi::CStr;
///
/// let cstr = CStr::from_bytes_with_nul(b"hello\0");
/// assert!(cstr.is_ok());
/// ```
///
/// Creating a `CStr` without a trailing nul terminator is an error:
///
/// ```
/// use std::ffi::CStr;
///
/// let c_str = CStr::from_bytes_with_nul(b"hello");
/// assert!(c_str.is_err());
/// ```
///
/// Creating a `CStr` with an interior nul byte is an error:
///
/// ```
/// use std::ffi::CStr;
///
/// let c_str = CStr::from_bytes_with_nul(b"he\0llo\0");
/// assert!(c_str.is_err());
/// ```
pub fn from_bytes_with_nul(bytes: &[u8]) -> Result<&CStr, FromBytesWithNulError> {
let nul_pos = memchr(0, bytes);
if let Some(nul_pos) = nul_pos {
if nul_pos + 1 != bytes.len() {
return Err(FromBytesWithNulError::interior_nul(nul_pos));
}
Ok(unsafe { CStr::from_bytes_with_nul_unchecked(bytes) })
} else {
Err(FromBytesWithNulError::not_nul_terminated())
pub fn from_bytes_until_nul(bytes: &'a [u8]) -> Result<Self, FromBytesUntilNulError> {
if !bytes.contains(&b'\0') {
return Err(FromBytesUntilNulError);
}
}
/// Unsafely creates a C string wrapper from a byte slice.
///
/// This function will cast the provided `bytes` to a `CStr` wrapper without
/// performing any sanity checks. The provided slice **must** be nul-terminated
/// and not contain any interior nul bytes.
///
/// # Examples
///
/// ```
/// use std::ffi::{CStr, CString};
///
/// unsafe {
/// let cstring = CString::new("hello").unwrap();
/// let cstr = CStr::from_bytes_with_nul_unchecked(cstring.to_bytes_with_nul());
/// assert_eq!(cstr, &*cstring);
/// }
/// ```
#[inline]
pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
&*(bytes as *const [u8] as *const CStr)
Ok(unsafe { Self::from_bytes_with_nul_unchecked(bytes) })
}
/// Returns the inner pointer to this C string.
///
/// The returned pointer will be valid for as long as `self` is, and points
/// to a contiguous region of memory terminated with a 0 byte to represent
/// the end of the string.
///
/// **WARNING**
///
/// It is your responsibility to make sure that the underlying memory is not
/// freed too early. For example, the following code will cause undefined
/// behavior when `ptr` is used inside the `unsafe` block:
///
/// ```no_run
/// # #![allow(unused_must_use)]
/// use std::ffi::{CString};
///
/// let ptr = CString::new("Hello").unwrap().as_ptr();
/// unsafe {
/// // `ptr` is dangling
/// *ptr;
/// }
/// ```
///
/// This happens because the pointer returned by `as_ptr` does not carry any
/// lifetime information and the [`CString`] is deallocated immediately after
/// the `CString::new("Hello").unwrap().as_ptr()` expression is evaluated.
/// To fix the problem, bind the `CString` to a local variable:
///
/// ```no_run
/// # #![allow(unused_must_use)]
/// use std::ffi::{CString};
///
/// let hello = CString::new("Hello").unwrap();
/// let ptr = hello.as_ptr();
/// unsafe {
/// // `ptr` is valid because `hello` is in scope
/// *ptr;
/// }
/// ```
///
/// This way, the lifetime of the `CString` in `hello` encompasses
/// the lifetime of `ptr` and the `unsafe` block.
///
/// [`CString`]: struct.CString.html
#[inline]
pub fn as_ptr(&self) -> *const c_char {
self.inner.as_ptr()
pub fn to_owned_cstring(self) -> CString {
CString::from(unsafe { core::ffi::CStr::from_ptr(self.ptr.as_ptr()) })
}
/// Converts this C string to a byte slice.
///
/// The returned slice will **not** contain the trailing nul terminator that this C
/// string has.
///
/// > **Note**: This method is currently implemented as a constant-time
/// > cast, but it is planned to alter its definition in the future to
/// > perform the length calculation whenever this method is called.
///
/// # Examples
///
/// ```
/// use std::ffi::CStr;
///
/// let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
/// assert_eq!(c_str.to_bytes(), b"foo");
/// ```
#[inline]
pub fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
&bytes[..bytes.len() - 1]
}
/// Converts this C string to a byte slice containing the trailing 0 byte.
///
/// This function is the equivalent of [`to_bytes`] except that it will retain
/// the trailing nul terminator instead of chopping it off.
///
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
///
/// [`to_bytes`]: #method.to_bytes
///
/// # Examples
///
/// ```
/// use std::ffi::CStr;
///
/// let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
/// assert_eq!(c_str.to_bytes_with_nul(), b"foo\0");
/// ```
#[inline]
pub fn to_bytes_with_nul(&self) -> &[u8] {
unsafe { &*(&self.inner as *const [c_char] as *const [u8]) }
}
/// Yields a [`&str`] slice if the `CStr` contains valid UTF-8.
///
/// If the contents of the `CStr` are valid UTF-8 data, this
/// function will return the corresponding [`&str`] slice. Otherwise,
/// it will return an error with details of where UTF-8 validation failed.
///
/// > **Note**: This method is currently implemented to check for validity
/// > after a constant-time cast, but it is planned to alter its definition
/// > in the future to perform the length calculation in addition to the
/// > UTF-8 check whenever this method is called.
///
/// [`&str`]: ../primitive.str.html
///
/// # Examples
///
/// ```
/// use std::ffi::CStr;
///
/// let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
/// assert_eq!(c_str.to_str(), Ok("foo"));
/// ```
pub fn to_str(&self) -> Result<&str, str::Utf8Error> {
// NB: When CStr is changed to perform the length check in .to_bytes()
// instead of in from_ptr(), it may be worth considering if this should
// be rewritten to do the UTF-8 check inline with the length calculation
// instead of doing it afterwards.
str::from_utf8(self.to_bytes())
}
/// Converts a `CStr` into a [`Cow`]`<`[`str`]`>`.
///
/// If the contents of the `CStr` are valid UTF-8 data, this
/// function will return a [`Cow`]`::`[`Borrowed`]`(`[`&str`]`)`
/// with the corresponding [`&str`] slice. Otherwise, it will
/// replace any invalid UTF-8 sequences with
/// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD] and return a
/// [`Cow`]`::`[`Owned`]`(`[`String`]`)` with the result.
///
/// > **Note**: This method is currently implemented to check for validity
/// > after a constant-time cast, but it is planned to alter its definition
/// > in the future to perform the length calculation in addition to the
/// > UTF-8 check whenever this method is called.
///
/// [`Cow`]: ../borrow/enum.Cow.html
/// [`Borrowed`]: ../borrow/enum.Cow.html#variant.Borrowed
/// [`Owned`]: ../borrow/enum.Cow.html#variant.Owned
/// [`str`]: ../primitive.str.html
/// [`String`]: ../string/struct.String.html
/// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html
///
/// # Examples
///
/// Calling `to_string_lossy` on a `CStr` containing valid UTF-8:
///
/// ```
/// use std::borrow::Cow;
/// use std::ffi::CStr;
///
/// let c_str = CStr::from_bytes_with_nul(b"Hello World\0").unwrap();
/// assert_eq!(c_str.to_string_lossy(), Cow::Borrowed("Hello World"));
/// ```
///
/// Calling `to_string_lossy` on a `CStr` containing invalid UTF-8:
///
/// ```
/// use std::borrow::Cow;
/// use std::ffi::CStr;
///
/// let c_str = CStr::from_bytes_with_nul(b"Hello \xF0\x90\x80World\0").unwrap();
/// assert_eq!(
/// c_str.to_string_lossy(),
/// Cow::Owned(String::from("Hello �World")) as Cow<str>
/// );
/// ```
pub fn to_string_lossy(&self) -> Cow<str> {
String::from_utf8_lossy(self.to_bytes())
}
/// Converts a [`Box`]`<CStr>` into a [`CString`] without copying or allocating.
///
/// [`Box`]: ../boxed/struct.Box.html
/// [`CString`]: struct.CString.html
///
/// # Examples
///
/// ```
/// use std::ffi::CString;
///
/// let c_string = CString::new(b"foo".to_vec()).unwrap();
/// let boxed = c_string.into_boxed_c_str();
/// assert_eq!(boxed.into_c_string(), CString::new("foo").unwrap());
/// ```
pub fn into_c_string(self: Box<CStr>) -> CString {
let raw = Box::into_raw(self) as *mut [u8];
CString {
inner: unsafe { Box::from_raw(raw) },
}
}
}
impl PartialEq for CStr {
fn eq(&self, other: &CStr) -> bool {
self.to_bytes().eq(other.to_bytes())
pub fn borrow(string: &'a CString) -> Self {
unsafe { Self::from_ptr(string.as_ptr()) }
}
}
impl Eq for CStr {}
unsafe impl Send for CStr<'_> {}
unsafe impl Sync for CStr<'_> {}
impl PartialOrd for CStr {
fn partial_cmp(&self, other: &CStr) -> Option<Ordering> {
self.to_bytes().partial_cmp(&other.to_bytes())
impl From<&core::ffi::CStr> for CStr<'_> {
fn from(s: &core::ffi::CStr) -> Self {
// SAFETY:
// * We can assume that `s` is valid because the caller should have upheld its
//   safety invariants when constructing it.
unsafe { Self::from_ptr(s.as_ptr()) }
}
}
impl Ord for CStr {
fn cmp(&self, other: &CStr) -> Ordering {
self.to_bytes().cmp(&other.to_bytes())
}
}
impl ToOwned for CStr {
type Owned = CString;
fn to_owned(&self) -> CString {
CString {
inner: self.to_bytes_with_nul().into(),
}
}
}
#[derive(Debug)]
pub struct FromBytesWithNulError;
impl<'a> From<&'a CStr> for CString {
fn from(s: &'a CStr) -> CString {
s.to_owned()
}
}
#[derive(Debug)]
pub struct FromBytesUntilNulError;
impl ops::Index<ops::RangeFull> for CString {
type Output = CStr;
#[inline]
fn index(&self, _index: ops::RangeFull) -> &CStr {
self
}
}
impl AsRef<CStr> for CStr {
#[inline]
fn as_ref(&self) -> &CStr {
self
}
}
impl AsRef<CStr> for CString {
#[inline]
fn as_ref(&self) -> &CStr {
self
}
}
pub use alloc::ffi::CString;
//! Equivalent of Rust's `Vec<T>`, but using relibc's own allocator.
use crate::{
io::{self, Write},
platform::{self, WriteByte, types::*},
platform::{self, types::*, WriteByte},
};
use core::{
cmp,
fmt,
cmp, fmt,
iter::IntoIterator,
mem,
ops::{Deref, DerefMut},
@@ -60,7 +61,8 @@ impl<T> CVec<T> {
let ptr = if cap == 0 {
NonNull::dangling()
} else if self.cap > 0 {
NonNull::new(platform::realloc(self.ptr.as_ptr() as *mut c_void, size) as *mut T).ok_or(AllocError)?
NonNull::new(platform::realloc(self.ptr.as_ptr() as *mut c_void, size) as *mut T)
.ok_or(AllocError)?
} else {
NonNull::new((platform::alloc(size)) as *mut T).ok_or(AllocError)?
};
@@ -208,10 +210,12 @@ impl<'a, T> IntoIterator for &'a mut CVec<T> {
impl Write for CVec<u8> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.extend_from_slice(buf).map_err(|err| io::Error::new(
io::ErrorKind::Other,
"AllocStringWriter::write failed to allocate",
))?;
self.extend_from_slice(buf).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
"AllocStringWriter::write failed to allocate",
)
})?;
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
@@ -247,7 +251,7 @@ mod tests {
}
#[test]
fn extend_from_slice() {
use core_io::Write;
use crate::io::Write;
let mut vec = CVec::new();
vec.extend_from_slice(&[1, 2, 3]).unwrap();
......