Subproject commit 867c809039aef77dbce22e98b1009b8995dfa868
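# ===== redox-rt/Cargo.toml (file boundary inferred from the diff context) =====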
[package]
name = "redox-rt"
authors = ["4lDO2 <4lDO2@protonmail.com>"]
version = "0.1.0"
edition = "2021"
license = "MIT"
description = "Libc-independent runtime for Redox"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitflags = "2"
goblin = { version = "0.7", default-features = false, features = ["elf32", "elf64", "endian_fd"] }
plain = "0.2"
redox_syscall = "0.5.8"
generic-rt = { path = "../generic-rt" }
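// ===== redox-rt/src/arch/aarch64.rs (file boundary inferred) =====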
use core::{mem::offset_of, ptr::NonNull};
use syscall::{data::*, error::*};
use crate::{
proc::{fork_inner, FdGuard},
signal::{inner_c, PosixStackt, RtSigarea, SigStack, PROC_CONTROL_STRUCT},
RtTcb, Tcb,
};
// Set up a stack starting at the very end of the address space, growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 47;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
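// A worked example of what these constants imply: the initial stack occupies
// [STACK_TOP - STACK_SIZE, STACK_TOP), i.e. the top 1 MiB ending at the 1 << 47
// user address-space limit.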
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub altstack_top: usize,
pub altstack_bottom: usize,
pub tmp_x1_x2: [usize; 2],
pub tmp_x3_x4: [usize; 2],
pub tmp_x5_x6: [usize; 2],
pub tmp_sp: usize,
pub onstack: u64,
pub disable_signals_depth: u64,
pub pctl: usize, // TODO: remove
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
}
#[repr(C)]
#[derive(Debug, Default)]
pub struct ArchIntRegs {
pub x30: usize,
pub x29: usize,
pub x28: usize,
pub x27: usize,
pub x26: usize,
pub x25: usize,
pub x24: usize,
pub x23: usize,
pub x22: usize,
pub x21: usize,
pub x20: usize,
pub x19: usize,
pub x18: usize,
pub x17: usize,
pub x16: usize,
pub x15: usize,
pub x14: usize,
pub x13: usize,
pub x12: usize,
pub x11: usize,
pub x10: usize,
pub x9: usize,
pub x8: usize,
pub x7: usize,
pub x6: usize,
pub x5: usize,
pub x4: usize,
pub x3: usize,
pub x2: usize,
pub x1: usize,
pub sp: usize,
pub nzcv: usize, // user-accessible PSTATE bits
pub pc: usize,
pub x0: usize,
}
/// Deactivate TLS, used before exec() on Redox so the target executable is not tricked into
/// thinking TLS is already initialized as if it were a thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.tpidr_el0 = 0;
let _ = syscall::write(*file, &env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "C" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "C" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
// TODO: Currently pidfd == threadfd, but this will not be the case later.
RtTcb::current()
.thr_fd
.get()
.write(Some(FdGuard::new(new_pid_fd)));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
stp x29, x30, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x19, x20, [sp, #-16]!
sub sp, sp, #32
//TODO: store floating point regs
mov x0, sp
bl {fork_impl}
add sp, sp, #32
ldp x19, x20, [sp], #16
ldp x21, x22, [sp], #16
ldp x23, x24, [sp], #16
ldp x25, x26, [sp], #16
ldp x27, x28, [sp], #16
ldp x29, x30, [sp], #16
ret
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
ldp x0, x1, [sp]
bl {child_hook}
//TODO: load floating point regs
mov x0, xzr
add sp, sp, #32
ldp x19, x20, [sp], #16
ldp x21, x22, [sp], #16
ldp x23, x24, [sp], #16
ldp x25, x26, [sp], #16
ldp x27, x28, [sp], #16
ldp x29, x30, [sp], #16
ret
"] <= [child_hook = sym child_hook]);
// https://devblogs.microsoft.com/oldnewthing/20220811-00/?p=106963
asmfunction!(__relibc_internal_sigentry: ["
// Clear any active reservation.
clrex
// The old pc and x0 are saved in the sigcontrol struct.
mrs x0, tpidr_el0 // ABI ptr
ldr x0, [x0] // TCB ptr
// Save x1-x6 and sp
stp x1, x2, [x0, #{tcb_sa_off} + {sa_tmp_x1_x2}]
stp x3, x4, [x0, #{tcb_sa_off} + {sa_tmp_x3_x4}]
stp x5, x6, [x0, #{tcb_sa_off} + {sa_tmp_x5_x6}]
mov x1, sp
str x1, [x0, #{tcb_sa_off} + {sa_tmp_sp}]
ldr x6, [x0, #{tcb_sa_off} + {sa_pctl}]
1:
// Load x1 with the thread's bits
add x5, x0, #{tcb_sc_off} + {sc_word}
ldaxr x1, [x5]
// First check if there are standard thread signals,
and x4, x1, x1, lsr #32 // x4 := x1 & (x1 >> 32)
cbnz x4, 3f // jump if x4 != 0
clrex
// and if not, load process pending bitset.
add x5, x6, #{pctl_pending}
ldaxr x2, [x5]
// Check if there are standard proc signals:
lsr x3, x1, #32 // mask
and w3, w2, w3 // pending unblocked proc
cbz w3, 4f // skip 'fetch_andn' step if zero
// If there was one, find which one, and try clearing the bit (last value in x3, addr in x6)
// this picks the MSB rather than the LSB, unlike x86. POSIX does not require any specific
// ordering though.
clz w3, w3
mov w4, #31
sub w3, w4, w3
// x3 now contains the sig_idx
mov x4, #1
lsl x4, x4, x3 // bit to remove
sub x4, x2, x4 // bit was certainly set, so sub is allowed
// x4 is now the new mask to be set
add x5, x6, #{pctl_pending}
add x2, x5, #{pctl_sender_infos}
add x2, x2, w3, uxtb #3
ldar x2, [x2]
// Try clearing the bit, retrying on failure.
stxr w1, x4, [x5] // try setting pending set to x4, set w1 := 0 on success
cbnz w1, 1b // retry everything if this fails
mov x1, x3
b 2f
4:
// Check for realtime signals, thread/proc.
clrex
// Load the pending set again. TODO: optimize this?
add x1, x6, #{pctl_pending}
ldaxr x2, [x1]
lsr x2, x2, #32
add x5, x0, #{tcb_sc_off} + {sc_word} + 8
ldar x1, [x5]
orr x2, x1, x2
and x2, x2, x2, lsr #32
cbz x2, 7f
rbit x3, x2
clz x3, x3
mov x4, #31
sub x2, x4, x3
// x2 now contains sig_idx - 32
// If realtime signal was directed at thread, handle it as an idempotent signal.
lsr x3, x1, x2
tbnz x3, #0, 5f
mov x5, x0
mov x4, x8
mov x8, #{SYS_SIGDEQUEUE}
mov x0, x1
add x1, x0, #{tcb_sa_off} + {sa_tmp_rt_inf}
svc 0
mov x0, x5
mov x8, x4
cbnz x0, 1b
b 2f
5:
// A realtime signal was sent to this thread, try clearing its bit.
// x3 contains last rt signal word, x2 contains rt_idx
clrex
// Calculate the absolute sig_idx
add x1, x3, 32
// Load si_pid and si_uid
add x2, x0, #{tcb_sc_off} + {sc_sender_infos}
add x2, x2, w1, uxtb #3
ldar x2, [x2]
add x3, x0, #{tcb_sc_off} + {sc_word} + 8
ldxr x2, [x3]
// Calculate new mask
mov x4, #1
lsl x4, x4, x2
sub x2, x2, x4 // remove bit
stxr w5, x2, [x3]
cbnz w5, 1b
str x2, [x0, #{tcb_sa_off} + {sa_tmp_id_inf}]
b 2f
3:
// A standard signal was sent to this thread, try clearing its bit.
clz x1, x1
mov x2, #31
sub x1, x2, x1
// Load si_pid and si_uid
add x2, x0, #{tcb_sc_off} + {sc_sender_infos}
add x2, x2, w1, uxtb #3
ldar x2, [x2]
// Clear bit from mask
mov x3, #1
lsl x3, x3, x1
sub x4, x4, x3
// Try updating the mask
stxr w3, x1, [x5]
cbnz w3, 1b
str x2, [x0, #{tcb_sa_off} + {sa_tmp_id_inf}]
2:
ldr x3, [x0, #{tcb_sa_off} + {sa_pctl}]
add x2, x3, {pctl_actions}
add x2, x2, w1, uxtb #4 // actions_base + sig_idx * sizeof Action
// TODO: NOT ATOMIC (tearing allowed between regs)!
ldxp x2, x3, [x2]
clrex
// Calculate new sp wrt redzone and alignment
mov x4, sp
sub x4, x4, {REDZONE_SIZE}
and x4, x4, -{STACK_ALIGN}
mov sp, x4
// skip sigaltstack step if SA_ONSTACK is clear
// tbz x2, #{SA_ONSTACK_BIT}, 2f
ldr x2, [x0, #{tcb_sc_off} + {sc_saved_pc}]
ldr x3, [x0, #{tcb_sc_off} + {sc_saved_x0}]
stp x2, x3, [sp, #-16]!
ldr x2, [x0, #{tcb_sa_off} + {sa_tmp_sp}]
mrs x3, nzcv
stp x2, x3, [sp, #-16]!
ldp x2, x3, [x0, #{tcb_sa_off} + {sa_tmp_x1_x2}]
stp x2, x3, [sp, #-16]!
ldp x3, x4, [x0, #{tcb_sa_off} + {sa_tmp_x3_x4}]
stp x4, x3, [sp, #-16]!
ldp x5, x6, [x0, #{tcb_sa_off} + {sa_tmp_x5_x6}]
stp x6, x5, [sp, #-16]!
stp x8, x7, [sp, #-16]!
stp x10, x9, [sp, #-16]!
stp x12, x11, [sp, #-16]!
stp x14, x13, [sp, #-16]!
stp x16, x15, [sp, #-16]!
stp x18, x17, [sp, #-16]!
stp x20, x19, [sp, #-16]!
stp x22, x21, [sp, #-16]!
stp x24, x23, [sp, #-16]!
stp x26, x25, [sp, #-16]!
stp x28, x27, [sp, #-16]!
stp x30, x29, [sp, #-16]!
str w1, [sp, #-4]
sub sp, sp, #64
mov x0, sp
bl {inner}
add sp, sp, #64
ldp x30, x29, [sp], #16
ldp x28, x27, [sp], #16
ldp x26, x25, [sp], #16
ldp x24, x23, [sp], #16
ldp x22, x21, [sp], #16
ldp x20, x19, [sp], #16
ldp x18, x17, [sp], #16
ldp x16, x15, [sp], #16
ldp x14, x13, [sp], #16
ldp x12, x11, [sp], #16
ldp x10, x9, [sp], #16
ldp x8, x7, [sp], #16
ldp x6, x5, [sp], #16
ldp x4, x3, [sp], #16
ldp x2, x1, [sp], #16
ldr x0, [sp, #8]
msr nzcv, x0
8:
// x18 is reserved by ABI as 'platform register', so clobbering it should be safe.
mov x18, sp
ldr x0, [x18]
mov sp, x0
ldp x18, x0, [x18, #16]
br x18
7:
// Spurious signal, i.e. all bitsets were 0 at the time they were checked
clrex
ldr x1, [x0, #{tcb_sc_off} + {sc_flags}]
and x1, x1, ~1
str x1, [x0, #{tcb_sc_off} + {sc_flags}]
ldp x1, x2, [x0, #{tcb_sa_off} + {sa_tmp_x1_x2}]
ldp x3, x4, [x0, #{tcb_sa_off} + {sa_tmp_x3_x4}]
ldp x5, x6, [x0, #{tcb_sa_off} + {sa_tmp_x5_x6}]
ldr x18, [x0, #{tcb_sc_off} + {sc_saved_pc}]
ldr x0, [x0, #{tcb_sc_off} + {sc_saved_x0}]
br x18
"] <= [
pctl_pending = const (offset_of!(SigProcControl, pending)),
pctl_actions = const (offset_of!(SigProcControl, actions)),
pctl_sender_infos = const (offset_of!(SigProcControl, sender_infos)),
tcb_sc_off = const (offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control)),
tcb_sa_off = const (offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch)),
sa_tmp_x1_x2 = const offset_of!(SigArea, tmp_x1_x2),
sa_tmp_x3_x4 = const offset_of!(SigArea, tmp_x3_x4),
sa_tmp_x5_x6 = const offset_of!(SigArea, tmp_x5_x6),
sa_tmp_sp = const offset_of!(SigArea, tmp_sp),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_pctl = const offset_of!(SigArea, pctl),
sc_saved_pc = const offset_of!(Sigcontrol, saved_ip),
sc_saved_x0 = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
sc_word = const offset_of!(Sigcontrol, word),
sc_flags = const offset_of!(Sigcontrol, control_flags),
inner = sym inner_c,
SA_ONSTACK_BIT = const 58, // (1 << 58) >> 32 = 0x0400_0000
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
STACK_ALIGN = const 16,
REDZONE_SIZE = const 128,
]);
asmfunction!(__relibc_internal_rlct_clone_ret: ["
# Load registers
ldp x8, x0, [sp], #16
ldp x1, x2, [sp], #16
ldp x3, x4, [sp], #16
# Call entry point
blr x8
ret
"] <= []);
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!("mov {}, sp", out(reg) sp);
}
sp
}
pub unsafe fn manually_enter_trampoline() {
let ctl = &Tcb::current().unwrap().os_specific.control;
ctl.saved_archdep_reg.set(0);
let ip_location = &ctl.saved_ip as *const _ as usize;
core::arch::asm!("
bl 2f
b 3f
2:
str lr, [x0]
b __relibc_internal_sigentry
3:
", inout("x0") ip_location => _, out("lr") _);
}
pub unsafe fn arch_pre(stack: &mut SigStack, os: &mut SigArea) -> PosixStackt {
PosixStackt {
sp: core::ptr::null_mut(), // TODO
size: 0, // TODO
flags: 0, // TODO
}
}
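// ===== redox-rt/src/arch/i686.rs (file boundary inferred) =====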
use core::{mem::offset_of, ptr::NonNull, sync::atomic::Ordering};
use syscall::*;
use crate::{
proc::{fork_inner, FdGuard},
signal::{inner_fastcall, PosixStackt, RtSigarea, SigStack, PROC_CONTROL_STRUCT},
RtTcb,
};
// Set up a stack starting at the very end of the address space, growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 31;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub altstack_top: usize,
pub altstack_bottom: usize,
pub tmp_eip: usize,
pub tmp_esp: usize,
pub tmp_eax: usize,
pub tmp_ecx: usize,
pub tmp_edx: usize,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
pub tmp_mm0: u64,
pub pctl: usize, // TODO: reference pctl directly
pub disable_signals_depth: u64,
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
}
#[derive(Debug, Default)]
#[repr(C, align(16))]
pub struct ArchIntRegs {
pub fxsave: [u128; 29],
// ensure fxsave region is 16 byte aligned
pub _pad: [usize; 2], // fxsave "available" +0
pub ebp: usize, // fxsave "available" +8
pub esi: usize, // avail +12
pub edi: usize, // avail +16
pub ebx: usize, // avail +20
pub eax: usize, // avail +24
pub ecx: usize, // avail +28
pub edx: usize, // avail +32
pub eflags: usize, // avail +36
pub eip: usize, // avail +40
pub esp: usize, // avail +44
}
/// Deactivate TLS, used before exec() on Redox so the target executable is not tricked into
/// thinking TLS is already initialized as if it were a thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.fsbase = 0;
env.gsbase = 0;
let _ = syscall::write(*file, &env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "cdecl" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "cdecl" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
crate::child_hook_common(FdGuard::new(new_pid_fd));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
push ebp
mov ebp, esp
// Push preserved registers
push ebx
push edi
push esi
push ebp
sub esp, 32
//TODO stmxcsr [esp+16]
fnstcw [esp+24]
push esp
call {fork_impl}
pop esp
jmp 2f
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
// Arguments already on the stack
call {child_hook}
//TODO ldmxcsr [esp+16]
fldcw [esp+24]
xor eax, eax
.p2align 4
2:
add esp, 32
// Pop preserved registers
pop ebp
pop esi
pop edi
pop ebx
pop ebp
ret
"] <= [child_hook = sym child_hook]);
asmfunction!(__relibc_internal_sigentry: ["
// Save some registers
mov gs:[{tcb_sa_off} + {sa_tmp_esp}], esp
mov gs:[{tcb_sa_off} + {sa_tmp_eax}], eax
mov gs:[{tcb_sa_off} + {sa_tmp_edx}], edx
mov gs:[{tcb_sa_off} + {sa_tmp_ecx}], ecx
1:
// Read standard signal word - first for this thread
mov edx, gs:[{tcb_sc_off} + {sc_word} + 4]
mov eax, gs:[{tcb_sc_off} + {sc_word}]
and eax, edx
bsf eax, eax
jnz 9f
mov ecx, gs:[{tcb_sa_off} + {sa_pctl}]
// Read standard signal word - for the process
mov eax, [ecx + {pctl_pending}]
and eax, edx
bsf eax, eax
jz 3f
// Read si_pid and si_uid, atomically.
movq gs:[{tcb_sa_off} + {sa_tmp_mm0}], mm0
movq mm0, [ecx + {pctl_sender_infos} + eax * 8]
movq gs:[{tcb_sa_off} + {sa_tmp_id_inf}], mm0
movq mm0, gs:[{tcb_sa_off} + {sa_tmp_mm0}]
// Try clearing the pending bit, otherwise retry if another thread did that first
lock btr [ecx + {pctl_pending}], eax
jnc 1b
jmp 2f
3:
// Read realtime thread and process signal word together
mov edx, [ecx + {pctl_pending} + 4]
mov eax, gs:[{tcb_sc_off} + {sc_word} + 8]
or eax, edx
and eax, gs:[{tcb_sc_off} + {sc_word} + 12]
jz 7f // spurious signal
bsf eax, eax
// If thread was specifically targeted, send the signal to it first.
bt edx, eax
jc 8f
mov edx, ebx
lea ecx, [eax+32]
mov eax, {SYS_SIGDEQUEUE}
mov ebx, gs:[0]
add ebx, {tcb_sa_off} + {sa_tmp_rt_inf}
int 0x80
mov ebx, edx
test eax, eax
jnz 1b
mov eax, ecx
jmp 2f
8:
add eax, 32
9:
// Read si_pid and si_uid, atomically.
movq gs:[{tcb_sa_off} + {sa_tmp_mm0}], mm0
movq mm0, gs:[{tcb_sc_off} + {sc_sender_infos} + eax * 8]
movq gs:[{tcb_sa_off} + {sa_tmp_id_inf}], mm0
movq mm0, gs:[{tcb_sa_off} + {sa_tmp_mm0}]
mov edx, eax
shr edx, 5
mov ecx, eax
and ecx, 31
lock btr gs:[{tcb_sc_off} + {sc_word} + edx * 8], ecx
add eax, 64
2:
and esp, -{STACK_ALIGN}
mov edx, eax
add edx, edx
bt dword ptr [{pctl} + {pctl_actions} + edx * 8 + 4], 28
jnc 4f
mov edx, gs:[{tcb_sa_off} + {sa_altstack_top}]
cmp esp, edx
ja 3f
cmp esp, gs:[{tcb_sa_off} + {sa_altstack_bottom}]
jnbe 4f
3:
mov esp, edx
4:
// Now that we have a stack, we can finally start populating the signal stack.
push dword ptr gs:[{tcb_sa_off} + {sa_tmp_esp}]
push dword ptr gs:[{tcb_sc_off} + {sc_saved_eip}]
push dword ptr gs:[{tcb_sc_off} + {sc_saved_eflags}]
push dword ptr gs:[{tcb_sa_off} + {sa_tmp_edx}]
push dword ptr gs:[{tcb_sa_off} + {sa_tmp_ecx}]
push dword ptr gs:[{tcb_sa_off} + {sa_tmp_eax}]
push ebx
push edi
push esi
push ebp
sub esp, 2 * 4 + 29 * 16
fxsave [esp]
mov [esp - 4], eax
sub esp, 48
mov ecx, esp
call {inner}
fxrstor [esp + 48]
add esp, 48 + 29 * 16 + 2 * 4
pop ebp
pop esi
pop edi
pop ebx
pop eax
pop ecx
pop edx
popfd
pop dword ptr gs:[{tcb_sa_off} + {sa_tmp_eip}]
.globl __relibc_internal_sigentry_crit_first
__relibc_internal_sigentry_crit_first:
pop esp
.globl __relibc_internal_sigentry_crit_second
__relibc_internal_sigentry_crit_second:
jmp dword ptr gs:[{tcb_sa_off} + {sa_tmp_eip}]
7:
mov eax, gs:[0]
lea esp, [eax + {tcb_sc_off} + {sc_saved_eflags}]
popfd
mov esp, gs:[{tcb_sa_off} + {sa_tmp_esp}]
mov eax, gs:[{tcb_sc_off} + {sc_saved_eip}]
mov gs:[{tcb_sa_off} + {sa_tmp_eip}], eax
mov eax, gs:[{tcb_sa_off} + {sa_tmp_eax}]
mov ecx, gs:[{tcb_sa_off} + {sa_tmp_ecx}]
mov edx, gs:[{tcb_sa_off} + {sa_tmp_edx}]
and dword ptr gs:[{tcb_sc_off} + {sc_control}], ~1
.globl __relibc_internal_sigentry_crit_third
__relibc_internal_sigentry_crit_third:
jmp dword ptr gs:[{tcb_sa_off} + {sa_tmp_eip}]
"] <= [
inner = sym inner_fastcall,
sa_tmp_eip = const offset_of!(SigArea, tmp_eip),
sa_tmp_esp = const offset_of!(SigArea, tmp_esp),
sa_tmp_eax = const offset_of!(SigArea, tmp_eax),
sa_tmp_ecx = const offset_of!(SigArea, tmp_ecx),
sa_tmp_edx = const offset_of!(SigArea, tmp_edx),
sa_tmp_mm0 = const offset_of!(SigArea, tmp_mm0),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_altstack_top = const offset_of!(SigArea, altstack_top),
sa_altstack_bottom = const offset_of!(SigArea, altstack_bottom),
sa_pctl = const offset_of!(SigArea, pctl),
sc_control = const offset_of!(Sigcontrol, control_flags),
sc_saved_eflags = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_saved_eip = const offset_of!(Sigcontrol, saved_ip),
sc_word = const offset_of!(Sigcontrol, word),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
tcb_sa_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch),
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
pctl_actions = const offset_of!(SigProcControl, actions),
pctl_sender_infos = const offset_of!(SigProcControl, sender_infos),
pctl_pending = const offset_of!(SigProcControl, pending),
pctl = sym PROC_CONTROL_STRUCT,
STACK_ALIGN = const 16,
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
]);
asmfunction!(__relibc_internal_rlct_clone_ret -> usize: ["
# Load registers
pop eax
sub esp, 8
mov DWORD PTR [esp], 0x00001F80
# TODO: ldmxcsr [esp]
mov WORD PTR [esp], 0x037F
fldcw [esp]
add esp, 8
# Call entry point
call eax
ret
"] <= []);
extern "C" {
fn __relibc_internal_sigentry_crit_first();
fn __relibc_internal_sigentry_crit_second();
fn __relibc_internal_sigentry_crit_third();
}
pub unsafe fn arch_pre(stack: &mut SigStack, area: &mut SigArea) -> PosixStackt {
if stack.regs.eip == __relibc_internal_sigentry_crit_first as usize {
let stack_ptr = stack.regs.esp as *const usize;
stack.regs.esp = stack_ptr.read();
stack.regs.eip = stack_ptr.sub(1).read();
} else if stack.regs.eip == __relibc_internal_sigentry_crit_second as usize
|| stack.regs.eip == __relibc_internal_sigentry_crit_third as usize
{
stack.regs.eip = area.tmp_eip;
}
PosixStackt {
sp: stack.regs.esp as *mut (),
size: 0, // TODO
flags: 0, // TODO
}
}
#[no_mangle]
pub unsafe fn manually_enter_trampoline() {
let c = &crate::Tcb::current().unwrap().os_specific.control;
c.control_flags.store(
c.control_flags.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
c.saved_archdep_reg.set(0); // TODO: Just reset DF on x86?
core::arch::asm!("
call 2f
jmp 3f
2:
pop dword ptr gs:[{tcb_sc_off} + {sc_saved_eip}]
jmp __relibc_internal_sigentry
3:
",
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
sc_saved_eip = const offset_of!(Sigcontrol, saved_ip),
);
}
/// Get current stack pointer, weak granularity guarantees.
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!("mov {}, esp", out(reg) sp);
}
sp
}
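// ===== redox-rt/src/arch/mod.rs (file boundary inferred) =====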
#[cfg(target_arch = "aarch64")]
pub use self::aarch64::*;
#[cfg(target_arch = "aarch64")]
pub mod aarch64;
#[cfg(target_arch = "x86")]
pub use self::i686::*;
#[cfg(target_arch = "x86")]
pub mod i686;
#[cfg(target_arch = "x86_64")]
pub use self::x86_64::*;
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
#[cfg(target_arch = "riscv64")]
pub use self::riscv64::*;
#[cfg(target_arch = "riscv64")]
pub mod riscv64;
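// ===== redox-rt/src/arch/riscv64.rs (file boundary inferred) =====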
use crate::{
proc::{fork_inner, FdGuard},
signal::{get_sigaltstack, inner_c, PosixStackt, RtSigarea, SigStack},
RtTcb, Tcb,
};
use core::{mem::offset_of, ptr::NonNull, sync::atomic::Ordering};
use syscall::{data::*, error::*};
// Set up a stack starting at the very end of the address space, growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 47;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub tmp_sp: u64,
pub tmp_t1: u64,
pub tmp_t2: u64,
pub tmp_t3: u64,
pub tmp_t4: u64,
pub tmp_a0: u64,
pub tmp_a1: u64,
pub tmp_a2: u64,
pub tmp_a7: u64,
pub pctl: usize, // TODO: remove
pub tmp_ip: u64,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
pub altstack_top: usize,
pub altstack_bottom: usize,
pub disable_signals_depth: u64,
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
}
#[repr(C)]
#[derive(Debug, Default)]
pub struct ArchIntRegs {
pub int_regs: [u64; 31],
pub pc: u64,
pub fp_regs: [u64; 32],
pub fcsr: u32,
_pad: u32,
}
/// Deactivate TLS, used before exec() on Redox so the target executable is not tricked into
/// thinking TLS is already initialized as if it were a thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.tp = 0;
let _ = syscall::write(*file, &env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "C" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "C" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
crate::child_hook_common(FdGuard::new(new_pid_fd));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
.attribute arch, \"rv64gc\" # rust bug 80608
addi sp, sp, -200
sd s0, 0(sp)
sd s1, 8(sp)
sd s2, 16(sp)
sd s3, 24(sp)
sd s4, 32(sp)
sd s5, 40(sp)
sd s6, 48(sp)
sd s7, 56(sp)
sd s8, 64(sp)
sd s9, 72(sp)
sd s10, 80(sp)
sd s11, 88(sp)
sd ra, 96(sp)
fsd fs0, 104(sp)
fsd fs1, 112(sp)
fsd fs2, 120(sp)
fsd fs3, 128(sp)
fsd fs4, 136(sp)
fsd fs5, 144(sp)
fsd fs6, 152(sp)
fsd fs7, 160(sp)
fsd fs8, 168(sp)
fsd fs9, 176(sp)
fsd fs10, 184(sp)
fsd fs11, 192(sp)
addi sp, sp, -32
mv a0, sp
jal {fork_impl}
addi sp, sp, 32
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
ld s3, 24(sp)
ld s4, 32(sp)
ld s5, 40(sp)
ld s6, 48(sp)
ld s7, 56(sp)
ld s8, 64(sp)
ld s9, 72(sp)
ld s10, 80(sp)
ld s11, 88(sp)
ld ra, 96(sp)
fld fs0, 104(sp)
fld fs1, 112(sp)
fld fs2, 120(sp)
fld fs3, 128(sp)
fld fs4, 136(sp)
fld fs5, 144(sp)
fld fs6, 152(sp)
fld fs7, 160(sp)
fld fs8, 168(sp)
fld fs9, 176(sp)
fld fs10, 184(sp)
fld fs11, 192(sp)
addi sp, sp, 200
ret
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
.attribute arch, \"rv64gc\" # rust bug 80608
ld a0, 0(sp)
ld a1, 8(sp)
jal {child_hook}
mv a0, x0
addi sp, sp, 32
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
ld s3, 24(sp)
ld s4, 32(sp)
ld s5, 40(sp)
ld s6, 48(sp)
ld s7, 56(sp)
ld s8, 64(sp)
ld s9, 72(sp)
ld s10, 80(sp)
ld s11, 88(sp)
ld ra, 96(sp)
fld fs0, 104(sp)
fld fs1, 112(sp)
fld fs2, 120(sp)
fld fs3, 128(sp)
fld fs4, 136(sp)
fld fs5, 144(sp)
fld fs6, 152(sp)
fld fs7, 160(sp)
fld fs8, 168(sp)
fld fs9, 176(sp)
fld fs10, 184(sp)
fld fs11, 192(sp)
addi sp, sp, 200
ret
"] <= [child_hook = sym child_hook]);
asmfunction!(__relibc_internal_sigentry: ["
.attribute arch, \"rv64gc\" # rust bug 80608
// Save some registers
ld t0, -8(tp) // Tcb
sd sp, ({tcb_sa_off} + {sa_tmp_sp})(t0)
sd t1, ({tcb_sa_off} + {sa_tmp_t1})(t0)
sd t2, ({tcb_sa_off} + {sa_tmp_t2})(t0)
sd t3, ({tcb_sa_off} + {sa_tmp_t3})(t0)
sd t4, ({tcb_sa_off} + {sa_tmp_t4})(t0)
ld t4, ({tcb_sa_off} + {sa_off_pctl})(t0)
// First, select signal, always pick first available bit
99:
// Read first signal word
ld t1, ({tcb_sc_off} + {sc_word})(t0)
srli t2, t1, 32 // bitset to low word
and t1, t1, t2 // masked bitset in low word
beqz t1, 3f
// Found in first thread signal word
mv t3, x0
2: andi t2, t1, 1
bnez t2, 10f
addi t3, t3, 1
srli t1, t1, 1
j 2b
// If no unblocked thread signal was found, check for process.
// This is competitive; we need to atomically check if *we* cleared the process-wide pending
// bit, otherwise restart.
3: lw t1, {pctl_off_pending}(t4)
and t1, t1, t2
beqz t1, 3f
// Found in first process signal word
li t3, -1
2: andi t2, t1, 1
addi t3, t3, 1
srli t1, t1, 1
beqz t2, 2b
slli t1, t3, 3 // * 8 == size_of SenderInfo
add t1, t1, t4
ld t1, {pctl_off_sender_infos}(t1)
sd t1, ({tcb_sa_off} + {sa_tmp_id_inf})(t0)
li t1, 1
sll t1, t1, t3
not t1, t1
addi t2, t4, {pctl_off_pending}
amoand.w.aq t2, t1, (t2)
and t1, t1, t2
bne t1, t2, 9f
3:
// Read second signal word - both process and thread simultaneously.
// This must be done since POSIX requires low realtime signals to be picked first.
ld t1, ({tcb_sc_off} + {sc_word} + 8)(t0)
lw t2, ({pctl_off_pending} + 4)(t4)
or t4, t1, t2
srli t2, t1, 32
and t4, t2, t4
beqz t4, 7f
li t3, -1
2: andi t2, t4, 1
addi t3, t3, 1
srli t4, t4, 1
beqz t2, 2b
li t2, 1
sll t2, t2, t3
and t1, t1, t2
addi t3, t3, 32
bnez t1, 10f // thread signal
// otherwise, try (competitively) dequeueing realtime signal
sd a0, ({tcb_sa_off} + {sa_tmp_a0})(t0)
sd a1, ({tcb_sa_off} + {sa_tmp_a1})(t0)
sd a2, ({tcb_sa_off} + {sa_tmp_a2})(t0)
sd a7, ({tcb_sa_off} + {sa_tmp_a7})(t0)
li a0, {SYS_SIGDEQUEUE}
addi a1, t3, -32
addi a2, t0, {tcb_sa_off} + {sa_tmp_rt_inf} // out pointer of dequeued realtime sig
ecall
bnez a0, 99b // assumes error can only be EAGAIN
j 9f
10: // thread signal. t3 holds signal number
srli t1, t3, 5
bnez t1, 2f // FIXME senderinfo?
sll t2, t3, 3 // * 8 == size_of SenderInfo
add t2, t2, t0
ld t2, ({tcb_sc_off} + {sc_sender_infos})(t2)
sd t2, ({tcb_sa_off} + {sa_tmp_id_inf})(t0)
2: andi t4, t3, 31
li t2, 1
sll t2, t2, t4
not t2, t2
sll t1, t1, 3
add t1, t1, t0
addi t1, t1, {tcb_sc_off} + {sc_word}
amoand.w.aq x0, t2, (t1)
addi t3, t3, 64 // indicate signal was targeted at thread
9: // process signal t3 holds signal number
// By now we have selected a signal, stored in t3 (6-bit). We now need to choose whether or
// not to switch to the alternate signal stack. If SA_ONSTACK is clear for this signal, then
// skip the sigaltstack logic.
ld t4, ({tcb_sa_off} + {sa_off_pctl})(t0)
andi t1, t3, 63
slli t1, t1, 4 // * 16 == size_of RawAction
add t1, t1, t4
ld t1, {pctl_off_actions}(t1)
slli t1, t1, 63-58 // SA_ONSTACK in sign bit
bgez t1, 3f
// If the current sp is above the altstack region, switch to the altstack
ld t1, ({tcb_sa_off} + {sa_altstack_top})(t0)
bgtu sp, t1, 2f
ld t2, ({tcb_sa_off} + {sa_altstack_bottom})(t0)
bgtu sp, t2, 3f
2: mv sp, t1
3:
// form mcontext on stack
addi sp, sp, -33 * 8
fsd f0, (0 * 8)(sp)
fsd f1, (1 * 8)(sp)
fsd f2, (2 * 8)(sp)
fsd f3, (3 * 8)(sp)
fsd f4, (4 * 8)(sp)
fsd f5, (5 * 8)(sp)
fsd f6, (6 * 8)(sp)
fsd f7, (7 * 8)(sp)
fsd f8, (8 * 8)(sp)
fsd f9, (9 * 8)(sp)
fsd f10, (10 * 8)(sp)
fsd f11, (11 * 8)(sp)
fsd f12, (12 * 8)(sp)
fsd f13, (13 * 8)(sp)
fsd f14, (14 * 8)(sp)
fsd f15, (15 * 8)(sp)
fsd f16, (16 * 8)(sp)
fsd f17, (17 * 8)(sp)
fsd f18, (18 * 8)(sp)
fsd f19, (19 * 8)(sp)
fsd f20, (20 * 8)(sp)
fsd f21, (21 * 8)(sp)
fsd f22, (22 * 8)(sp)
fsd f23, (23 * 8)(sp)
fsd f24, (24 * 8)(sp)
fsd f25, (25 * 8)(sp)
fsd f26, (26 * 8)(sp)
fsd f27, (27 * 8)(sp)
fsd f28, (28 * 8)(sp)
fsd f29, (29 * 8)(sp)
fsd f30, (30 * 8)(sp)
fsd f31, (31 * 8)(sp)
csrr t1, fcsr
sw t1, (32 * 8)(sp)
addi sp, sp, -32 * 8
sd x1, 0(sp)
ld t1, ({tcb_sa_off} + {sa_tmp_sp})(t0)
sd t1, (1 * 8)(sp) // x2 is sp
sd x3, (2 * 8)(sp)
sd x4, (3 * 8)(sp)
ld t1, ({tcb_sc_off} + {sc_saved_t0})(t0)
sd t1, (4 * 8)(sp) // x5 is t0
ld t1, ({tcb_sa_off} + {sa_tmp_t1})(t0)
sd t1, (5 * 8)(sp) // x6 is t1
ld t1, ({tcb_sa_off} + {sa_tmp_t2})(t0)
sd t1, (6 * 8)(sp) // x7 is t2
sd x8, (7 * 8)(sp)
sd x9, (8 * 8)(sp)
sd x10, (9 * 8)(sp)
sd x11, (10 * 8)(sp)
sd x12, (11 * 8)(sp)
sd x13, (12 * 8)(sp)
sd x14, (13 * 8)(sp)
sd x15, (14 * 8)(sp)
sd x16, (15 * 8)(sp)
sd x17, (16 * 8)(sp)
sd x18, (17 * 8)(sp)
sd x19, (18 * 8)(sp)
sd x20, (19 * 8)(sp)
sd x21, (20 * 8)(sp)
sd x22, (21 * 8)(sp)
sd x23, (22 * 8)(sp)
sd x24, (23 * 8)(sp)
sd x25, (24 * 8)(sp)
sd x26, (25 * 8)(sp)
sd x27, (26 * 8)(sp)
ld t1, ({tcb_sa_off} + {sa_tmp_t3})(t0)
sd t1, (27 * 8)(sp) // t3 is x28
ld t1, ({tcb_sa_off} + {sa_tmp_t4})(t0)
sd t1, (28 * 8)(sp) // t4 is x29
sd x30, (29 * 8)(sp)
sd x31, (30 * 8)(sp)
ld t1, ({tcb_sc_off} + {sc_saved_ip})(t0)
sd t1, (31 * 8)(sp)
// form ucontext
addi sp, sp, -64
sw t3, 60(sp)
mv t0, sp
jal {inner}
addi sp, sp, 64
addi t0, sp, 32 * 8
fld f0, (0 * 8)(t0)
fld f1, (1 * 8)(t0)
fld f2, (2 * 8)(t0)
fld f3, (3 * 8)(t0)
fld f4, (4 * 8)(t0)
fld f5, (5 * 8)(t0)
fld f6, (6 * 8)(t0)
fld f7, (7 * 8)(t0)
fld f8, (8 * 8)(t0)
fld f9, (9 * 8)(t0)
fld f10, (10 * 8)(t0)
fld f11, (11 * 8)(t0)
fld f12, (12 * 8)(t0)
fld f13, (13 * 8)(t0)
fld f14, (14 * 8)(t0)
fld f15, (15 * 8)(t0)
fld f16, (16 * 8)(t0)
fld f17, (17 * 8)(t0)
fld f18, (18 * 8)(t0)
fld f19, (19 * 8)(t0)
fld f20, (20 * 8)(t0)
fld f21, (21 * 8)(t0)
fld f22, (22 * 8)(t0)
fld f23, (23 * 8)(t0)
fld f24, (24 * 8)(t0)
fld f25, (25 * 8)(t0)
fld f26, (26 * 8)(t0)
fld f27, (27 * 8)(t0)
fld f28, (28 * 8)(t0)
fld f29, (29 * 8)(t0)
fld f30, (30 * 8)(t0)
fld f31, (31 * 8)(t0)
lw t1, (32 * 8)(t0)
csrw fcsr, t1
ld x1, 0(sp)
// skip sp
// skip gp
ld x4, (3 * 8)(sp)
ld x5, (4 * 8)(sp)
ld x6, (5 * 8)(sp)
ld x7, (6 * 8)(sp)
ld x8, (7 * 8)(sp)
ld x9, (8 * 8)(sp)
ld x10, (9 * 8)(sp)
ld x11, (10 * 8)(sp)
ld x12, (11 * 8)(sp)
ld x13, (12 * 8)(sp)
ld x14, (13 * 8)(sp)
ld x15, (14 * 8)(sp)
ld x16, (15 * 8)(sp)
ld x17, (16 * 8)(sp)
ld x18, (17 * 8)(sp)
ld x19, (18 * 8)(sp)
ld x20, (19 * 8)(sp)
ld x21, (20 * 8)(sp)
ld x22, (21 * 8)(sp)
ld x23, (22 * 8)(sp)
ld x24, (23 * 8)(sp)
ld x25, (24 * 8)(sp)
ld x26, (25 * 8)(sp)
ld x27, (26 * 8)(sp)
ld x28, (27 * 8)(sp)
ld x29, (28 * 8)(sp)
ld x30, (29 * 8)(sp)
ld x31, (30 * 8)(sp)
ld gp, (31 * 8)(sp) // new IP; this clobbers register x3/gp which is ABI reserved
.global __relibc_internal_sigentry_crit_first
__relibc_internal_sigentry_crit_first:
ld sp, (1 * 8)(sp)
.global __relibc_internal_sigentry_crit_second
__relibc_internal_sigentry_crit_second:
jr gp
7:
// A spurious signal occurred. Signals are still disabled here, but will need to be re-enabled.
// restore stack
ld sp, ({tcb_sa_off} + {sa_tmp_sp})(t0)
// move saved IP away from control, allowing arch_pre to save us if interrupted.
ld t1, ({tcb_sc_off} + {sc_saved_ip})(t0)
sd t1, ({tcb_sa_off} + {sa_tmp_ip})(t0)
// restore regs
ld t2, ({tcb_sa_off} + {sa_tmp_t2})(t0)
ld t3, ({tcb_sa_off} + {sa_tmp_t3})(t0)
ld t4, ({tcb_sa_off} + {sa_tmp_t4})(t0)
// move saved t0 away from control as well
mv t1, t0
ld t0, ({tcb_sc_off} + {sc_saved_t0})(t0)
// Re-enable signals. This code can be interrupted after this point, so we need to define
// 'crit_third'.
ld gp, ({tcb_sc_off} + {sc_control})(t1)
andi gp, gp, ~1
sd gp, ({tcb_sc_off} + {sc_control})(t1)
.globl __relibc_internal_sigentry_crit_third
__relibc_internal_sigentry_crit_third:
ld gp, ({tcb_sa_off} + {sa_tmp_ip})(t1)
.globl __relibc_internal_sigentry_crit_fourth
__relibc_internal_sigentry_crit_fourth:
ld t1, ({tcb_sa_off} + {sa_tmp_t1})(t1)
.globl __relibc_internal_sigentry_crit_fifth
__relibc_internal_sigentry_crit_fifth:
jr gp
"] <= [
tcb_sc_off = const (offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control)),
sc_word = const offset_of!(Sigcontrol, word),
sc_saved_t0 = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_saved_ip = const offset_of!(Sigcontrol, saved_ip),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
sc_control = const offset_of!(Sigcontrol, control_flags),
tcb_sa_off = const (offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch)),
sa_off_pctl = const offset_of!(SigArea, pctl),
sa_tmp_sp = const offset_of!(SigArea, tmp_sp),
sa_tmp_t1 = const offset_of!(SigArea, tmp_t1),
sa_tmp_t2 = const offset_of!(SigArea, tmp_t2),
sa_tmp_t3 = const offset_of!(SigArea, tmp_t3),
sa_tmp_t4 = const offset_of!(SigArea, tmp_t4),
sa_tmp_a0 = const offset_of!(SigArea, tmp_a0),
sa_tmp_a1 = const offset_of!(SigArea, tmp_a1),
sa_tmp_a2 = const offset_of!(SigArea, tmp_a2),
sa_tmp_a7 = const offset_of!(SigArea, tmp_a7),
sa_tmp_ip = const offset_of!(SigArea, tmp_ip),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_altstack_top = const offset_of!(SigArea, altstack_top),
sa_altstack_bottom = const offset_of!(SigArea, altstack_bottom),
pctl_off_actions = const offset_of!(SigProcControl, actions),
inner = sym inner_c,
pctl_off_pending = const offset_of!(SigProcControl, pending),
pctl_off_sender_infos = const offset_of!(SigProcControl, sender_infos),
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
]);
asmfunction!(__relibc_internal_rlct_clone_ret: ["
ld t0, 0(sp)
ld a0, 8(sp)
ld a1, 16(sp)
ld a2, 24(sp)
ld a3, 32(sp)
ld a4, 40(sp)
ld a5, 48(sp)
addi sp, sp, 56
jalr t0
ret
"] <= []);
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!(
"mv {}, sp",
out(reg) sp,
options(nomem));
}
sp
}
pub unsafe fn manually_enter_trampoline() {
let ctl = &Tcb::current().unwrap().os_specific.control;
ctl.control_flags.store(
ctl.control_flags.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
ctl.saved_archdep_reg.set(0);
let ip_location = &ctl.saved_ip as *const _ as usize;
core::arch::asm!("
jal 2f
j 3f
2:
sd ra, 0(t0)
la t0, __relibc_internal_sigentry
jalr x0, t0
3:
", inout("t0") ip_location => _, out("ra") _);
}
extern "C" {
fn __relibc_internal_sigentry_crit_first();
fn __relibc_internal_sigentry_crit_second();
fn __relibc_internal_sigentry_crit_third();
fn __relibc_internal_sigentry_crit_fourth();
fn __relibc_internal_sigentry_crit_fifth();
}
pub unsafe fn arch_pre(stack: &mut SigStack, area: &mut SigArea) -> PosixStackt {
// It is impossible to update SP and PC atomically. Instead, we abuse the fact that
// signals are disabled in the prologue of the signal trampoline, which allows us to emulate
// atomicity inside the critical section, consisting of one instruction at 'crit_first', and
// one at 'crit_second', see asm.
if stack.regs.pc == __relibc_internal_sigentry_crit_first as u64 {
// Reexecute 'ld sp, (1 * 8)(sp)'
let stack_ptr = stack.regs.int_regs[1] as *const u64; // x2
stack.regs.int_regs[1] = stack_ptr.add(1).read();
// and 'jr gp' steps.
stack.regs.pc = stack.regs.int_regs[2];
} else if stack.regs.pc == __relibc_internal_sigentry_crit_second as u64
|| stack.regs.pc == __relibc_internal_sigentry_crit_fifth as u64
{
// just reexecute the jump
stack.regs.pc = stack.regs.int_regs[2];
} else if stack.regs.pc == __relibc_internal_sigentry_crit_third as u64 {
// ld gp, ({tcb_sa_off} + {sa_tmp_ip})(t1)
stack.regs.int_regs[2] = area.tmp_ip;
// ld t1, ({tcb_sa_off} + {sa_tmp_t1})(t1)
stack.regs.int_regs[5] = area.tmp_t1;
// j gp
stack.regs.pc = stack.regs.int_regs[2];
} else if stack.regs.pc == __relibc_internal_sigentry_crit_fourth as u64 {
// ld t1, ({tcb_sa_off} + {sa_tmp_t1})(t1)
stack.regs.int_regs[5] = area.tmp_t1;
// jr gp
stack.regs.pc = stack.regs.int_regs[2];
}
get_sigaltstack(area, stack.regs.int_regs[1] as usize).into()
}
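// ===== redox-rt/src/arch/x86_64.rs (file boundary inferred) =====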
use core::{
mem::offset_of,
ptr::NonNull,
sync::atomic::{AtomicU8, Ordering},
};
use syscall::{
data::{SigProcControl, Sigcontrol},
error::*,
RtSigInfo,
};
use crate::{
proc::{fork_inner, FdGuard},
signal::{get_sigaltstack, inner_c, PosixStackt, RtSigarea, SigStack, PROC_CONTROL_STRUCT},
Tcb,
};
// Set up a stack starting at the very end of the address space, growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 47;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub tmp_rip: usize,
pub tmp_rsp: usize,
pub tmp_rax: usize,
pub tmp_rdx: usize,
pub tmp_rdi: usize,
pub tmp_rsi: usize,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
pub altstack_top: usize,
pub altstack_bottom: usize,
pub disable_signals_depth: u64,
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
}
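// Note on the layout below: r15..rbx occupy the 48 "available" trailing bytes of the
// 512-byte fxsave image (bytes 464..511), which FXSAVE64 leaves untouched. This is why
// the signal trampoline allocates (29 + 16) * 16 bytes instead of a full fxsave area
// plus separate register slots.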
#[repr(C, align(16))]
#[derive(Debug, Default)]
pub struct ArchIntRegs {
pub ymm_upper: [u128; 16],
pub fxsave: [u128; 29],
pub r15: usize, // fxsave "available" +0
pub r14: usize, // available +8
pub r13: usize, // available +16
pub r12: usize, // available +24
pub rbp: usize, // available +32
pub rbx: usize, // available +40
pub r11: usize, // outside fxsave, and so on
pub r10: usize,
pub r9: usize,
pub r8: usize,
pub rax: usize,
pub rcx: usize,
pub rdx: usize,
pub rsi: usize,
pub rdi: usize,
pub rflags: usize,
pub rip: usize,
pub rsp: usize,
}
/// Deactivate TLS, used before exec() on Redox so the target executable is not tricked into
/// thinking TLS is already initialized as if it were a thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.fsbase = 0;
env.gsbase = 0;
let _ = syscall::write(*file, &env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "sysv64" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "sysv64" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
crate::child_hook_common(FdGuard::new(new_pid_fd));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
push rbp
mov rbp, rsp
push rbx
push rbp
push r12
push r13
push r14
push r15
sub rsp, 32
stmxcsr [rsp+16]
fnstcw [rsp+24]
mov rdi, rsp
call {fork_impl}
add rsp, 80
pop rbp
ret
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
mov rdi, [rsp]
mov rsi, [rsp + 8]
call {child_hook}
ldmxcsr [rsp + 16]
fldcw [rsp + 24]
xor rax, rax
add rsp, 32
pop r15
pop r14
pop r13
pop r12
pop rbp
pop rbx
pop rbp
ret
"] <= [child_hook = sym child_hook]);
asmfunction!(__relibc_internal_rlct_clone_ret: ["
# Load registers
pop rax
pop rdi
pop rsi
pop rdx
pop rcx
pop r8
pop r9
mov DWORD PTR [rsp - 8], 0x00001F80
ldmxcsr [rsp - 8]
mov WORD PTR [rsp - 8], 0x037F
fldcw [rsp - 8]
# Call entry point
call rax
ret
"] <= []);
asmfunction!(__relibc_internal_sigentry: ["
// Save some registers
mov fs:[{tcb_sa_off} + {sa_tmp_rsp}], rsp
mov fs:[{tcb_sa_off} + {sa_tmp_rax}], rax
mov fs:[{tcb_sa_off} + {sa_tmp_rdx}], rdx
mov fs:[{tcb_sa_off} + {sa_tmp_rdi}], rdi
mov fs:[{tcb_sa_off} + {sa_tmp_rsi}], rsi
// First, select signal, always pick first available bit
1:
// Read standard signal word - first targeting this thread
mov rax, fs:[{tcb_sc_off} + {sc_word}]
mov rdx, rax
shr rdx, 32
and eax, edx
bsf eax, eax
jnz 2f
// If no unblocked thread signal was found, check for process.
// This is competitive; we need to atomically check if *we* cleared the process-wide pending
// bit, otherwise restart.
mov eax, [rip + {pctl} + {pctl_off_pending}]
and eax, edx
bsf eax, eax
jz 8f
lea rdi, [rip + {pctl} + {pctl_off_sender_infos}]
mov rdi, [rdi + rax * 8]
lock btr [rip + {pctl} + {pctl_off_pending}], eax
mov fs:[{tcb_sa_off} + {sa_tmp_id_inf}], rdi
jc 9f
8:
// Read second signal word - both process and thread simultaneously.
// This must be done since POSIX requires low realtime signals to be picked first.
mov edx, fs:[{tcb_sc_off} + {sc_word} + 8]
mov eax, [rip + {pctl} + {pctl_off_pending} + 4]
or eax, edx
and eax, fs:[{tcb_sc_off} + {sc_word} + 12]
bsf eax, eax
jz 7f
bt edx, eax // check if signal was sent to thread specifically
jc 2f // if so, continue as usual
// otherwise, try (competitively) dequeueing realtime signal
mov esi, eax
mov eax, {SYS_SIGDEQUEUE}
mov rdi, fs:[0]
add rdi, {tcb_sa_off} + {sa_tmp_rt_inf} // out pointer of dequeued realtime sig
syscall
test eax, eax
jnz 1b // assumes error can only be EAGAIN
lea eax, [esi + 32]
jmp 9f
2:
mov edx, eax
shr edx, 5
mov rdi, fs:[{tcb_sc_off} + {sc_sender_infos} + eax * 8]
lock btr fs:[{tcb_sc_off} + {sc_word} + edx * 4], eax
mov fs:[{tcb_sa_off} + {sa_tmp_id_inf}], rdi
add eax, 64 // indicate signal was targeted at thread
9:
sub rsp, {REDZONE_SIZE}
and rsp, -{STACK_ALIGN}
// By now we have selected a signal, stored in eax (6-bit). We now need to choose whether or
// not to switch to the alternate signal stack. If SA_ONSTACK is clear for this signal, then
// skip the sigaltstack logic.
lea rdx, [rip + {pctl} + {pctl_off_actions}]
mov ecx, eax
and ecx, 63
// LEA doesn't support 16x, so just do two x8s.
lea rdx, [rdx + 8 * rcx]
lea rdx, [rdx + 8 * rcx]
bt qword ptr [rdx], {SA_ONSTACK_BIT}
jnc 4f
// Otherwise, check whether we need to switch to the altstack. Note that a disabled
// sigaltstack is equivalent to setting 'top' to usize::MAX and 'bottom' to 0.
// If current RSP is above altstack region, switch to altstack
mov rdx, fs:[{tcb_sa_off} + {sa_altstack_top}]
cmp rsp, rdx
cmova rsp, rdx
// If current RSP is below altstack region, also switch to altstack
cmp rsp, fs:[{tcb_sa_off} + {sa_altstack_bottom}]
cmovbe rsp, rdx
.p2align 4
4:
// Now that we have a stack, we can finally start initializing the signal stack!
push fs:[{tcb_sa_off} + {sa_tmp_rsp}]
push fs:[{tcb_sc_off} + {sc_saved_rip}]
push fs:[{tcb_sc_off} + {sc_saved_rflags}]
push fs:[{tcb_sa_off} + {sa_tmp_rdi}]
push fs:[{tcb_sa_off} + {sa_tmp_rsi}]
push fs:[{tcb_sa_off} + {sa_tmp_rdx}]
push rcx
push fs:[{tcb_sa_off} + {sa_tmp_rax}]
push r8
push r9
push r10
push r11
push rbx
push rbp
push r12
push r13
push r14
push r15
sub rsp, (29 + 16) * 16 // fxsave region minus available bytes
fxsave64 [rsp + 16 * 16]
// TODO: self-modifying?
cmp byte ptr [rip + {supports_avx}], 0
je 5f
// Prefer vextractf128 over vextracti128 since the former only requires AVX version 1.
vextractf128 [rsp + 15 * 16], ymm0, 1
vextractf128 [rsp + 14 * 16], ymm1, 1
vextractf128 [rsp + 13 * 16], ymm2, 1
vextractf128 [rsp + 12 * 16], ymm3, 1
vextractf128 [rsp + 11 * 16], ymm4, 1
vextractf128 [rsp + 10 * 16], ymm5, 1
vextractf128 [rsp + 9 * 16], ymm6, 1
vextractf128 [rsp + 8 * 16], ymm7, 1
vextractf128 [rsp + 7 * 16], ymm8, 1
vextractf128 [rsp + 6 * 16], ymm9, 1
vextractf128 [rsp + 5 * 16], ymm10, 1
vextractf128 [rsp + 4 * 16], ymm11, 1
vextractf128 [rsp + 3 * 16], ymm12, 1
vextractf128 [rsp + 2 * 16], ymm13, 1
vextractf128 [rsp + 16], ymm14, 1
vextractf128 [rsp], ymm15, 1
5:
mov [rsp - 4], eax
sub rsp, 64 // alloc space for ucontext fields
mov rdi, rsp
call {inner}
add rsp, 64
fxrstor64 [rsp + 16 * 16]
cmp byte ptr [rip + {supports_avx}], 0
je 6f
vinsertf128 ymm0, ymm0, [rsp + 15 * 16], 1
vinsertf128 ymm1, ymm1, [rsp + 14 * 16], 1
vinsertf128 ymm2, ymm2, [rsp + 13 * 16], 1
vinsertf128 ymm3, ymm3, [rsp + 12 * 16], 1
vinsertf128 ymm4, ymm4, [rsp + 11 * 16], 1
vinsertf128 ymm5, ymm5, [rsp + 10 * 16], 1
vinsertf128 ymm6, ymm6, [rsp + 9 * 16], 1
vinsertf128 ymm7, ymm7, [rsp + 8 * 16], 1
vinsertf128 ymm8, ymm8, [rsp + 7 * 16], 1
vinsertf128 ymm9, ymm9, [rsp + 6 * 16], 1
vinsertf128 ymm10, ymm10, [rsp + 5 * 16], 1
vinsertf128 ymm11, ymm11, [rsp + 4 * 16], 1
vinsertf128 ymm12, ymm12, [rsp + 3 * 16], 1
vinsertf128 ymm13, ymm13, [rsp + 2 * 16], 1
vinsertf128 ymm14, ymm14, [rsp + 16], 1
vinsertf128 ymm15, ymm15, [rsp], 1
6:
add rsp, (29 + 16) * 16
pop r15
pop r14
pop r13
pop r12
pop rbp
pop rbx
pop r11
pop r10
pop r9
pop r8
pop rax
pop rcx
pop rdx
pop rsi
pop rdi
popfq
pop qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
// x86 lacks atomic instructions for setting both the stack and instruction pointer
// simultaneously, except the slow microcoded IRETQ instruction. Thus, we let the arch_pre
// function emulate atomicity between the pop rsp and indirect jump.
.globl __relibc_internal_sigentry_crit_first
__relibc_internal_sigentry_crit_first:
pop rsp
.globl __relibc_internal_sigentry_crit_second
__relibc_internal_sigentry_crit_second:
jmp qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
7:
// A spurious signal occurred. Signals are still disabled here, but will need to be re-enabled.
// restore flags
mov rax, fs:[0] // load FS base
// TODO: Use lahf/sahf rather than pushfq/popfq?
lea rsp, [rax + {tcb_sc_off} + {sc_saved_rflags}]
popfq
// restore stack
mov rsp, fs:[{tcb_sa_off} + {sa_tmp_rsp}]
// move saved RIP away from control, allowing arch_pre to save us if interrupted.
mov rax, fs:[{tcb_sc_off} + {sc_saved_rip}]
mov fs:[{tcb_sa_off} + {sa_tmp_rip}], rax
// restore regs
mov rax, fs:[{tcb_sa_off} + {sa_tmp_rax}]
mov rdx, fs:[{tcb_sa_off} + {sa_tmp_rdx}]
// Re-enable signals. This code can be interrupted after this point, so we need to define
// 'crit_third'.
and qword ptr fs:[{tcb_sc_off} + {sc_control}], ~1
.globl __relibc_internal_sigentry_crit_third
__relibc_internal_sigentry_crit_third:
jmp qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
"] <= [
inner = sym inner_c,
sa_tmp_rip = const offset_of!(SigArea, tmp_rip),
sa_tmp_rsp = const offset_of!(SigArea, tmp_rsp),
sa_tmp_rax = const offset_of!(SigArea, tmp_rax),
sa_tmp_rdx = const offset_of!(SigArea, tmp_rdx),
sa_tmp_rdi = const offset_of!(SigArea, tmp_rdi),
sa_tmp_rsi = const offset_of!(SigArea, tmp_rsi),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_altstack_top = const offset_of!(SigArea, altstack_top),
sa_altstack_bottom = const offset_of!(SigArea, altstack_bottom),
sc_saved_rflags = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_saved_rip = const offset_of!(Sigcontrol, saved_ip),
sc_word = const offset_of!(Sigcontrol, word),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
sc_control = const offset_of!(Sigcontrol, control_flags),
tcb_sa_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch),
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
pctl_off_actions = const offset_of!(SigProcControl, actions),
pctl_off_pending = const offset_of!(SigProcControl, pending),
pctl_off_sender_infos = const offset_of!(SigProcControl, sender_infos),
pctl = sym PROC_CONTROL_STRUCT,
supports_avx = sym SUPPORTS_AVX,
REDZONE_SIZE = const 128,
STACK_ALIGN = const 16,
SA_ONSTACK_BIT = const 58, // (1 << 58) >> 32 = 0x0400_0000
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
]);
extern "C" {
fn __relibc_internal_sigentry_crit_first();
fn __relibc_internal_sigentry_crit_second();
fn __relibc_internal_sigentry_crit_third();
}
/// Fixes some edge cases, and calculates the value for uc_stack.
pub unsafe fn arch_pre(stack: &mut SigStack, area: &mut SigArea) -> PosixStackt {
// It is impossible to update RSP and RIP atomically on x86_64, without using IRETQ, which is
// almost as slow as calling a SIGRETURN syscall would be. Instead, we abuse the fact that
// signals are disabled in the prologue of the signal trampoline, which allows us to emulate
// atomicity inside the critical section, consisting of one instruction at 'crit_first', one at
// 'crit_second', and one at 'crit_third', see asm.
if stack.regs.rip == __relibc_internal_sigentry_crit_first as usize {
// Reexecute pop rsp and jump steps. This case needs to be different from the one below,
// since rsp has not been overwritten with the previous context's stack, just yet. At this
// point, we know [rsp+0] contains the saved RSP, and [rsp-8] contains the saved RIP.
let stack_ptr = stack.regs.rsp as *const usize;
stack.regs.rsp = stack_ptr.read();
stack.regs.rip = stack_ptr.sub(1).read();
} else if stack.regs.rip == __relibc_internal_sigentry_crit_second as usize
|| stack.regs.rip == __relibc_internal_sigentry_crit_third as usize
{
// Almost finished, just reexecute the jump before tmp_rip is overwritten by this
// deeper-level signal.
stack.regs.rip = area.tmp_rip;
}
get_sigaltstack(area, stack.regs.rsp).into()
}
pub(crate) static SUPPORTS_AVX: AtomicU8 = AtomicU8::new(0);
// __relibc will be prepended to the name, so no_mangle is fine
#[no_mangle]
pub unsafe fn manually_enter_trampoline() {
let c = &Tcb::current().unwrap().os_specific.control;
c.control_flags.store(
c.control_flags.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
c.saved_archdep_reg.set(0); // TODO: Just reset DF on x86?
core::arch::asm!("
lea rax, [rip + 2f]
mov fs:[{tcb_sc_off} + {sc_saved_rip}], rax
jmp __relibc_internal_sigentry
2:
",
out("rax") _,
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
sc_saved_rip = const offset_of!(Sigcontrol, saved_ip),
);
}
/// Get current stack pointer, weak granularity guarantees.
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!("mov {}, rsp", out(reg) sp);
}
sp
}
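// ===== redox-rt/src/lib.rs (file boundary inferred) =====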
#![no_std]
#![feature(
asm_const,
array_chunks,
int_roundings,
let_chains,
slice_ptr_get,
sync_unsafe_cell
)]
#![forbid(unreachable_patterns)]
use core::cell::{SyncUnsafeCell, UnsafeCell};
use generic_rt::{ExpectTlsFree, GenericTcb};
use syscall::{Sigcontrol, O_CLOEXEC};
use self::proc::FdGuard;
extern crate alloc;
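/// Declares a global assembly function in its own `.text` section, optionally with a
/// return type, binding `sym`/`const` operands into the assembly template.
///
/// A minimal sketch of an invocation (hypothetical function, x86_64 assembly assumed,
/// for illustration only):
/// ```ignore
/// asmfunction!(__example_return_zero -> usize: ["
///     xor eax, eax
///     ret
/// "] <= []);
/// ```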
#[macro_export]
macro_rules! asmfunction(
($name:ident $(-> $ret:ty)? : [$($asmstmt:expr),*$(,)?] <= [$($decl:ident = $(sym $symname:ident)?$(const $constval:expr)?),*$(,)?]$(,)? ) => {
::core::arch::global_asm!(concat!("
.p2align 4
.section .text.", stringify!($name), ", \"ax\", @progbits
.globl ", stringify!($name), "
.type ", stringify!($name), ", @function
", stringify!($name), ":
", $($asmstmt, "\n",)* "
.size ", stringify!($name), ", . - ", stringify!($name), "
"), $($decl = $(sym $symname)?$(const $constval)?),*);
extern "C" {
pub fn $name() $(-> $ret)?;
}
}
);
pub mod arch;
pub mod proc;
// TODO: Replace auxvs with a non-stack-based interface, but keep getauxval for compatibility
#[path = "../../src/platform/auxv_defs.rs"]
pub mod auxv_defs;
pub mod signal;
pub mod sync;
pub mod sys;
pub mod thread;
#[derive(Debug, Default)]
pub struct RtTcb {
pub control: Sigcontrol,
pub arch: UnsafeCell<crate::arch::SigArea>,
pub thr_fd: UnsafeCell<Option<FdGuard>>,
}
impl RtTcb {
pub fn current() -> &'static Self {
unsafe { &Tcb::current().unwrap().os_specific }
}
pub fn thread_fd(&self) -> &FdGuard {
unsafe {
if (&*self.thr_fd.get()).is_none() {
self.thr_fd.get().write(Some(FdGuard::new(
syscall::open("/scheme/thisproc/current/open_via_dup", O_CLOEXEC).unwrap(),
)));
}
(&*self.thr_fd.get()).as_ref().unwrap()
}
}
}
pub type Tcb = GenericTcb<RtTcb>;
/// OS and architecture specific code to activate TLS - Redox aarch64
#[cfg(target_arch = "aarch64")]
pub unsafe fn tcb_activate(_tcb: &RtTcb, tls_end: usize, tls_len: usize) {
// Uses ABI page
let abi_ptr = tls_end - tls_len - 16;
core::ptr::write(abi_ptr as *mut usize, tls_end);
core::arch::asm!(
"msr tpidr_el0, {}",
in(reg) abi_ptr,
);
}
/// OS and architecture specific code to activate TLS - Redox x86
#[cfg(target_arch = "x86")]
pub unsafe fn tcb_activate(tcb: &RtTcb, tls_end: usize, _tls_len: usize) {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(
syscall::dup(**tcb.thread_fd(), b"regs/env")
.expect_notls("failed to open handle for process registers"),
);
let _ = syscall::read(*file, &mut env).expect_notls("failed to read gsbase");
env.gsbase = tls_end as u32;
let _ = syscall::write(*file, &env).expect_notls("failed to write gsbase");
}
/// OS and architecture specific code to activate TLS - Redox x86_64
#[cfg(target_arch = "x86_64")]
pub unsafe fn tcb_activate(tcb: &RtTcb, tls_end_and_tcb_start: usize, _tls_len: usize) {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(
syscall::dup(**tcb.thread_fd(), b"regs/env")
.expect_notls("failed to open handle for process registers"),
);
let _ = syscall::read(*file, &mut env).expect_notls("failed to read fsbase");
env.fsbase = tls_end_and_tcb_start as u64;
let _ = syscall::write(*file, &env).expect_notls("failed to write fsbase");
}
/// OS and architecture specific code to activate TLS - Redox riscv64
#[cfg(target_arch = "riscv64")]
pub unsafe fn tcb_activate(_tcb: &RtTcb, tls_end: usize, tls_len: usize) {
// tp points to static tls block
// FIXME limited to a single initial master
let tls_start = tls_end - tls_len;
let abi_ptr = tls_start - 8;
core::ptr::write(abi_ptr as *mut usize, tls_end);
core::arch::asm!(
"mv tp, {}",
in(reg) tls_start
);
}
/// Initialize redox-rt in situations where relibc is not used
pub unsafe fn initialize_freestanding() {
// TODO: This code is a hack! Integrate the ld_so TCB code into generic-rt, and then use that
// (this function will need pointers to the ELF structs normally passed in auxvs), so the TCB
// is initialized properly.
// TODO: TLS
let page = {
&mut *(syscall::fmap(
!0,
&syscall::Map {
offset: 0,
size: syscall::PAGE_SIZE,
flags: syscall::MapFlags::PROT_READ
| syscall::MapFlags::PROT_WRITE
| syscall::MapFlags::MAP_PRIVATE,
address: 0,
},
)
.unwrap() as *mut Tcb)
};
page.tcb_ptr = page;
page.tcb_len = syscall::PAGE_SIZE;
page.tls_end = (page as *mut Tcb).cast();
// Make sure to use ptr::write to prevent dropping the existing FdGuard
core::ptr::write(page.os_specific.thr_fd.get(), None);
#[cfg(not(any(target_arch = "aarch64", target_arch = "riscv64")))]
unsafe {
let tcb_addr = page as *mut Tcb as usize;
tcb_activate(&page.os_specific, tcb_addr, 0)
}
#[cfg(target_arch = "aarch64")]
unsafe {
let abi_ptr = core::ptr::addr_of_mut!(page.tcb_ptr);
core::arch::asm!("msr tpidr_el0, {}", in(reg) abi_ptr);
}
#[cfg(target_arch = "riscv64")]
unsafe {
let abi_ptr = core::ptr::addr_of_mut!(page.tcb_ptr) as usize;
core::arch::asm!("mv tp, {}", in(reg) (abi_ptr + 8));
}
initialize();
}
pub unsafe fn initialize() {
THIS_PID
.get()
.write(syscall::getpid().unwrap().try_into().unwrap());
}
static THIS_PID: SyncUnsafeCell<u32> = SyncUnsafeCell::new(0);
unsafe fn child_hook_common(new_pid_fd: FdGuard) {
// TODO: Currently pidfd == threadfd, but this will not be the case later.
RtTcb::current().thr_fd.get().write(Some(new_pid_fd));
THIS_PID
.get()
.write(syscall::getpid().unwrap().try_into().unwrap());
}
use core::{fmt::Debug, mem::size_of};
use crate::{arch::*, auxv_defs::*};
use alloc::{boxed::Box, collections::BTreeMap, vec};
//TODO: allow use of either 32-bit or 64-bit programs
#[cfg(target_pointer_width = "32")]
use goblin::elf32::{
header::Header,
program_header::program_header32::{ProgramHeader, PF_W, PF_X, PT_INTERP, PT_LOAD},
};
#[cfg(target_pointer_width = "64")]
use goblin::elf64::{
header::Header,
program_header::program_header64::{ProgramHeader, PF_W, PF_X, PT_INTERP, PT_LOAD},
};
use syscall::{
error::*,
flag::{MapFlags, SEEK_SET},
GrantDesc, GrantFlags, Map, SetSighandlerData, MAP_FIXED_NOREPLACE, MAP_SHARED, O_CLOEXEC,
PAGE_SIZE, PROT_EXEC, PROT_READ, PROT_WRITE,
};
pub enum FexecResult {
Normal {
addrspace_handle: FdGuard,
},
Interp {
path: Box<[u8]>,
image_file: FdGuard,
open_via_dup: FdGuard,
interp_override: InterpOverride,
},
}
pub struct InterpOverride {
phs: Box<[u8]>,
at_entry: usize,
at_phnum: usize,
at_phent: usize,
name: Box<[u8]>,
tree: BTreeMap<usize, usize>,
}
pub struct ExtraInfo<'a> {
pub cwd: Option<&'a [u8]>,
/// Default scheme for the process.
pub default_scheme: Option<&'a [u8]>,
/// POSIX states that while sigactions are reset, ignored sigactions will remain ignored.
pub sigignmask: u64,
/// POSIX also states that the sigprocmask must be preserved across execs.
pub sigprocmask: u64,
/// File mode creation mask (POSIX)
pub umask: u32,
}
pub fn fexec_impl<A, E>(
image_file: FdGuard,
open_via_dup: FdGuard,
memory_scheme_fd: &FdGuard,
path: &[u8],
args: A,
envs: E,
total_args_envs_size: usize,
extrainfo: &ExtraInfo,
mut interp_override: Option<InterpOverride>,
) -> Result<FexecResult>
where
A: IntoIterator,
E: IntoIterator,
A::Item: AsRef<[u8]>,
E::Item: AsRef<[u8]>,
{
// Here, we do the minimum part of loading an application, which is what the kernel used to do.
// We load the executable into memory (albeit mapped at temporary offsets within this process), fix
// some misalignments, and then execute the SYS_EXEC syscall to replace the program memory
// entirely.
let mut header_bytes = [0_u8; size_of::<Header>()];
read_all(*image_file, Some(0), &mut header_bytes)?;
let header = Header::from_bytes(&header_bytes);
let grants_fd = {
let current_addrspace_fd = FdGuard::new(syscall::dup(*open_via_dup, b"addrspace")?);
FdGuard::new(syscall::dup(*current_addrspace_fd, b"empty")?)
};
// Never allow more than 1 MiB of program headers.
const MAX_PH_SIZE: usize = 1024 * 1024;
let phentsize = u64::from(header.e_phentsize) as usize;
let phnum = u64::from(header.e_phnum) as usize;
let pheaders_size = phentsize
.saturating_mul(phnum)
.saturating_add(size_of::<Header>());
if pheaders_size > MAX_PH_SIZE {
return Err(Error::new(E2BIG));
}
let mut phs_raw = vec![0_u8; pheaders_size];
phs_raw[..size_of::<Header>()].copy_from_slice(&header_bytes);
let phs = &mut phs_raw[size_of::<Header>()..];
// TODO: Remove clone, but this would require more as_refs and as_muts
let mut tree = interp_override.as_mut().map_or_else(
|| core::iter::once((0, PAGE_SIZE)).collect::<BTreeMap<_, _>>(),
|o| core::mem::take(&mut o.tree),
);
read_all(*image_file as usize, Some(header.e_phoff as u64), phs)
.map_err(|_| Error::new(EIO))?;
for ph_idx in 0..phnum {
let ph_bytes = &phs[ph_idx * phentsize..(ph_idx + 1) * phentsize];
let segment: &ProgramHeader =
plain::from_bytes(ph_bytes).map_err(|_| Error::new(EINVAL))?;
let mut flags = syscall::PROT_READ;
// W ^ X. If it is executable, do not allow it to be writable, even if requested
if segment.p_flags & PF_X == PF_X {
flags |= syscall::PROT_EXEC;
} else if segment.p_flags & PF_W == PF_W {
flags |= syscall::PROT_WRITE;
}
match segment.p_type {
// PT_INTERP must come before any PT_LOAD, so we don't have to iterate twice.
PT_INTERP => {
let mut interp = vec![0_u8; segment.p_filesz as usize];
read_all(
*image_file as usize,
Some(segment.p_offset as u64),
&mut interp,
)?;
return Ok(FexecResult::Interp {
path: interp.into_boxed_slice(),
image_file,
open_via_dup,
interp_override: InterpOverride {
at_entry: header.e_entry as usize,
at_phnum: phnum,
at_phent: phentsize,
phs: phs_raw.into_boxed_slice(),
name: path.into(),
tree,
},
});
}
PT_LOAD => {
let voff = segment.p_vaddr as usize % PAGE_SIZE;
let vaddr = segment.p_vaddr as usize - voff;
let filesz = segment.p_filesz as usize;
let total_page_count = (segment.p_memsz as usize + voff).div_ceil(PAGE_SIZE);
// The case where segments overlap so that they share one page is not handled.
// TODO: Should it be?
if segment.p_filesz > segment.p_memsz {
return Err(Error::new(ENOEXEC));
}
allocate_remote(
&grants_fd,
memory_scheme_fd,
vaddr,
total_page_count * PAGE_SIZE,
flags,
)?;
syscall::lseek(*image_file, segment.p_offset as isize, SEEK_SET)
.map_err(|_| Error::new(EIO))?;
// If unaligned, read the head page separately.
let (first_aligned_page, remaining_filesz) = if voff > 0 {
let bytes_to_next_page = PAGE_SIZE - voff;
let (_guard, dst_page) =
unsafe { MmapGuard::map_mut_anywhere(*grants_fd, vaddr, PAGE_SIZE)? };
let length = core::cmp::min(bytes_to_next_page, filesz);
read_all(*image_file, None, &mut dst_page[voff..][..length])?;
(vaddr + PAGE_SIZE, filesz - length)
} else {
(vaddr, filesz)
};
let remaining_page_count = remaining_filesz.div_floor(PAGE_SIZE);
let tail_bytes = remaining_filesz % PAGE_SIZE;
// TODO: Unless the calling process is *very* memory-constrained, the max amount of
// pages per iteration has no limit other than the time it takes to setup page
// tables.
//
// TODO: Reserve PAGES_PER_ITER "scratch pages" of virtual memory for that type of
// situation?
const PAGES_PER_ITER: usize = 64;
// TODO: Before this loop, attempt to mmap with MAP_PRIVATE directly from the image
// file.
for page_idx in (0..remaining_page_count).step_by(PAGES_PER_ITER) {
// FIXME: The commented-out line below reproduces a kernel bug.
//let pages_in_this_group = core::cmp::min(PAGES_PER_ITER, file_page_count - page_idx * PAGES_PER_ITER);
let pages_in_this_group =
core::cmp::min(PAGES_PER_ITER, remaining_page_count - page_idx);
if pages_in_this_group == 0 {
break;
}
// TODO: MAP_FIXED to optimize away funmap?
let (_guard, dst_memory) = unsafe {
MmapGuard::map_mut_anywhere(
*grants_fd,
first_aligned_page + page_idx * PAGE_SIZE, // offset
pages_in_this_group * PAGE_SIZE, // size
)?
};
// TODO: Are &mut [u8] and &mut [[u8; PAGE_SIZE]] interchangeable (if the
// lengths are aligned, obviously)?
read_all(*image_file, None, dst_memory)?;
}
if tail_bytes > 0 {
let (_guard, dst_page) = unsafe {
MmapGuard::map_mut_anywhere(
*grants_fd,
first_aligned_page + remaining_page_count * PAGE_SIZE,
PAGE_SIZE,
)?
};
read_all(*image_file, None, &mut dst_page[..tail_bytes])?;
}
// Pages past the end of the file data (up to total_page_count) are already
// zero-initialized by the kernel.
if tree
.range(..=vaddr)
.next_back()
.filter(|(start, size)| **start + **size > vaddr)
.is_none()
{
tree.insert(vaddr, total_page_count * PAGE_SIZE);
}
}
_ => continue,
}
}
allocate_remote(
&grants_fd,
memory_scheme_fd,
STACK_TOP - STACK_SIZE,
STACK_SIZE,
MapFlags::PROT_READ | MapFlags::PROT_WRITE,
)?;
tree.insert(STACK_TOP - STACK_SIZE, STACK_SIZE);
let mut sp = STACK_TOP;
let mut stack_page = Option::<MmapGuard>::None;
let mut push = |word: usize| {
let old_page_no = sp / PAGE_SIZE;
sp -= size_of::<usize>();
let new_page_no = sp / PAGE_SIZE;
let new_page_off = sp % PAGE_SIZE;
let page = if let Some(ref mut page) = stack_page
&& old_page_no == new_page_no
{
page
} else if let Some(ref mut stack_page) = stack_page {
stack_page.remap(new_page_no * PAGE_SIZE, PROT_WRITE)?;
stack_page
} else {
let new = MmapGuard::map(
*grants_fd,
&Map {
offset: new_page_no * PAGE_SIZE,
size: PAGE_SIZE,
flags: PROT_WRITE,
address: 0, // let kernel decide
},
)?;
stack_page.insert(new)
};
unsafe {
page.as_mut_ptr_slice()
.as_mut_ptr()
.add(new_page_off)
.cast::<usize>()
.write(word);
}
Ok(())
};
let pheaders_to_convey = if let Some(ref r#override) = interp_override {
&*r#override.phs
} else {
&*phs_raw
};
let pheaders_size_aligned = pheaders_to_convey.len().next_multiple_of(PAGE_SIZE);
let pheaders = find_free_target_addr(&tree, pheaders_size_aligned).ok_or(Error::new(ENOMEM))?;
tree.insert(pheaders, pheaders_size_aligned);
allocate_remote(
&grants_fd,
memory_scheme_fd,
pheaders,
pheaders_size_aligned,
MapFlags::PROT_READ | MapFlags::PROT_WRITE,
)?;
unsafe {
let (_guard, memory) =
MmapGuard::map_mut_anywhere(*grants_fd, pheaders, pheaders_size_aligned)?;
memory[..pheaders_to_convey.len()].copy_from_slice(pheaders_to_convey);
}
mprotect_remote(
&grants_fd,
pheaders,
pheaders_size_aligned,
MapFlags::PROT_READ,
)?;
push(0)?;
push(AT_NULL)?;
push(header.e_entry as usize)?;
if let Some(ref r#override) = interp_override {
push(AT_BASE)?;
push(r#override.at_entry)?;
}
push(AT_ENTRY)?;
push(pheaders + size_of::<Header>())?;
push(AT_PHDR)?;
push(
interp_override
.as_ref()
.map_or(header.e_phnum as usize, |o| o.at_phnum),
)?;
push(AT_PHNUM)?;
push(
interp_override
.as_ref()
.map_or(header.e_phentsize as usize, |o| o.at_phent),
)?;
push(AT_PHENT)?;
let total_args_envs_auxvpointee_size = total_args_envs_size
+ extrainfo.cwd.map_or(0, |s| s.len() + 1)
+ extrainfo.default_scheme.map_or(0, |s| s.len() + 1);
let args_envs_size_aligned = total_args_envs_auxvpointee_size.next_multiple_of(PAGE_SIZE);
let target_args_env_address =
find_free_target_addr(&tree, args_envs_size_aligned).ok_or(Error::new(ENOMEM))?;
allocate_remote(
&grants_fd,
memory_scheme_fd,
target_args_env_address,
args_envs_size_aligned,
MapFlags::PROT_READ | MapFlags::PROT_WRITE,
)?;
tree.insert(target_args_env_address, args_envs_size_aligned);
let mut offset = 0;
let mut argc = 0;
{
let mut append = |source_slice: &[u8]| {
// TODO
let address = target_args_env_address + offset;
if !source_slice.is_empty() {
let containing_page = address.div_floor(PAGE_SIZE) * PAGE_SIZE;
let displacement = address - containing_page;
let size = source_slice.len() + displacement;
let aligned_size = size.next_multiple_of(PAGE_SIZE);
let (_guard, memory) = unsafe {
MmapGuard::map_mut_anywhere(*grants_fd, containing_page, aligned_size)?
};
memory[displacement..][..source_slice.len()].copy_from_slice(source_slice);
}
offset += source_slice.len() + 1;
Ok(address)
};
if let Some(cwd) = extrainfo.cwd {
push(append(cwd)?)?;
push(AT_REDOX_INITIAL_CWD_PTR)?;
push(cwd.len())?;
push(AT_REDOX_INITIAL_CWD_LEN)?;
}
if let Some(default_scheme) = extrainfo.default_scheme {
push(append(default_scheme)?)?;
push(AT_REDOX_INITIAL_DEFAULT_SCHEME_PTR)?;
push(default_scheme.len())?;
push(AT_REDOX_INITIAL_DEFAULT_SCHEME_LEN)?;
}
#[cfg(target_pointer_width = "32")]
{
push((extrainfo.sigignmask >> 32) as usize)?;
push(AT_REDOX_INHERITED_SIGIGNMASK_HI)?;
}
push(extrainfo.sigignmask as usize)?;
push(AT_REDOX_INHERITED_SIGIGNMASK)?;
#[cfg(target_pointer_width = "32")]
{
push((extrainfo.sigprocmask >> 32) as usize)?;
push(AT_REDOX_INHERITED_SIGPROCMASK_HI)?;
}
push(extrainfo.sigprocmask as usize)?;
push(AT_REDOX_INHERITED_SIGPROCMASK)?;
push(extrainfo.umask as usize)?;
push(AT_REDOX_UMASK)?;
push(0)?;
for env in envs {
push(append(env.as_ref())?)?;
}
push(0)?;
for arg in args {
push(append(arg.as_ref())?)?;
argc += 1;
}
}
push(argc)?;
if let Ok(sighandler_fd) = syscall::dup(*open_via_dup, b"sighandler").map(FdGuard::new) {
let _ = syscall::write(
*sighandler_fd,
&SetSighandlerData {
user_handler: 0,
excp_handler: 0,
thread_control_addr: 0,
proc_control_addr: 0,
},
);
}
unsafe {
deactivate_tcb(*open_via_dup)?;
}
// TODO: Restore old name if exec failed?
if let Ok(name_fd) = syscall::dup(*open_via_dup, b"name").map(FdGuard::new) {
let _ = syscall::write(*name_fd, interp_override.as_ref().map_or(path, |o| &o.name));
}
if interp_override.is_some() {
let mmap_min_fd = FdGuard::new(syscall::dup(*grants_fd, b"mmap-min-addr")?);
let last_addr = tree.iter().rev().nth(1).map_or(0, |(off, len)| *off + *len);
let aligned_last_addr = last_addr.next_multiple_of(PAGE_SIZE);
let _ = syscall::write(*mmap_min_fd, &usize::to_ne_bytes(aligned_last_addr));
}
let addrspace_selection_fd = FdGuard::new(syscall::dup(*open_via_dup, b"current-addrspace")?);
let _ = syscall::write(
*addrspace_selection_fd,
&create_set_addr_space_buf(*grants_fd, header.e_entry as usize, sp),
);
Ok(FexecResult::Normal {
addrspace_handle: addrspace_selection_fd,
})
}
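// For reference, the initial stack image constructed by `fexec_impl` above,
// read upwards from the final stack pointer (conventional SysV-style layout;
// pushes grow downwards, so callers supply `args`/`envs` in matching order):
//
//     argc
//     argv pointers...       (into the args/envs region)
//     0                      (argv terminator)
//     envp pointers...       (into the args/envs region)
//     0                      (envp terminator)
//     AT_* (key, value) auxv pairs, ending with (AT_NULL, 0)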
fn write_usizes<const N: usize>(fd: &FdGuard, usizes: [usize; N]) -> Result<()> {
let _ = syscall::write(**fd, unsafe { plain::as_bytes(&usizes) });
Ok(())
}
fn allocate_remote(
addrspace_fd: &FdGuard,
memory_scheme_fd: &FdGuard,
dst_addr: usize,
len: usize,
flags: MapFlags,
) -> Result<()> {
mmap_remote(addrspace_fd, memory_scheme_fd, 0, dst_addr, len, flags)
}
pub fn mmap_remote(
addrspace_fd: &FdGuard,
fd: &FdGuard,
offset: usize,
dst_addr: usize,
len: usize,
flags: MapFlags,
) -> Result<()> {
write_usizes(
addrspace_fd,
[
// op
syscall::flag::ADDRSPACE_OP_MMAP,
// fd
**fd,
// "offset"
offset,
// address
dst_addr,
// size
len,
// flags
(flags | MapFlags::MAP_FIXED_NOREPLACE).bits(),
],
)
}
pub fn mprotect_remote(
addrspace_fd: &FdGuard,
addr: usize,
len: usize,
flags: MapFlags,
) -> Result<()> {
write_usizes(
addrspace_fd,
[
// op
syscall::flag::ADDRSPACE_OP_MPROTECT,
// address
addr,
// size
len,
// flags
flags.bits(),
],
)
}
pub fn munmap_remote(addrspace_fd: &FdGuard, addr: usize, len: usize) -> Result<()> {
write_usizes(
addrspace_fd,
[
// op
syscall::flag::ADDRSPACE_OP_MUNMAP,
// address
addr,
// size
len,
],
)
}
pub fn munmap_transfer(
src: &FdGuard,
dst: &FdGuard,
src_addr: usize,
dst_addr: usize,
len: usize,
flags: MapFlags,
) -> Result<()> {
write_usizes(
dst,
[
// op
syscall::flag::ADDRSPACE_OP_TRANSFER,
// fd
**src,
// "offset" (source address)
src_addr,
// address
dst_addr,
// size
len,
// flags
(flags | MapFlags::MAP_FIXED_NOREPLACE).bits(),
],
)
}
fn read_all(fd: usize, offset: Option<u64>, buf: &mut [u8]) -> Result<()> {
if let Some(offset) = offset {
syscall::lseek(fd, offset as isize, SEEK_SET)?;
}
let mut total_bytes_read = 0;
while total_bytes_read < buf.len() {
total_bytes_read += match syscall::read(fd, &mut buf[total_bytes_read..])? {
0 => return Err(Error::new(ENOEXEC)),
bytes_read => bytes_read,
}
}
Ok(())
}
// TODO: With the introduction of remote mmaps, remove this and let the kernel handle address
// allocation.
fn find_free_target_addr(tree: &BTreeMap<usize, usize>, size: usize) -> Option<usize> {
let mut iterator = tree.iter().peekable();
// Ignore the space between zero and the first region, to avoid null pointers.
while let Some((cur_address, entry_size)) = iterator.next() {
let end = *cur_address + entry_size;
if let Some((next_address, _)) = iterator.peek() {
if **next_address - end > size {
return Some(end);
}
}
// No need to check last entry, since the stack will always be put at the highest
// possible address.
}
None
}
pub struct MmapGuard {
fd: usize,
base: usize,
size: usize,
}
impl MmapGuard {
pub fn map(fd: usize, map: &Map) -> Result<Self> {
Ok(Self {
fd,
size: map.size,
base: unsafe { syscall::fmap(fd, map)? },
})
}
pub fn remap(&mut self, offset: usize, mut flags: MapFlags) -> Result<()> {
flags.remove(MapFlags::MAP_FIXED_NOREPLACE);
flags.insert(MapFlags::MAP_FIXED);
let _new_base = unsafe {
syscall::fmap(
self.fd,
&Map {
offset,
size: self.size,
flags,
address: self.base,
},
)?
};
Ok(())
}
pub unsafe fn map_mut_anywhere<'a>(
fd: usize,
offset: usize,
size: usize,
) -> Result<(Self, &'a mut [u8])> {
let mut this = Self::map(
fd,
&Map {
size,
offset,
address: 0,
flags: PROT_WRITE,
},
)?;
let slice = &mut *this.as_mut_ptr_slice();
Ok((this, slice))
}
pub fn addr(&self) -> usize {
self.base
}
pub fn len(&self) -> usize {
self.size
}
pub fn as_mut_ptr_slice(&mut self) -> *mut [u8] {
core::ptr::slice_from_raw_parts_mut(self.base as *mut u8, self.size)
}
pub fn take(mut self) {
self.size = 0;
}
}
impl Drop for MmapGuard {
fn drop(&mut self) {
if self.size != 0 {
let _ = unsafe { syscall::funmap(self.base, self.size) };
}
}
}
pub struct FdGuard {
fd: usize,
taken: bool,
}
impl FdGuard {
pub fn new(fd: usize) -> Self {
Self { fd, taken: false }
}
pub fn take(&mut self) -> usize {
self.taken = true;
self.fd
}
}
impl core::ops::Deref for FdGuard {
type Target = usize;
fn deref(&self) -> &Self::Target {
&self.fd
}
}
impl Drop for FdGuard {
fn drop(&mut self) {
if !self.taken {
let _ = syscall::close(self.fd);
}
}
}
impl Debug for FdGuard {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "[fd {}]", self.fd)
}
}
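// Illustrative sketch (hypothetical helper, not part of the API): because
// `FdGuard` is a plain RAII wrapper, a dup'd handle is closed on every early
// return unless it is explicitly `take`n.
#[allow(dead_code)]
fn example_dup_guarded(fd: &FdGuard, grant_name: &[u8]) -> Result<FdGuard> {
// If `dup` fails, nothing leaks; on success, the caller owns the new guard.
Ok(FdGuard::new(syscall::dup(**fd, grant_name)?))
}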
pub fn create_set_addr_space_buf(
space: usize,
ip: usize,
sp: usize,
) -> [u8; size_of::<usize>() * 3] {
let mut buf = [0_u8; 3 * size_of::<usize>()];
let mut chunks = buf.array_chunks_mut::<{ size_of::<usize>() }>();
*chunks.next().unwrap() = usize::to_ne_bytes(space);
*chunks.next().unwrap() = usize::to_ne_bytes(sp);
*chunks.next().unwrap() = usize::to_ne_bytes(ip);
buf
}
/// Spawns a new context which will not share the same address space as the current one. File
/// descriptors from other schemes are reobtained with `dup`, and grants referencing such file
/// descriptors are reobtained through `fmap`. Other mappings are kept but duplicated using CoW.
pub fn fork_impl() -> Result<usize> {
let old_mask = crate::signal::get_sigmask()?;
let pid = unsafe { Error::demux(__relibc_internal_fork_wrapper())? };
if pid == 0 {
crate::signal::set_sigmask(Some(old_mask), None)?;
}
Ok(pid)
}
pub fn fork_inner(initial_rsp: *mut usize) -> Result<usize> {
let (cur_filetable_fd, new_pid_fd, new_pid);
{
let cur_pid_fd = FdGuard::new(syscall::open(
"/scheme/thisproc/current/open_via_dup",
O_CLOEXEC,
)?);
(new_pid_fd, new_pid) = new_child_process()?;
copy_str(*cur_pid_fd, *new_pid_fd, "name")?;
// Copy existing files into new file table, but do not reuse the same file table (i.e. new
// parent FDs will not show up for the child).
{
cur_filetable_fd = FdGuard::new(syscall::dup(*cur_pid_fd, b"filetable")?);
// This must be done before the address space is copied.
unsafe {
initial_rsp.write(*cur_filetable_fd);
initial_rsp.add(1).write(*new_pid_fd);
}
}
// CoW-duplicate address space.
{
let new_addr_space_sel_fd =
FdGuard::new(syscall::dup(*new_pid_fd, b"current-addrspace")?);
let cur_addr_space_fd = FdGuard::new(syscall::dup(*cur_pid_fd, b"addrspace")?);
let new_addr_space_fd = FdGuard::new(syscall::dup(*cur_addr_space_fd, b"exclusive")?);
let mut grant_desc_buf = [GrantDesc::default(); 16];
loop {
let bytes_read = {
let buf = unsafe {
core::slice::from_raw_parts_mut(
grant_desc_buf.as_mut_ptr().cast(),
grant_desc_buf.len() * size_of::<GrantDesc>(),
)
};
syscall::read(*cur_addr_space_fd, buf)?
};
if bytes_read == 0 {
break;
}
let grants = &grant_desc_buf[..bytes_read / size_of::<GrantDesc>()];
for grant in grants {
if !grant.flags.contains(GrantFlags::GRANT_SCHEME)
|| !grant.flags.contains(GrantFlags::GRANT_SHARED)
{
continue;
}
let buf;
// TODO: write! using some #![no_std] Cursor type (tracking the length)?
#[cfg(target_pointer_width = "64")]
{
//buf = *b"grant-fd-AAAABBBBCCCCDDDD";
//write!(&mut buf, "grant-fd-{:>016x}", grant.base).unwrap();
buf = alloc::format!("grant-fd-{:>016x}", grant.base).into_bytes();
}
#[cfg(target_pointer_width = "32")]
{
//buf = *b"grant-fd-AAAABBBB";
//write!(&mut buf[..], "grant-fd-{:>08x}", grant.base).unwrap();
buf = alloc::format!("grant-fd-{:>08x}", grant.base).into_bytes();
}
let grant_fd = FdGuard::new(syscall::dup(*cur_addr_space_fd, &buf)?);
let mut flags = MAP_SHARED | MAP_FIXED_NOREPLACE;
flags.set(PROT_READ, grant.flags.contains(GrantFlags::GRANT_READ));
flags.set(PROT_WRITE, grant.flags.contains(GrantFlags::GRANT_WRITE));
flags.set(PROT_EXEC, grant.flags.contains(GrantFlags::GRANT_EXEC));
mmap_remote(
&new_addr_space_fd,
&grant_fd,
grant.offset as usize,
grant.base,
grant.size,
flags,
)?;
}
}
let buf = create_set_addr_space_buf(
*new_addr_space_fd,
__relibc_internal_fork_ret as usize,
initial_rsp as usize,
);
let _ = syscall::write(*new_addr_space_sel_fd, &buf)?;
}
{
// Reuse the same sigaltstack and signal entry (all memory will be re-mapped CoW later).
//
// Do this after the address space is cloned, since the kernel will get a shared
// reference to the TCB and whatever pages store the signal proc control struct.
{
let new_sighandler_fd = FdGuard::new(syscall::dup(*new_pid_fd, b"sighandler")?);
let _ = syscall::write(
*new_sighandler_fd,
&crate::signal::current_setsighandler_struct(),
)?;
}
}
copy_env_regs(*cur_pid_fd, *new_pid_fd)?;
}
// Copy the file table. We do this last to ensure that all previously used file descriptors are
// closed. The only exception -- the filetable selection fd and the current filetable fd --
// will be closed by the child process.
{
// TODO: Use file descriptor forwarding or something similar to avoid copying the file
// table in the kernel.
let new_filetable_fd = FdGuard::new(syscall::dup(*cur_filetable_fd, b"copy")?);
let new_filetable_sel_fd = FdGuard::new(syscall::dup(*new_pid_fd, b"current-filetable")?);
let _ = syscall::write(
*new_filetable_sel_fd,
&usize::to_ne_bytes(*new_filetable_fd),
)?;
}
let start_fd = FdGuard::new(syscall::dup(*new_pid_fd, b"start")?);
let _ = syscall::write(*start_fd, &[0])?;
Ok(new_pid)
}
pub fn new_child_process() -> Result<(FdGuard, usize)> {
// Create a new context (fields such as uid/gid will be inherited from the current context).
let fd = FdGuard::new(syscall::open(
"/scheme/thisproc/new/open_via_dup",
O_CLOEXEC,
)?);
// Extract pid.
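// fpath returns a path of the form "<scheme>:<pid>/..." (illustrative); the
// pid is parsed as the decimal run between the first ':' and the next '/'.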
let mut buffer = [0_u8; 64];
let len = syscall::fpath(*fd, &mut buffer)?;
let buffer = buffer.get(..len).ok_or(Error::new(ENAMETOOLONG))?;
let colon_idx = buffer
.iter()
.position(|c| *c == b':')
.ok_or(Error::new(EINVAL))?;
let slash_idx = buffer
.iter()
.skip(colon_idx)
.position(|c| *c == b'/')
.ok_or(Error::new(EINVAL))?
+ colon_idx;
let pid_bytes = buffer
.get(colon_idx + 1..slash_idx)
.ok_or(Error::new(EINVAL))?;
let pid_str = core::str::from_utf8(pid_bytes).map_err(|_| Error::new(EINVAL))?;
let pid = pid_str.parse::<usize>().map_err(|_| Error::new(EINVAL))?;
Ok((fd, pid))
}
pub fn copy_str(cur_pid_fd: usize, new_pid_fd: usize, key: &str) -> Result<()> {
let cur_name_fd = FdGuard::new(syscall::dup(cur_pid_fd, key.as_bytes())?);
let new_name_fd = FdGuard::new(syscall::dup(new_pid_fd, key.as_bytes())?);
// TODO: Max path size?
let mut buf = [0_u8; 256];
let len = syscall::read(*cur_name_fd, &mut buf)?;
let buf = buf.get(..len).ok_or(Error::new(ENAMETOOLONG))?;
syscall::write(*new_name_fd, buf)?;
Ok(())
}
use core::{ffi::c_int, mem::MaybeUninit, ptr::NonNull, sync::atomic::Ordering};
use syscall::{
data::AtomicU64, Error, RawAction, Result, RtSigInfo, SenderInfo, SetSighandlerData,
SigProcControl, Sigcontrol, SigcontrolFlags, TimeSpec, EAGAIN, EINTR, EINVAL, ENOMEM, EPERM,
SIGCHLD, SIGKILL, SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGWINCH,
};
use crate::{arch::*, proc::FdGuard, sync::Mutex, RtTcb, Tcb};
#[cfg(target_arch = "x86_64")]
static CPUID_EAX1_ECX: core::sync::atomic::AtomicU32 = core::sync::atomic::AtomicU32::new(0);
pub fn sighandler_function() -> usize {
// TODO: HWCAP?
__relibc_internal_sigentry as usize
}
/// ucontext_t representation
#[repr(C)]
pub struct SigStack {
#[cfg(any(
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "riscv64"
))]
_pad: [usize; 1], // pad from 7*8 to 64
#[cfg(target_arch = "x86")]
_pad: [usize; 3], // pad from 9*4 to 12*4
pub link: *mut SigStack,
pub old_stack: PosixStackt,
pub old_mask: u64,
pub(crate) sival: usize,
pub(crate) sig_code: u32,
pub(crate) sig_num: u32,
// x86_64: 864 bytes
// i686: 512 bytes
// aarch64: 272 bytes (SIMD TODO)
// riscv64: 520 bytes (vector extensions TODO)
pub regs: ArchIntRegs,
}
#[repr(C)]
pub struct PosixStackt {
pub sp: *mut (),
pub flags: i32,
pub size: usize,
}
pub const SS_ONSTACK: usize = 1;
pub const SS_DISABLE: usize = 2;
impl From<Sigaltstack> for PosixStackt {
fn from(value: Sigaltstack) -> Self {
match value {
Sigaltstack::Disabled => PosixStackt {
sp: core::ptr::null_mut(),
size: 0,
flags: SS_DISABLE.try_into().unwrap(),
},
Sigaltstack::Enabled {
onstack,
base,
size,
} => PosixStackt {
sp: base.cast(),
size,
flags: if onstack {
SS_ONSTACK.try_into().unwrap()
} else {
0
},
},
}
}
}
#[repr(C)]
// TODO: This struct is for practical reasons locked to Linux's ABI, but avoid redefining
// it here. Alternatively, check at compile time that the structs are equivalent.
pub struct SiginfoAbi {
pub si_signo: i32,
pub si_errno: i32,
pub si_code: i32,
pub si_pid: i32, // pid_t
pub si_uid: i32, // uid_t
pub si_addr: *mut (), // *mut c_void
pub si_status: i32,
pub si_value: usize, // sigval
}
#[inline(always)]
unsafe fn inner(stack: &mut SigStack) {
let os = &Tcb::current().unwrap().os_specific;
let stack_ptr = NonNull::from(&mut *stack);
stack.link = core::mem::replace(&mut (*os.arch.get()).last_sigstack, Some(stack_ptr))
.map_or_else(core::ptr::null_mut, |x| x.as_ptr());
let signals_were_disabled = (*os.arch.get()).disable_signals_depth > 0;
let targeted_thread_not_process = stack.sig_num >= 64;
stack.sig_num %= 64;
// asm counts from 0
stack.sig_num += 1;
let (sender_pid, sender_uid) = {
let area = &mut *os.arch.get();
// Undefined if the signal was not realtime
stack.sival = area.tmp_rt_inf.arg;
stack.old_stack = arch_pre(stack, area);
if (stack.sig_num - 1) / 32 == 1 && !targeted_thread_not_process {
stack.sig_code = area.tmp_rt_inf.code as u32;
(area.tmp_rt_inf.pid, area.tmp_rt_inf.uid)
} else {
stack.sig_code = 0; // TODO: SI_USER constant?
// TODO: Handle SIGCHLD. Maybe that should always be queued though?
let inf = SenderInfo::from_raw(area.tmp_id_inf);
(inf.pid, inf.ruid)
}
};
let sigaction = {
let guard = SIGACTIONS_LOCK.lock();
let action = convert_old(&PROC_CONTROL_STRUCT.actions[stack.sig_num as usize - 1]);
if action.flags.contains(SigactionFlags::RESETHAND) {
drop(guard);
sigaction(
stack.sig_num as u8,
Some(&Sigaction {
kind: SigactionKind::Default,
mask: 0,
flags: SigactionFlags::empty(),
}),
None,
);
}
action
};
let shall_restart = sigaction.flags.contains(SigactionFlags::RESTART);
let handler = match sigaction.kind {
SigactionKind::Ignore => {
panic!("ctl {:x?} signal {}", os.control, stack.sig_num)
}
SigactionKind::Default => {
syscall::exit(stack.sig_num as usize);
unreachable!();
}
SigactionKind::Handled { handler } => handler,
};
// Set sigmask to sa_mask and unmark the signal as pending.
let prev_sigallow = get_allowset_raw(&os.control.word);
let mut sigallow_inside = !sigaction.mask & prev_sigallow;
if !sigaction.flags.contains(SigactionFlags::NODEFER) {
sigallow_inside &= !sig_bit(stack.sig_num);
}
let _pending_when_sa_mask = set_allowset_raw(&os.control.word, prev_sigallow, sigallow_inside);
// TODO: If sa_mask caused signals to be unblocked, deliver one or all of those first?
// Re-enable signals again.
let control_flags = &os.control.control_flags;
control_flags.store(
control_flags.load(Ordering::Relaxed) & !SigcontrolFlags::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
core::sync::atomic::compiler_fence(Ordering::Acquire);
stack.old_mask = prev_sigallow;
// Call handler, either sa_handler or sa_siginfo depending on flag.
if sigaction.flags.contains(SigactionFlags::SIGINFO)
&& let Some(sigaction) = handler.sigaction
{
let info = SiginfoAbi {
si_signo: stack.sig_num as c_int,
si_addr: core::ptr::null_mut(),
si_code: stack.sig_code as i32,
si_errno: 0,
si_pid: sender_pid as i32,
si_status: 0,
si_uid: sender_uid as i32,
si_value: stack.sival,
};
sigaction(
stack.sig_num as c_int,
core::ptr::addr_of!(info).cast(),
stack as *mut SigStack as *mut (),
);
} else if let Some(handler) = handler.handler {
handler(stack.sig_num as c_int);
}
// Disable signals while we modify the sigmask again
control_flags.store(
control_flags.load(Ordering::Relaxed) | SigcontrolFlags::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
core::sync::atomic::compiler_fence(Ordering::Acquire);
// Update allowset again.
let new_mask = stack.old_mask;
let old_mask = get_allowset_raw(&os.control.word);
let _pending_when_restored_mask = set_allowset_raw(&os.control.word, old_mask, new_mask);
// TODO: If resetting the sigmask caused signals to be unblocked, then should they be delivered
// here? And would it be possible to tail-call-optimize that?
(*os.arch.get()).last_sig_was_restart = shall_restart;
// TODO: Support setting uc_link to jump back to a different context?
(*os.arch.get()).last_sigstack = NonNull::new(stack.link);
// TODO: Support restoring uc_stack?
// And re-enable them again
if !signals_were_disabled {
core::sync::atomic::compiler_fence(Ordering::Release);
control_flags.store(
control_flags.load(Ordering::Relaxed) & !SigcontrolFlags::INHIBIT_DELIVERY.bits(),
Ordering::Relaxed,
);
}
}
#[cfg(not(target_arch = "x86"))]
pub(crate) unsafe extern "C" fn inner_c(stack: usize) {
inner(&mut *(stack as *mut SigStack))
}
#[cfg(target_arch = "x86")]
pub(crate) unsafe extern "fastcall" fn inner_fastcall(stack: usize) {
inner(&mut *(stack as *mut SigStack))
}
pub fn get_sigmask() -> Result<u64> {
let mut mask = 0;
modify_sigmask(Some(&mut mask), Option::<fn(u64) -> u64>::None)?;
Ok(mask)
}
pub fn set_sigmask(new: Option<u64>, old: Option<&mut u64>) -> Result<()> {
modify_sigmask(old, new.map(move |newmask| move |_| newmask))
}
pub fn or_sigmask(new: Option<u64>, old: Option<&mut u64>) -> Result<()> {
// Parsing nightmare... :)
modify_sigmask(
old,
new.map(move |newmask| move |oldmask| oldmask | newmask),
)
}
pub fn andn_sigmask(new: Option<u64>, old: Option<&mut u64>) -> Result<()> {
modify_sigmask(
old,
new.map(move |newmask| move |oldmask| oldmask & !newmask),
)
}
fn get_allowset_raw(words: &[AtomicU64; 2]) -> u64 {
(words[0].load(Ordering::Relaxed) >> 32) | ((words[1].load(Ordering::Relaxed) >> 32) << 32)
}
/// Sets the allow set from `old` to `new`, returning what was pending at the time.
fn set_allowset_raw(words: &[AtomicU64; 2], old: u64, new: u64) -> u64 {
// This assumes *only this thread* can change the allowset. If this rule is broken, the use of
// fetch_add will corrupt the words entirely. fetch_add is very efficient on x86, being
// generated as LOCK XADD which is the fastest RMW instruction AFAIK.
let prev_w0 = words[0].fetch_add(
((new & 0xffff_ffff) << 32).wrapping_sub((old & 0xffff_ffff) << 32),
Ordering::Relaxed,
) & 0xffff_ffff;
let prev_w1 = words[1].fetch_add(
((new >> 32) << 32).wrapping_sub((old >> 32) << 32),
Ordering::Relaxed,
) & 0xffff_ffff;
prev_w0 | (prev_w1 << 32)
}
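// Worked example (illustrative): if the low word's allow half goes from
// old = 0b01 to new = 0b11, the addend is ((0b11 << 32) - (0b01 << 32))
// = 0b10 << 32, so a single LOCK XADD flips only the high (allow) half while
// atomically snapshotting the low (pending) half in the returned value.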
fn modify_sigmask(old: Option<&mut u64>, op: Option<impl FnOnce(u64) -> u64>) -> Result<()> {
let _guard = tmp_disable_signals();
let ctl = current_sigctl();
let prev = get_allowset_raw(&ctl.word);
if let Some(old) = old {
*old = !prev;
}
let Some(op) = op else {
return Ok(());
};
let next = !op(!prev);
let pending = set_allowset_raw(&ctl.word, prev, next);
// POSIX requires that at least one pending unblocked signal be delivered before
// pthread_sigmask returns, if there is one.
if pending != 0 {
unsafe {
manually_enter_trampoline();
}
}
Ok(())
}
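// Note on the inversions above: the shared words store an *allow* set, while
// the POSIX-facing API deals in a *blocked* set (the sigmask), so mask == !allow.
// E.g. blocking one signal via `or_sigmask(Some(sig_bit(sig)), None)` computes
// next = !(!prev | bit), which simply clears that signal's allow bit.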
#[derive(Clone, Copy, Default)]
pub enum SigactionKind {
#[default]
Default,
Ignore,
Handled {
handler: SignalHandler,
},
}
#[derive(Clone, Copy, Default)]
pub struct Sigaction {
pub kind: SigactionKind,
pub mask: u64,
pub flags: SigactionFlags,
}
impl Sigaction {
fn ip(&self) -> usize {
unsafe {
match self.kind {
SigactionKind::Handled { handler } => {
if self.flags.contains(SigactionFlags::SIGINFO) {
handler.sigaction.map_or(0, |a| a as usize)
} else {
handler.handler.map_or(0, |a| a as usize)
}
}
_ => 0,
}
}
}
}
const MASK_DONTCARE: u64 = !0;
fn convert_old(action: &RawAction) -> Sigaction {
let old_first = action.first.load(Ordering::Relaxed);
let old_mask = action.user_data.load(Ordering::Relaxed);
let handler = (old_first & !(u64::from(STORED_FLAGS) << 32)) as usize;
let flags = SigactionFlags::from_bits_retain(((old_first >> 32) as u32) & STORED_FLAGS);
let kind = if handler == default_handler as usize {
SigactionKind::Default
} else if flags.contains(SigactionFlags::IGNORED) {
SigactionKind::Ignore
} else {
SigactionKind::Handled {
handler: unsafe { core::mem::transmute(handler) },
}
};
Sigaction {
mask: old_mask,
flags,
kind,
}
}
pub fn sigaction(signal: u8, new: Option<&Sigaction>, old: Option<&mut Sigaction>) -> Result<()> {
if matches!(usize::from(signal), 0 | 32 | SIGKILL | SIGSTOP | 65..) {
return Err(Error::new(EINVAL));
}
let _sigguard = tmp_disable_signals();
let ctl = current_sigctl();
let _guard = SIGACTIONS_LOCK.lock();
let action = &PROC_CONTROL_STRUCT.actions[usize::from(signal) - 1];
if let Some(old) = old {
*old = convert_old(action);
}
let Some(new) = new else {
return Ok(());
};
let explicit_handler = new.ip();
let (mask, flags, handler) = match (usize::from(signal), new.kind) {
(_, SigactionKind::Ignore) | (SIGURG | SIGWINCH, SigactionKind::Default) => {
// TODO: POSIX specifies that pending signals shall be discarded if set to SIG_IGN by
// sigaction.
// TODO: handle tmp_disable_signals
(
MASK_DONTCARE,
SigactionFlags::IGNORED,
if matches!(new.kind, SigactionKind::Default) {
default_handler as usize
} else {
0
},
)
}
// TODO: Handle pending signals before these flags are set.
(SIGTSTP | SIGTTOU | SIGTTIN, SigactionKind::Default) => (
MASK_DONTCARE,
SigactionFlags::SIG_SPECIFIC,
default_handler as usize,
),
(SIGCHLD, SigactionKind::Default) => {
let nocldstop_bit = new.flags & SigactionFlags::SIG_SPECIFIC;
(
MASK_DONTCARE,
SigactionFlags::IGNORED | nocldstop_bit,
default_handler as usize,
)
}
(_, SigactionKind::Default) => (new.mask, new.flags, default_handler as usize),
(_, SigactionKind::Handled { .. }) => (new.mask, new.flags, explicit_handler),
};
let new_first = (handler as u64) | (u64::from(flags.bits() & STORED_FLAGS) << 32);
action.first.store(new_first, Ordering::Relaxed);
action.user_data.store(mask, Ordering::Relaxed);
Ok(())
}
fn current_sigctl() -> &'static Sigcontrol {
&unsafe { Tcb::current() }.unwrap().os_specific.control
}
pub struct TmpDisableSignalsGuard {
_inner: (),
}
pub fn tmp_disable_signals() -> TmpDisableSignalsGuard {
unsafe {
let ctl = &current_sigctl().control_flags;
ctl.store(
ctl.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
core::sync::atomic::compiler_fence(Ordering::Acquire);
// TODO: fence?
(*Tcb::current().unwrap().os_specific.arch.get()).disable_signals_depth += 1;
}
TmpDisableSignalsGuard { _inner: () }
}
impl Drop for TmpDisableSignalsGuard {
fn drop(&mut self) {
unsafe {
let depth =
&mut (*Tcb::current().unwrap().os_specific.arch.get()).disable_signals_depth;
*depth -= 1;
if *depth == 0 {
let ctl = &current_sigctl().control_flags;
ctl.store(
ctl.load(Ordering::Relaxed) & !syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
core::sync::atomic::compiler_fence(Ordering::Acquire);
}
}
}
}
bitflags::bitflags! {
// Some flags are ignored by the rt, but they match relibc's 1:1 to simplify conversion.
#[derive(Clone, Copy, Default)]
pub struct SigactionFlags: u32 {
const NOCLDWAIT = 2;
const RESTORER = 4;
const SIGINFO = 0x0200_0000;
const ONSTACK = 0x0400_0000;
const RESTART = 0x0800_0000;
const NODEFER = 0x1000_0000;
const RESETHAND = 0x2000_0000;
const SIG_SPECIFIC = 0x4000_0000;
const IGNORED = 0x8000_0000;
}
}
const STORED_FLAGS: u32 = 0xfe00_0000;
fn default_handler(sig: c_int) {
syscall::exit(sig as usize);
}
#[derive(Clone, Copy)]
pub union SignalHandler {
pub handler: Option<extern "C" fn(c_int)>,
pub sigaction: Option<unsafe extern "C" fn(c_int, *const (), *mut ())>,
}
static SIGACTIONS_LOCK: Mutex<()> = Mutex::new(());
pub(crate) static PROC_CONTROL_STRUCT: SigProcControl = SigProcControl {
pending: AtomicU64::new(0),
actions: [const {
RawAction {
first: AtomicU64::new(0),
user_data: AtomicU64::new(0),
}
}; 64],
sender_infos: [const { AtomicU64::new(0) }; 32],
};
fn combine_allowset([lo, hi]: [u64; 2]) -> u64 {
(lo >> 32) | ((hi >> 32) << 32)
}
const fn sig_bit(sig: u32) -> u64 {
//assert_ne!(sig, 32);
//assert_ne!(sig, 0);
1 << (sig - 1)
}
pub fn setup_sighandler(tcb: &RtTcb) {
{
let _guard = SIGACTIONS_LOCK.lock();
for (sig_idx, action) in PROC_CONTROL_STRUCT.actions.iter().enumerate() {
let sig = sig_idx + 1;
let bits = if matches!(sig, SIGTSTP | SIGTTIN | SIGTTOU) {
SigactionFlags::SIG_SPECIFIC
} else if matches!(sig, SIGCHLD | SIGURG | SIGWINCH) {
SigactionFlags::IGNORED
} else {
SigactionFlags::empty()
};
action.first.store(
(u64::from(bits.bits()) << 32) | default_handler as u64,
Ordering::Relaxed,
);
}
}
let arch = unsafe { &mut *tcb.arch.get() };
{
// The asm decides whether to use the altstack, based on whether the saved stack pointer
// was already on that stack. Thus, setting the altstack to the entire address space is
// equivalent to not using any altstack at all (the default).
arch.altstack_top = usize::MAX;
arch.altstack_bottom = 0;
// TODO
#[cfg(any(target_arch = "x86", target_arch = "aarch64", target_arch = "riscv64"))]
{
arch.pctl = core::ptr::addr_of!(PROC_CONTROL_STRUCT) as usize;
}
}
#[cfg(target_arch = "x86_64")]
{
let cpuid_eax1_ecx = unsafe { core::arch::x86_64::__cpuid(1) }.ecx;
CPUID_EAX1_ECX.store(cpuid_eax1_ecx, core::sync::atomic::Ordering::Relaxed);
SUPPORTS_AVX.store(u8::from(cpuid_eax1_ecx & 1 << 28 != 0), Ordering::Relaxed);
}
let data = current_setsighandler_struct();
let fd = FdGuard::new(
syscall::dup(**tcb.thread_fd(), b"sighandler").expect("failed to open sighandler fd"),
);
let _ = syscall::write(*fd, &data).expect("failed to write to sighandler fd");
// TODO: Inherited set of ignored signals
set_sigmask(Some(0), None);
}
pub type RtSigarea = RtTcb; // TODO
pub fn current_setsighandler_struct() -> SetSighandlerData {
SetSighandlerData {
user_handler: sighandler_function(),
excp_handler: 0, // TODO
thread_control_addr: core::ptr::addr_of!(
unsafe { Tcb::current() }.unwrap().os_specific.control
) as usize,
proc_control_addr: &PROC_CONTROL_STRUCT as *const SigProcControl as usize,
}
}
#[derive(Clone, Copy, Default, PartialEq)]
pub enum Sigaltstack {
#[default]
Disabled,
Enabled {
onstack: bool,
base: *mut (),
size: usize,
},
}
pub(crate) fn get_sigaltstack(tcb: &SigArea, sp: usize) -> Sigaltstack {
if tcb.altstack_bottom == 0 && tcb.altstack_top == usize::MAX {
Sigaltstack::Disabled
} else {
Sigaltstack::Enabled {
base: tcb.altstack_bottom as *mut (),
size: tcb.altstack_top - tcb.altstack_bottom,
onstack: (tcb.altstack_bottom..tcb.altstack_top).contains(&sp),
}
}
}
pub unsafe fn sigaltstack(
new: Option<&Sigaltstack>,
old_out: Option<&mut Sigaltstack>,
) -> Result<()> {
let _g = tmp_disable_signals();
let tcb = &mut *Tcb::current().unwrap().os_specific.arch.get();
let old = get_sigaltstack(tcb, crate::arch::current_sp());
if matches!(old, Sigaltstack::Enabled { onstack: true, .. }) && new != Some(&old) {
return Err(Error::new(EPERM));
}
if let Some(old_out) = old_out {
*old_out = old;
}
if let Some(new) = new {
match *new {
Sigaltstack::Disabled => {
tcb.altstack_bottom = 0;
tcb.altstack_top = usize::MAX;
}
Sigaltstack::Enabled { onstack: true, .. } => return Err(Error::new(EINVAL)),
Sigaltstack::Enabled {
base,
size,
onstack: false,
} => {
if size < MIN_SIGALTSTACK_SIZE {
return Err(Error::new(ENOMEM));
}
tcb.altstack_bottom = base as usize;
tcb.altstack_top = base as usize + size;
}
}
}
Ok(())
}
pub const MIN_SIGALTSTACK_SIZE: usize = 2048;
pub fn currently_pending_blocked() -> u64 {
let control = &unsafe { Tcb::current().unwrap() }.os_specific.control;
let w0 = control.word[0].load(Ordering::Relaxed);
let w1 = control.word[1].load(Ordering::Relaxed);
let allow = (w0 >> 32) | ((w1 >> 32) << 32);
let thread_pending = (w0 & 0xffff_ffff) | ((w1 & 0xffff_ffff) << 32);
let proc_pending = PROC_CONTROL_STRUCT.pending.load(Ordering::Relaxed);
core::sync::atomic::fence(Ordering::Acquire); // TODO: Correct ordering?
(thread_pending | proc_pending) & !allow
}
pub enum Unreachable {}
pub fn await_signal_async(inner_allowset: u64) -> Result<Unreachable> {
let _guard = tmp_disable_signals();
let control = &unsafe { Tcb::current().unwrap() }.os_specific.control;
let old_allowset = get_allowset_raw(&control.word);
set_allowset_raw(&control.word, old_allowset, inner_allowset);
let res = syscall::nanosleep(
&TimeSpec {
tv_sec: i64::MAX,
tv_nsec: 0,
},
&mut TimeSpec::default(),
);
set_allowset_raw(&control.word, inner_allowset, old_allowset);
if res == Err(Error::new(EINTR)) {
unsafe {
manually_enter_trampoline();
}
}
res?;
unreachable!()
}
// TODO: deadline-based API
pub fn await_signal_sync(inner_allowset: u64, timeout: Option<&TimeSpec>) -> Result<SiginfoAbi> {
let _guard = tmp_disable_signals();
let control = &unsafe { Tcb::current().unwrap() }.os_specific.control;
let old_allowset = get_allowset_raw(&control.word);
let proc_pending = PROC_CONTROL_STRUCT.pending.load(Ordering::Acquire);
let thread_pending = set_allowset_raw(&control.word, old_allowset, inner_allowset);
// Check if there are already signals matching the requested set, before waiting.
if let Some(info) = try_claim_multiple(proc_pending, thread_pending, inner_allowset, control) {
// TODO: RAII
set_allowset_raw(&control.word, inner_allowset, old_allowset);
return Ok(info);
}
let res = match timeout {
Some(t) => syscall::nanosleep(t, &mut TimeSpec::default()),
None => syscall::nanosleep(
&TimeSpec {
tv_sec: i64::MAX,
tv_nsec: 0,
},
&mut TimeSpec::default(),
),
};
let thread_pending = set_allowset_raw(&control.word, inner_allowset, old_allowset);
let proc_pending = PROC_CONTROL_STRUCT.pending.load(Ordering::Acquire);
if let Err(error) = res
&& error.errno != EINTR
{
return Err(error);
}
// Then check if there were any signals left after waiting.
try_claim_multiple(proc_pending, thread_pending, inner_allowset, control)
// Normally ETIMEDOUT but not for sigtimedwait.
.ok_or(Error::new(EAGAIN))
}
fn try_claim_multiple(
mut proc_pending: u64,
mut thread_pending: u64,
allowset: u64,
control: &Sigcontrol,
) -> Option<SiginfoAbi> {
while (proc_pending | thread_pending) & allowset != 0 {
let sig_idx = ((proc_pending | thread_pending) & allowset).trailing_zeros();
if thread_pending & allowset & (1 << sig_idx) != 0
&& let Some(res) = try_claim_single(sig_idx, Some(control))
{
return Some(res);
}
thread_pending &= !(1 << sig_idx);
if proc_pending & allowset & (1 << sig_idx) != 0
&& let Some(res) = try_claim_single(sig_idx, None)
{
return Some(res);
}
proc_pending &= !(1 << sig_idx);
}
None
}
fn try_claim_single(sig_idx: u32, thread_control: Option<&Sigcontrol>) -> Option<SiginfoAbi> {
let sig_group = sig_idx / 32;
if sig_group == 1 && thread_control.is_none() {
// Queued (realtime) signal
let mut ret = MaybeUninit::<RtSigInfo>::uninit();
let rt_inf = unsafe {
syscall::syscall2(
syscall::SYS_SIGDEQUEUE,
ret.as_mut_ptr() as usize,
sig_idx as usize - 32,
)
.ok()?;
ret.assume_init()
};
Some(SiginfoAbi {
si_signo: sig_idx as i32 + 1,
si_errno: 0,
si_code: rt_inf.code,
si_pid: rt_inf.pid as i32,
si_uid: rt_inf.uid as i32,
si_status: 0,
si_value: rt_inf.arg,
si_addr: core::ptr::null_mut(),
})
} else {
// Idempotent (standard or thread realtime) signal
let info = SenderInfo::from_raw(match thread_control {
Some(ctl) => {
// Only this thread can clear pending bits, so this will always succeed.
let info = ctl.sender_infos[sig_idx as usize].load(Ordering::Acquire);
// TODO: Ordering
ctl.word[sig_group as usize].fetch_and(!(1 << (sig_idx % 32)), Ordering::Release);
info
}
None => {
let info =
PROC_CONTROL_STRUCT.sender_infos[sig_idx as usize].load(Ordering::Acquire);
if PROC_CONTROL_STRUCT
.pending
.fetch_and(!(1 << sig_idx), Ordering::Release)
& (1 << sig_idx)
== 0
{
// already claimed
return None;
}
info
}
});
Some(SiginfoAbi {
si_signo: sig_idx as i32 + 1,
si_errno: 0,
si_code: 0, // TODO: SI_USER const?
si_pid: info.pid as i32,
si_uid: info.ruid as i32,
si_status: 0,
si_value: 0, // undefined
si_addr: core::ptr::null_mut(),
})
}
}
// TODO: Share code for simple futex-based mutex between relibc's Mutex<()> and this.
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
sync::atomic::{AtomicU32, Ordering},
};
pub struct Mutex<T> {
pub lockword: AtomicU32,
pub inner: UnsafeCell<T>,
}
const UNLOCKED: u32 = 0;
const LOCKED: u32 = 1;
const WAITING: u32 = 2;
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
impl<T> Mutex<T> {
pub const fn new(t: T) -> Self {
Self {
lockword: AtomicU32::new(0),
inner: UnsafeCell::new(t),
}
}
pub fn lock(&self) -> MutexGuard<'_, T> {
while self
.lockword
.compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
core::hint::spin_loop();
}
MutexGuard { lock: self }
}
}
pub struct MutexGuard<'l, T> {
lock: &'l Mutex<T>,
}
impl<T> Deref for MutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.lock.inner.get() }
}
}
impl<T> DerefMut for MutexGuard<'_, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.lock.inner.get() }
}
}
impl<T> Drop for MutexGuard<'_, T> {
fn drop(&mut self) {
self.lock.lockword.store(UNLOCKED, Ordering::Release);
}
}
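// Usage sketch (illustrative): the critical section is the guard's lifetime.
//
//     static LOCK: Mutex<()> = Mutex::new(());
//     let _guard = LOCK.lock(); // spins until UNLOCKED -> LOCKED succeeds
//     /* ...critical section... */
//     // guard dropped here: lockword.store(UNLOCKED, Release)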
use core::{
ptr::addr_of,
sync::atomic::{AtomicU32, Ordering},
};
use syscall::{
error::{Error, Result, EINTR},
RtSigInfo, TimeSpec,
};
use crate::{arch::manually_enter_trampoline, proc::FdGuard, signal::tmp_disable_signals, Tcb};
#[inline]
fn wrapper<T>(restart: bool, mut f: impl FnMut() -> Result<T>) -> Result<T> {
loop {
let _guard = tmp_disable_signals();
let rt_sigarea = unsafe { &Tcb::current().unwrap().os_specific };
let res = f();
if let Err(err) = res
&& err == Error::new(EINTR)
{
unsafe {
manually_enter_trampoline();
}
if restart && unsafe { (*rt_sigarea.arch.get()).last_sig_was_restart } {
continue;
}
}
return res;
}
}
// TODO: uninitialized memory?
#[inline]
pub fn posix_read(fd: usize, buf: &mut [u8]) -> Result<usize> {
wrapper(true, || syscall::read(fd, buf))
}
#[inline]
pub fn posix_write(fd: usize, buf: &[u8]) -> Result<usize> {
wrapper(true, || syscall::write(fd, buf))
}
#[inline]
pub fn posix_kill(pid: usize, sig: usize) -> Result<()> {
match wrapper(false, || syscall::kill(pid, sig)) {
Ok(_) | Err(Error { errno: EINTR }) => Ok(()),
Err(error) => Err(error),
}
}
#[inline]
pub fn posix_sigqueue(pid: usize, sig: usize, arg: usize) -> Result<()> {
let siginf = RtSigInfo {
arg,
code: -1, // TODO: SI_QUEUE constant
uid: 0, // TODO
pid: posix_getpid(),
};
match wrapper(false, || unsafe {
syscall::syscall3(syscall::SYS_SIGENQUEUE, pid, sig, addr_of!(siginf) as usize)
}) {
Ok(_) | Err(Error { errno: EINTR }) => Ok(()),
Err(error) => Err(error),
}
}
#[inline]
pub fn posix_getpid() -> u32 {
// SAFETY: read-only except during program/fork child initialization
unsafe { crate::THIS_PID.get().read() }
}
#[inline]
pub fn posix_killpg(pgrp: usize, sig: usize) -> Result<()> {
match wrapper(false, || syscall::kill(usize::wrapping_neg(pgrp), sig)) {
Ok(_) | Err(Error { errno: EINTR }) => Ok(()),
Err(error) => Err(error),
}
}
#[inline]
pub unsafe fn sys_futex_wait(addr: *mut u32, val: u32, deadline: Option<&TimeSpec>) -> Result<()> {
wrapper(true, || {
syscall::syscall5(
syscall::SYS_FUTEX,
addr as usize,
syscall::FUTEX_WAIT,
val as usize,
deadline.map_or(0, |d| d as *const _ as usize),
0,
)
.map(|_| ())
})
}
#[inline]
pub unsafe fn sys_futex_wake(addr: *mut u32, num: u32) -> Result<u32> {
syscall::syscall5(
syscall::SYS_FUTEX,
addr as usize,
syscall::FUTEX_WAKE,
num as usize,
0,
0,
)
.map(|awoken| awoken as u32)
}
pub fn sys_waitpid(pid: usize, status: &mut usize, flags: usize) -> Result<usize> {
wrapper(true, || {
syscall::waitpid(
pid,
status,
syscall::WaitFlags::from_bits(flags).expect("waitpid: invalid bit pattern"),
)
})
}
pub fn posix_kill_thread(thread_fd: usize, signal: u32) -> Result<()> {
let killfd = FdGuard::new(syscall::dup(thread_fd, b"signal")?);
match wrapper(false, || syscall::write(*killfd, &signal.to_ne_bytes())) {
Ok(_) | Err(Error { errno: EINTR }) => Ok(()),
Err(error) => Err(error),
}
}
static UMASK: AtomicU32 = AtomicU32::new(0o022);
/// Controls the set of permission bits removed from the `mode` argument when new files are created.
///
/// Must be validated by the caller
//
// TODO: validate here?
#[inline]
pub fn swap_umask(mask: u32) -> u32 {
UMASK.swap(mask, Ordering::AcqRel)
}
#[inline]
pub fn get_umask() -> u32 {
UMASK.load(Ordering::Acquire)
}
use core::mem::size_of;
use syscall::{Result, O_CLOEXEC};
use crate::{arch::*, proc::*, signal::tmp_disable_signals, RtTcb};
/// Spawns a new context sharing the same address space as the current one (i.e. a new thread).
pub unsafe fn rlct_clone_impl(stack: *mut usize) -> Result<FdGuard> {
let cur_thr_fd = RtTcb::current().thread_fd();
let new_thr_fd = FdGuard::new(syscall::open(
"/scheme/thisproc/new-thread/open_via_dup",
O_CLOEXEC,
)?);
copy_str(**cur_thr_fd, *new_thr_fd, "name")?;
// Inherit existing address space
{
let cur_addr_space_fd = FdGuard::new(syscall::dup(**cur_thr_fd, b"addrspace")?);
let new_addr_space_sel_fd = FdGuard::new(syscall::dup(*new_thr_fd, b"current-addrspace")?);
let buf = create_set_addr_space_buf(
*cur_addr_space_fd,
__relibc_internal_rlct_clone_ret as usize,
stack as usize,
);
let _ = syscall::write(*new_addr_space_sel_fd, &buf)?;
}
// Inherit reference to file table
{
let cur_filetable_fd = FdGuard::new(syscall::dup(**cur_thr_fd, b"filetable")?);
let new_filetable_sel_fd = FdGuard::new(syscall::dup(*new_thr_fd, b"current-filetable")?);
let _ = syscall::write(
*new_filetable_sel_fd,
&usize::to_ne_bytes(*cur_filetable_fd),
)?;
}
// Since the signal handler is not yet initialized, signals specifically targeting the thread
// (relibc is only required to implement thread-specific signals that already originate from
// the same process) will be discarded. Process-specific signals will ignore this new thread,
// until it has initialized its own signal handler.
// Unblock context.
let start_fd = FdGuard::new(syscall::dup(*new_thr_fd, b"start")?);
let _ = syscall::write(*start_fd, &[0])?;
Ok(new_thr_fd)
}
pub unsafe fn exit_this_thread(stack_base: *mut (), stack_size: usize) -> ! {
let _guard = tmp_disable_signals();
let tcb = RtTcb::current();
let thread_fd = tcb.thread_fd();
let _ = syscall::funmap(tcb as *const RtTcb as usize, syscall::PAGE_SIZE);
// TODO: modify interface so it writes directly to the thread fd?
let status_fd = syscall::dup(**thread_fd, b"status").unwrap();
let mut buf = [0; size_of::<usize>() * 3];
plain::slice_from_mut_bytes(&mut buf)
.unwrap()
.copy_from_slice(&[usize::MAX, stack_base as usize, stack_size]);
syscall::write(status_fd, &buf).unwrap();
unreachable!()
}
#!/usr/bin/env bash
set -e
if ! which redoxer
then
cargo install redoxer
fi
if [ ! -d "$HOME/.redoxer/toolchain" ]
then
redoxer toolchain
fi
export CARGOFLAGS=""
export CARGO_TEST="redoxer"
export TEST_RUNNER="redoxer exec --folder . --"
redoxer env make "$@"
#!/bin/bash
set -e
target=$1
deps_dir=$2
if [ -z "$target" ] || [ -z "$deps_dir" ]; then
echo "Usage:\n\t./renamesyms.sh TARGET DEPS_DIR"
exit 1
fi
if [ ! -f "$target" ]; then
echo "Target file '$target' does not exist"
exit 1
fi
if [ ! -d "$deps_dir" ] ; then
echo "Deps dir '$deps_dir' does not exist or not a directory"
exit 1
fi
symbols_file=`mktemp`
special_syms=(
__rdl_oom
__rg_alloc
__rg_alloc_zeroed
__rg_dealloc
__rg_oom
__rg_realloc
__rust_alloc
__rust_alloc_error_handler
__rust_alloc_error_handler_should_panic
__rust_alloc_zeroed
__rust_dealloc
__rust_no_alloc_shim_is_unstable
__rust_realloc
)
for dep in `find $deps_dir -type f -name "*.rlib"`; do
"${NM}" --format=posix -g "$dep" 2>/dev/null | sed 's/.*:.*//g' | awk '{if ($2 == "T") print $1}' | sed 's/^\(.*\)$/\1 __relibc_\1/g' >> $symbols_file
done
for special_sym in "${special_syms[@]}"; do
echo "$special_sym __relibc_$special_sym" >> $symbols_file
done
sorted_file=`mktemp`
sort -u "$symbols_file" > "$sorted_file"
rm -f "$symbols_file"
"${OBJCOPY}" --redefine-syms="$sorted_file" "$target"
rm -f "$sorted_file"
nightly-2018-06-19
[toolchain]
channel = "nightly-2025-01-12"
components = ["rust-src"]
max_width = 100
hard_tabs = false
tab_spaces = 4
newline_style = "Unix"
indent_style = "Block"
format_strings = false
blank_lines_lower_bound = 0
blank_lines_upper_bound = 1
brace_style = "SameLineWhere"
disable_all_formatting = false
edition = "2018"
empty_item_single_line = true
fn_single_line = false
where_single_line = false
imports_indent = "Visual"
imports_layout = "Mixed"
reorder_extern_crates = true
reorder_extern_crates_in_group = true
reorder_imports = false
reorder_imported_names = true
spaces_within_parens_and_brackets = false
remove_blank_lines_at_start_or_end_of_block = true
fn_args_density = "Tall"
brace_style = "SameLineWhere"
trailing_comma = "Vertical"
blank_lines_upper_bound = 1
blank_lines_lower_bound = 0
force_explicit_abi = true
write_mode = "Overwrite"
disable_all_formatting = false
skip_children = false
format_strings = false
hard_tabs = false
hide_parse_errors = false
report_todo = "Never"
report_fixme = "Never"
imports_granularity = "Crate"
imports_indent = "Block"
imports_layout = "Mixed"
indent_style = "Block"
max_width = 100
newline_style = "Unix"
skip_children = false
tab_spaces = 4
trailing_comma = "Vertical"
where_single_line = false
#include <stdarg.h>
#include <sys/types.h>
int sys_open(const char* filename, int flags, mode_t mode);
int open(const char* filename, int flags, ...) {
mode_t mode = 0;
va_list ap;
va_start(ap, flags);
mode = va_arg(ap, mode_t);
va_end(ap);
return sys_open(filename, flags, mode);
}
int sys_fcntl(int fildes, int cmd, int args);
int fcntl(int fildes, int cmd, ...) {
int args = 0;
va_list ap;
va_start(ap, cmd);
args = va_arg(ap, int);
va_end(ap);
return sys_fcntl(fildes, cmd, args);
}
#include <stdarg.h>
#include <stddef.h>
typedef struct FILE FILE;
int vfprintf(FILE * stream, const char * fmt, va_list ap);
int fprintf(FILE * stream, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vfprintf(stream, fmt, ap);
va_end(ap);
return ret;
}
int vprintf(const char * fmt, va_list ap);
int printf(const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vprintf(fmt, ap);
va_end(ap);
return ret;
}
int vsnprintf(char * s, size_t n, const char * fmt, va_list ap);
int snprintf(char * s, size_t n, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vsnprintf(s, n, fmt, ap);
va_end(ap);
return ret;
}
int vsprintf(char * s, const char * fmt, va_list ap);
int sprintf(char *s, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vsprintf(s, fmt, ap);
va_end(ap);
return ret;
}
int vfscanf(FILE * stream, const char * fmt, va_list ap);
int fscanf(FILE * stream, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vfscanf(stream, fmt, ap);
va_end(ap);
return ret;
}
int vscanf(const char * fmt, va_list ap);
int scanf(const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vscanf(fmt, ap);
va_end(ap);
return ret;
}
int vsscanf(const char * input, const char * fmt, va_list ap);
int sscanf(const char * input, const char * fmt, ...) {
int ret;
va_list ap;
va_start(ap, fmt);
ret = vsscanf(input, fmt, ap);
va_end(ap);
return ret;
}