Showing changes with 3433 additions and 16 deletions
#ifndef _SYS_REDOX_H
#define _SYS_REDOX_H
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __redox__
ssize_t redox_fpath(int fd, void * buf, size_t count);
void * redox_physalloc(size_t size);
int redox_physfree(void * physical_address, size_t size);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif
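The header above exposes Redox-specific extensions. As a minimal usage sketch (Redox only), the following assumes redox_fpath fills buf with the path backing the descriptor and returns the number of bytes written, or -1 on error, matching the declaration above; the exact semantics are an assumption here, not confirmed by the diff.

/* Sketch: print the path behind an open file descriptor via redox_fpath. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/redox.h>

int main(void) {
    char buf[4096];
    int fd = open("/etc/passwd", O_RDONLY);
    if (fd < 0)
        return 1;
    ssize_t len = redox_fpath(fd, buf, sizeof(buf) - 1); /* assumed to return path length */
    if (len >= 0) {
        buf[len] = '\0';
        printf("fd %d -> %s\n", fd, buf);
    }
    close(fd);
    return 0;
}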
#ifndef _SYS_TYPES_H
#define _SYS_TYPES_H
#ifndef _SYS_TYPES_INTERNAL_H
#define _SYS_TYPES_INTERNAL_H
#include <stddef.h>
typedef long blksize_t;
@@ -9,11 +9,11 @@ typedef int gid_t;
typedef int uid_t;
typedef int mode_t;
typedef unsigned long nlink_t;
typedef long off_t;
typedef long long off_t;
typedef int pid_t;
typedef unsigned id_t;
typedef long ssize_t;
typedef long time_t;
typedef long long time_t;
typedef unsigned int useconds_t;
typedef int suseconds_t;
typedef long clock_t;
@@ -21,18 +21,14 @@ typedef int clockid_t;
typedef void* timer_t;
typedef unsigned long int blkcnt_t;
typedef unsigned long int fsblkcnt_t;
typedef unsigned long int fsfilcnt_t;
typedef unsigned char u_char, uchar;
typedef unsigned short u_short, ushort;
typedef unsigned int u_int, uint;
typedef unsigned long u_long, ulong;
typedef long long quad_t;
typedef unsigned long long u_quad_t;
#ifdef __linux__
#define _SC_PAGE_SIZE 30
#endif
#ifdef __redox__
#define _SC_PAGE_SIZE 8
#endif
#endif /* _SYS_TYPES_H */
typedef char *caddr_t;
#endif /* _SYS_TYPES_INTERNAL_H */
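The hunk above widens off_t and time_t from long to long long, so both become 64-bit even on 32-bit targets. A hypothetical compile-time sanity check (not part of the diff) would be:

/* Sketch: verify the widened typedefs are 64-bit after this change. */
#include <sys/types.h>

_Static_assert(sizeof(off_t) == 8, "off_t should be 64-bit (long long)");
_Static_assert(sizeof(time_t) == 8, "time_t should be 64-bit (long long)");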
#ifndef _SYS_USER_H
#define _SYS_USER_H
#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_AMD64)
#include <arch/x64/user.h>
#elif defined(__aarch64__)
#include <arch/aarch64/user.h>
#elif defined(__riscv) && __riscv_xlen==64
#include <arch/riscv64/user.h>
#else
#error "Unknown architecture"
#endif
#endif
#ifndef _SYSEXITS_H
#define _SYSEXITS_H
#define EX_OK 0
#define EX_USAGE 64
#define EX_DATAERR 65
#define EX_NOINPUT 66
#define EX_NOUSER 67
#define EX_NOHOST 68
#define EX_UNAVAILABLE 69
#define EX_SOFTWARE 70
#define EX_OSERR 71
#define EX_OSFILE 72
#define EX_CANTCREAT 73
#define EX_IOERR 74
#define EX_TEMPFAIL 75
#define EX_PROTOCOL 76
#define EX_NOPERM 77
#define EX_CONFIG 78
#endif /* _SYSEXITS_H */
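These are the conventional BSD sysexits codes; a short usage sketch with the constants defined above:

/* Sketch: report a command-line usage error with EX_USAGE, success with EX_OK. */
#include <stdio.h>
#include <sysexits.h>

int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return EX_USAGE; /* command was used incorrectly */
    }
    return EX_OK;
}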
[package]
name = "setjmp"
name = "ld_so"
version = "0.1.0"
authors = ["Jeremy Soller <jackpot51@gmail.com>"]
[lib]
name = "ld_so"
crate-type = ["staticlib"]
/* Script for -z combreloc */
/* Copyright (C) 2014-2020 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
SEARCH_DIR("/aarch64-unknown-redox/lib");
SEARCH_DIR("/usr/local/lib64");
SEARCH_DIR("/lib64");
SEARCH_DIR("/usr/lib64");
SEARCH_DIR("/usr/local/lib");
SEARCH_DIR("/lib");
SEARCH_DIR("/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
PROVIDE (__executable_start = SEGMENT_START("text-segment", 0x400000)); . = SEGMENT_START("text-segment", 0x20000000) + SIZEOF_HEADERS;
.interp : { *(.interp) }
.note.gnu.build-id : { *(.note.gnu.build-id) }
.hash : { *(.hash) }
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.rela.dyn :
{
*(.rela.init)
*(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
*(.rela.fini)
*(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
*(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
*(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
*(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
*(.rela.ctors)
*(.rela.dtors)
*(.rela.got)
*(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
*(.rela.ldata .rela.ldata.* .rela.gnu.linkonce.l.*)
*(.rela.lbss .rela.lbss.* .rela.gnu.linkonce.lb.*)
*(.rela.lrodata .rela.lrodata.* .rela.gnu.linkonce.lr.*)
*(.rela.ifunc)
}
.rela.plt :
{
*(.rela.plt)
PROVIDE_HIDDEN (__rela_iplt_start = .);
*(.rela.iplt)
PROVIDE_HIDDEN (__rela_iplt_end = .);
}
. = ALIGN(CONSTANT (MAXPAGESIZE));
.init :
{
KEEP (*(SORT_NONE(.init)))
}
.plt : { *(.plt) *(.iplt) }
.plt.got : { *(.plt.got) }
.plt.sec : { *(.plt.sec) }
.text :
{
*(.text.unlikely .text.*_unlikely .text.unlikely.*)
*(.text.exit .text.exit.*)
*(.text.startup .text.startup.*)
*(.text.hot .text.hot.*)
*(.text .stub .text.* .gnu.linkonce.t.*)
/* .gnu.warning sections are handled specially by elf.em. */
*(.gnu.warning)
}
.fini :
{
KEEP (*(SORT_NONE(.fini)))
}
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
. = ALIGN(CONSTANT (MAXPAGESIZE));
/* Adjust the address for the rodata segment. We want to adjust up to
the same address within the page on the next page up. */
. = SEGMENT_START("rodata-segment", ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)));
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
.eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
.gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
.eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges*) }
/* Thread Local Storage sections */
/* .tdata :
{
PROVIDE_HIDDEN (__tdata_start = .);
*(.tdata .tdata.* .gnu.linkonce.td.*)
}
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } */
.preinit_array :
{
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP (*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
}
/* .init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
PROVIDE_HIDDEN (__init_array_end = .);
} */
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
PROVIDE_HIDDEN (__fini_array_end = .);
}
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
KEEP (*crtbegin?.o(.ctors))
/* We don't want to include the .ctor section from
the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors :
{
KEEP (*crtbegin.o(.dtors))
KEEP (*crtbegin?.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr : { KEEP (*(.jcr)) }
.data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
.dynamic : { *(.dynamic) }
.got : { *(.got) *(.igot) }
. = DATA_SEGMENT_RELRO_END (SIZEOF (.got.plt) >= 24 ? 24 : 0, .);
.got.plt : { *(.got.plt) *(.igot.plt) }
.data :
{
*(.data .data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
.data1 : { *(.data1) }
_edata = .; PROVIDE (edata = .);
. = .;
__bss_start = .;
.bss :
{
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* Align here to ensure that the .bss section occupies space up to
_end. Align after .bss to ensure correct alignment even if the
.bss section disappears because there are no input sections.
FIXME: Why do we need it? When there is no .bss section, we do not
pad the .data section. */
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
.lbss :
{
*(.dynlbss)
*(.lbss .lbss.* .gnu.linkonce.lb.*)
*(LARGE_COMMON)
}
. = ALIGN(64 / 8);
. = SEGMENT_START("ldata-segment", .);
.lrodata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.lrodata .lrodata.* .gnu.linkonce.lr.*)
}
.ldata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.ldata .ldata.* .gnu.linkonce.l.*)
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
. = ALIGN(64 / 8);
_end = .; PROVIDE (end = .);
. = DATA_SEGMENT_END (.);
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.gnu.build.attributes : { *(.gnu.build.attributes .gnu.build.attributes.*) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* DWARF 3 */
.debug_pubtypes 0 : { *(.debug_pubtypes) }
.debug_ranges 0 : { *(.debug_ranges) }
/* DWARF Extension. */
.debug_macro 0 : { *(.debug_macro) }
.debug_addr 0 : { *(.debug_addr) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/DISCARD/ : {
*(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*)
/*
* XXX: As of now, ld.so links with relibc which has the main functionality. In the next refactor,
* ld.so will be moved out of relibc. So, till that time, we have to discard any sections
* that may reference or use thread local storage.
*
* .init_array also depends on TLS and is discarded as we don't need it.
*/
*(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon)
*(.init_array)
}
}
/* Script for -z combreloc */
/* Copyright (C) 2014-2020 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
OUTPUT_FORMAT("elf32-i386", "elf32-i386",
"elf32-i386")
OUTPUT_ARCH(i386)
ENTRY(_start)
SEARCH_DIR("/i686-unknown-redox/lib");
SEARCH_DIR("/usr/local/lib32");
SEARCH_DIR("/lib32");
SEARCH_DIR("/usr/lib32");
SEARCH_DIR("/usr/local/lib");
SEARCH_DIR("/lib");
SEARCH_DIR("/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
PROVIDE (__executable_start = SEGMENT_START("text-segment", 0x400000)); . = SEGMENT_START("text-segment", 0x20000000) + SIZEOF_HEADERS;
.interp : { *(.interp) }
.note.gnu.build-id : { *(.note.gnu.build-id) }
.hash : { *(.hash) }
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.rela.dyn :
{
*(.rela.init)
*(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
*(.rela.fini)
*(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
*(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
*(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
*(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
*(.rela.ctors)
*(.rela.dtors)
*(.rela.got)
*(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
*(.rela.ldata .rela.ldata.* .rela.gnu.linkonce.l.*)
*(.rela.lbss .rela.lbss.* .rela.gnu.linkonce.lb.*)
*(.rela.lrodata .rela.lrodata.* .rela.gnu.linkonce.lr.*)
*(.rela.ifunc)
}
.rela.plt :
{
*(.rela.plt)
PROVIDE_HIDDEN (__rela_iplt_start = .);
*(.rela.iplt)
PROVIDE_HIDDEN (__rela_iplt_end = .);
}
. = ALIGN(CONSTANT (MAXPAGESIZE));
.init :
{
KEEP (*(SORT_NONE(.init)))
}
.plt : { *(.plt) *(.iplt) }
.plt.got : { *(.plt.got) }
.plt.sec : { *(.plt.sec) }
.text :
{
*(.text.unlikely .text.*_unlikely .text.unlikely.*)
*(.text.exit .text.exit.*)
*(.text.startup .text.startup.*)
*(.text.hot .text.hot.*)
*(.text .stub .text.* .gnu.linkonce.t.*)
/* .gnu.warning sections are handled specially by elf.em. */
*(.gnu.warning)
}
.fini :
{
KEEP (*(SORT_NONE(.fini)))
}
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
. = ALIGN(CONSTANT (MAXPAGESIZE));
/* Adjust the address for the rodata segment. We want to adjust up to
the same address within the page on the next page up. */
. = SEGMENT_START("rodata-segment", ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)));
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
.eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
.gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
.eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges*) }
/* Thread Local Storage sections */
/* .tdata :
{
PROVIDE_HIDDEN (__tdata_start = .);
*(.tdata .tdata.* .gnu.linkonce.td.*)
}
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } */
.preinit_array :
{
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP (*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
}
/* .init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
PROVIDE_HIDDEN (__init_array_end = .);
} */
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
PROVIDE_HIDDEN (__fini_array_end = .);
}
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
KEEP (*crtbegin?.o(.ctors))
/* We don't want to include the .ctor section from
the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors :
{
KEEP (*crtbegin.o(.dtors))
KEEP (*crtbegin?.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr : { KEEP (*(.jcr)) }
.data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
.dynamic : { *(.dynamic) }
.got : { *(.got) *(.igot) }
. = DATA_SEGMENT_RELRO_END (SIZEOF (.got.plt) >= 24 ? 24 : 0, .);
.got.plt : { *(.got.plt) *(.igot.plt) }
.data :
{
*(.data .data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
.data1 : { *(.data1) }
_edata = .; PROVIDE (edata = .);
. = .;
__bss_start = .;
.bss :
{
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* Align here to ensure that the .bss section occupies space up to
_end. Align after .bss to ensure correct alignment even if the
.bss section disappears because there are no input sections.
FIXME: Why do we need it? When there is no .bss section, we do not
pad the .data section. */
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
.lbss :
{
*(.dynlbss)
*(.lbss .lbss.* .gnu.linkonce.lb.*)
*(LARGE_COMMON)
}
. = ALIGN(64 / 8);
. = SEGMENT_START("ldata-segment", .);
.lrodata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.lrodata .lrodata.* .gnu.linkonce.lr.*)
}
.ldata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.ldata .ldata.* .gnu.linkonce.l.*)
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
. = ALIGN(64 / 8);
_end = .; PROVIDE (end = .);
. = DATA_SEGMENT_END (.);
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.gnu.build.attributes : { *(.gnu.build.attributes .gnu.build.attributes.*) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* DWARF 3 */
.debug_pubtypes 0 : { *(.debug_pubtypes) }
.debug_ranges 0 : { *(.debug_ranges) }
/* DWARF Extension. */
.debug_macro 0 : { *(.debug_macro) }
.debug_addr 0 : { *(.debug_addr) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/DISCARD/ : {
*(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*)
/*
* XXX: As of now, ld.so links with relibc which has the main functionality. In the next refactor,
* ld.so will be moved out of relibc. So, till that time, we have to discard any sections
* that may reference or use thread local storage.
*
* .init_array also depends on TLS and is discarded as we don't need it.
*/
*(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon)
*(.init_array)
}
}
/* Script for -z combreloc */
/* Copyright (C) 2014-2020 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
OUTPUT_FORMAT("elf64-littleriscv", "elf64-littleriscv", "elf64-littleriscv" )
OUTPUT_ARCH(riscv)
ENTRY(_start)
SEARCH_DIR("/riscv64-unknown-redox/lib");
SEARCH_DIR("/usr/local/lib64");
SEARCH_DIR("/lib64");
SEARCH_DIR("/usr/lib64");
SEARCH_DIR("/usr/local/lib");
SEARCH_DIR("/lib");
SEARCH_DIR("/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
PROVIDE (__executable_start = SEGMENT_START("text-segment", 0x400000)); . = SEGMENT_START("text-segment", 0x20000000) + SIZEOF_HEADERS;
.interp : { *(.interp) }
.note.gnu.build-id : { *(.note.gnu.build-id) }
.hash : { *(.hash) }
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.rela.dyn :
{
*(.rela.init)
*(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
*(.rela.fini)
*(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
*(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
*(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
*(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
*(.rela.ctors)
*(.rela.dtors)
*(.rela.got)
*(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
*(.rela.ldata .rela.ldata.* .rela.gnu.linkonce.l.*)
*(.rela.lbss .rela.lbss.* .rela.gnu.linkonce.lb.*)
*(.rela.lrodata .rela.lrodata.* .rela.gnu.linkonce.lr.*)
*(.rela.ifunc)
}
.rela.plt :
{
*(.rela.plt)
PROVIDE_HIDDEN (__rela_iplt_start = .);
*(.rela.iplt)
PROVIDE_HIDDEN (__rela_iplt_end = .);
}
. = ALIGN(CONSTANT (MAXPAGESIZE));
.plt : { *(.plt) *(.iplt) }
.plt.got : { *(.plt.got) }
.plt.sec : { *(.plt.sec) }
.text :
{
*(.text.unlikely .text.*_unlikely .text.unlikely.*)
*(.text.exit .text.exit.*)
*(.text.startup .text.startup.*)
*(.text.hot .text.hot.*)
*(.text .stub .text.* .gnu.linkonce.t.*)
/* .gnu.warning sections are handled specially by elf.em. */
*(.gnu.warning)
}
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
. = ALIGN(CONSTANT (MAXPAGESIZE));
/* Adjust the address for the rodata segment. We want to adjust up to
the same address within the page on the next page up. */
. = SEGMENT_START("rodata-segment", ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)));
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
.eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
.gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
.eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges*) }
/* Thread Local Storage sections */
/* .tdata :
{
PROVIDE_HIDDEN (__tdata_start = .);
*(.tdata .tdata.* .gnu.linkonce.td.*)
}
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } */
.preinit_array :
{
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP (*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
}
/* .init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
PROVIDE_HIDDEN (__init_array_end = .);
} */
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
PROVIDE_HIDDEN (__fini_array_end = .);
}
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
KEEP (*crtbegin?.o(.ctors))
/* We don't want to include the .ctor section from
the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors :
{
KEEP (*crtbegin.o(.dtors))
KEEP (*crtbegin?.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr : { KEEP (*(.jcr)) }
.data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
.dynamic : { *(.dynamic) }
.got : { *(.got) *(.igot) }
. = DATA_SEGMENT_RELRO_END (SIZEOF (.got.plt) >= 24 ? 24 : 0, .);
.got.plt : { *(.got.plt) *(.igot.plt) }
.data :
{
*(.data .data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
.data1 : { *(.data1) }
_edata = .; PROVIDE (edata = .);
. = .;
__bss_start = .;
.bss :
{
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* Align here to ensure that the .bss section occupies space up to
_end. Align after .bss to ensure correct alignment even if the
.bss section disappears because there are no input sections.
FIXME: Why do we need it? When there is no .bss section, we do not
pad the .data section. */
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
.lbss :
{
*(.dynlbss)
*(.lbss .lbss.* .gnu.linkonce.lb.*)
*(LARGE_COMMON)
}
. = ALIGN(64 / 8);
. = SEGMENT_START("ldata-segment", .);
.lrodata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.lrodata .lrodata.* .gnu.linkonce.lr.*)
}
.ldata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.ldata .ldata.* .gnu.linkonce.l.*)
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
. = ALIGN(64 / 8);
_end = .; PROVIDE (end = .);
. = DATA_SEGMENT_END (.);
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.gnu.build.attributes : { *(.gnu.build.attributes .gnu.build.attributes.*) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* DWARF 3 */
.debug_pubtypes 0 : { *(.debug_pubtypes) }
.debug_ranges 0 : { *(.debug_ranges) }
/* DWARF Extension. */
.debug_macro 0 : { *(.debug_macro) }
.debug_addr 0 : { *(.debug_addr) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/DISCARD/ : {
*(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*)
/*
* XXX: As of now, ld.so links with relibc which has the main functionality. In the next refactor,
* ld.so will be moved out of relibc. So, till that time, we have to discard any sections
* that may reference or use thread local storage.
*
* .init_array also depends on TLS and is discarded as we don't need it.
*/
*(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon)
*(.init_array)
}
}
/* Script for -z combreloc */
/* Copyright (C) 2014-2020 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64",
"elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(_start)
SEARCH_DIR("/usr/x86_64-pc-linux-gnu/lib64");
SEARCH_DIR("/usr/lib64/binutils/x86_64-pc-linux-gnu/2.33.164");
SEARCH_DIR("/usr/local/lib64");
SEARCH_DIR("/lib64");
SEARCH_DIR("/usr/lib64");
SEARCH_DIR("/usr/x86_64-pc-linux-gnu/lib");
SEARCH_DIR("/usr/lib64/binutils/x86_64-pc-linux-gnu/2.33.1");
SEARCH_DIR("/usr/local/lib");
SEARCH_DIR("/lib");
SEARCH_DIR("/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
PROVIDE (__executable_start = SEGMENT_START("text-segment", 0x400000)); . = SEGMENT_START("text-segment", 0x20000000) + SIZEOF_HEADERS;
.interp : { *(.interp) }
.note.gnu.build-id : { *(.note.gnu.build-id) }
.hash : { *(.hash) }
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.rela.dyn :
{
*(.rela.init)
*(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
*(.rela.fini)
*(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
*(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
*(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
*(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
*(.rela.ctors)
*(.rela.dtors)
*(.rela.got)
*(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
*(.rela.ldata .rela.ldata.* .rela.gnu.linkonce.l.*)
*(.rela.lbss .rela.lbss.* .rela.gnu.linkonce.lb.*)
*(.rela.lrodata .rela.lrodata.* .rela.gnu.linkonce.lr.*)
*(.rela.ifunc)
}
.rela.plt :
{
*(.rela.plt)
PROVIDE_HIDDEN (__rela_iplt_start = .);
*(.rela.iplt)
PROVIDE_HIDDEN (__rela_iplt_end = .);
}
. = ALIGN(CONSTANT (MAXPAGESIZE));
.init :
{
KEEP (*(SORT_NONE(.init)))
}
.plt : { *(.plt) *(.iplt) }
.plt.got : { *(.plt.got) }
.plt.sec : { *(.plt.sec) }
.text :
{
*(.text.unlikely .text.*_unlikely .text.unlikely.*)
*(.text.exit .text.exit.*)
*(.text.startup .text.startup.*)
*(.text.hot .text.hot.*)
*(.text .stub .text.* .gnu.linkonce.t.*)
/* .gnu.warning sections are handled specially by elf.em. */
*(.gnu.warning)
}
.fini :
{
KEEP (*(SORT_NONE(.fini)))
}
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
. = ALIGN(CONSTANT (MAXPAGESIZE));
/* Adjust the address for the rodata segment. We want to adjust up to
the same address within the page on the next page up. */
. = SEGMENT_START("rodata-segment", ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)));
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
.eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
.gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
.eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges*) }
/* Thread Local Storage sections */
/* .tdata : ALIGN(4K)
{
PROVIDE_HIDDEN (__tdata_start = .);
*(.tdata .tdata.* .gnu.linkonce.td.*)
}
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } */
.preinit_array :
{
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP (*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
}
/* .init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
PROVIDE_HIDDEN (__init_array_end = .);
} */
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
PROVIDE_HIDDEN (__fini_array_end = .);
}
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
KEEP (*crtbegin?.o(.ctors))
/* We don't want to include the .ctor section from
the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors :
{
KEEP (*crtbegin.o(.dtors))
KEEP (*crtbegin?.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr : { KEEP (*(.jcr)) }
.data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
.dynamic : { *(.dynamic) }
.got : { *(.got) *(.igot) }
. = DATA_SEGMENT_RELRO_END (SIZEOF (.got.plt) >= 24 ? 24 : 0, .);
.got.plt : { *(.got.plt) *(.igot.plt) }
.data :
{
*(.data .data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
.data1 : { *(.data1) }
_edata = .; PROVIDE (edata = .);
. = .;
__bss_start = .;
.bss :
{
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* Align here to ensure that the .bss section occupies space up to
_end. Align after .bss to ensure correct alignment even if the
.bss section disappears because there are no input sections.
FIXME: Why do we need it? When there is no .bss section, we do not
pad the .data section. */
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
.lbss :
{
*(.dynlbss)
*(.lbss .lbss.* .gnu.linkonce.lb.*)
*(LARGE_COMMON)
}
. = ALIGN(64 / 8);
. = SEGMENT_START("ldata-segment", .);
.lrodata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.lrodata .lrodata.* .gnu.linkonce.lr.*)
}
.ldata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.ldata .ldata.* .gnu.linkonce.l.*)
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
. = ALIGN(64 / 8);
_end = .; PROVIDE (end = .);
. = DATA_SEGMENT_END (.);
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.gnu.build.attributes : { *(.gnu.build.attributes .gnu.build.attributes.*) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* DWARF 3 */
.debug_pubtypes 0 : { *(.debug_pubtypes) }
.debug_ranges 0 : { *(.debug_ranges) }
/* DWARF Extension. */
.debug_macro 0 : { *(.debug_macro) }
.debug_addr 0 : { *(.debug_addr) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/DISCARD/ : {
*(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*)
/*
* XXX: As of now, ld.so links with relibc which has the main functionality. In the next refactor,
* ld.so will be moved out of relibc. So, till that time, we have to discard any sections
* that may reference or use thread local storage.
*
* .init_array also depends on TLS and is discarded as we don't need it.
*/
*(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon)
*(.init_array)
}
}
/* Script for -z combreloc */
/* Copyright (C) 2014-2020 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64",
"elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(_start)
SEARCH_DIR("/x86_64-unknown-redox/lib");
SEARCH_DIR("/usr/local/lib64");
SEARCH_DIR("/lib64");
SEARCH_DIR("/usr/lib64");
SEARCH_DIR("/usr/local/lib");
SEARCH_DIR("/lib");
SEARCH_DIR("/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
PROVIDE (__executable_start = SEGMENT_START("text-segment", 0x400000)); . = SEGMENT_START("text-segment", 0x20000000) + SIZEOF_HEADERS;
.interp : { *(.interp) }
.note.gnu.build-id : { *(.note.gnu.build-id) }
.hash : { *(.hash) }
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.rela.dyn :
{
*(.rela.init)
*(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
*(.rela.fini)
*(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
*(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
*(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
*(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
*(.rela.ctors)
*(.rela.dtors)
*(.rela.got)
*(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
*(.rela.ldata .rela.ldata.* .rela.gnu.linkonce.l.*)
*(.rela.lbss .rela.lbss.* .rela.gnu.linkonce.lb.*)
*(.rela.lrodata .rela.lrodata.* .rela.gnu.linkonce.lr.*)
*(.rela.ifunc)
}
.rela.plt :
{
*(.rela.plt)
PROVIDE_HIDDEN (__rela_iplt_start = .);
*(.rela.iplt)
PROVIDE_HIDDEN (__rela_iplt_end = .);
}
. = ALIGN(CONSTANT (MAXPAGESIZE));
.init :
{
KEEP (*(SORT_NONE(.init)))
}
.plt : { *(.plt) *(.iplt) }
.plt.got : { *(.plt.got) }
.plt.sec : { *(.plt.sec) }
.text :
{
*(.text.unlikely .text.*_unlikely .text.unlikely.*)
*(.text.exit .text.exit.*)
*(.text.startup .text.startup.*)
*(.text.hot .text.hot.*)
*(.text .stub .text.* .gnu.linkonce.t.*)
/* .gnu.warning sections are handled specially by elf.em. */
*(.gnu.warning)
}
.fini :
{
KEEP (*(SORT_NONE(.fini)))
}
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
. = ALIGN(CONSTANT (MAXPAGESIZE));
/* Adjust the address for the rodata segment. We want to adjust up to
the same address within the page on the next page up. */
. = SEGMENT_START("rodata-segment", ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)));
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
.eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
.gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
.eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
.gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges*) }
/* Thread Local Storage sections */
/* .tdata : ALIGN(4K)
{
PROVIDE_HIDDEN (__tdata_start = .);
*(.tdata .tdata.* .gnu.linkonce.td.*)
}
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } */
.preinit_array :
{
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP (*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
}
/* .init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
PROVIDE_HIDDEN (__init_array_end = .);
} */
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
PROVIDE_HIDDEN (__fini_array_end = .);
}
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
KEEP (*crtbegin?.o(.ctors))
/* We don't want to include the .ctor section from
the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors :
{
KEEP (*crtbegin.o(.dtors))
KEEP (*crtbegin?.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr : { KEEP (*(.jcr)) }
.data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
.dynamic : { *(.dynamic) }
.got : { *(.got) *(.igot) }
. = DATA_SEGMENT_RELRO_END (SIZEOF (.got.plt) >= 24 ? 24 : 0, .);
.got.plt : { *(.got.plt) *(.igot.plt) }
.data :
{
*(.data .data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
.data1 : { *(.data1) }
_edata = .; PROVIDE (edata = .);
. = .;
__bss_start = .;
.bss :
{
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* Align here to ensure that the .bss section occupies space up to
_end. Align after .bss to ensure correct alignment even if the
.bss section disappears because there are no input sections.
FIXME: Why do we need it? When there is no .bss section, we do not
pad the .data section. */
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
.lbss :
{
*(.dynlbss)
*(.lbss .lbss.* .gnu.linkonce.lb.*)
*(LARGE_COMMON)
}
. = ALIGN(64 / 8);
. = SEGMENT_START("ldata-segment", .);
.lrodata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.lrodata .lrodata.* .gnu.linkonce.lr.*)
}
.ldata ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1)) :
{
*(.ldata .ldata.* .gnu.linkonce.l.*)
. = ALIGN(. != 0 ? 64 / 8 : 1);
}
. = ALIGN(64 / 8);
_end = .; PROVIDE (end = .);
. = DATA_SEGMENT_END (.);
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
.gnu.build.attributes : { *(.gnu.build.attributes .gnu.build.attributes.*) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/* DWARF 3 */
.debug_pubtypes 0 : { *(.debug_pubtypes) }
.debug_ranges 0 : { *(.debug_ranges) }
/* DWARF Extension. */
.debug_macro 0 : { *(.debug_macro) }
.debug_addr 0 : { *(.debug_addr) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/DISCARD/ : {
*(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*)
/*
* XXX: As of now, ld.so links with relibc which has the main functionality. In the next refactor,
* ld.so will be moved out of relibc. So, till that time, we have to discard any sections
* that may reference or use thread local storage.
*
* .init_array also depends on TLS and is discarded as we don't need it.
*/
*(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon)
*(.init_array)
}
}
#![no_std]
#![feature(linkage)]
use core::arch::global_asm;
#[cfg(target_arch = "aarch64")]
global_asm!(
"
.globl _start
_start:
mov x0, sp
and sp, x0, #0xfffffffffffffff0 //align sp
bl relibc_ld_so_start
# TODO: aarch64
udf #0
"
);
#[cfg(target_arch = "x86")]
global_asm!(
"
.globl _start
_start:
push esp
call relibc_ld_so_start
pop esp
# TODO: x86
ud2
"
);
#[cfg(target_arch = "x86_64")]
global_asm!(
"
.globl _start
_start:
# rsi = _start + 5
call 2f
2: pop rsi
# Save original stack and align stack to 16 bytes
mov rbp, rsp
and rsp, 0xFFFFFFFFFFFFFFF0
# Call ld_so_start(stack, entry)
mov rdi, rbp
sub rsi, 5
call relibc_ld_so_start
# Restore original stack, clear registers, and jump to new start function
mov rsp, rbp
xor rcx, rcx
xor rdx, rdx
xor rdi, rdi
xor rsi, rsi
xor r8, r8
xor r9, r9
xor r10, r10
xor r11, r11
fninit
jmp rax
"
);
#[cfg(target_arch = "riscv64")]
global_asm!(
"
.globl _start
_start:
mv a0, sp
jal relibc_ld_so_start
unimp
"
);
#[no_mangle]
pub unsafe extern "C" fn main(_argc: isize, _argv: *const *const i8) -> usize {
// LD
0x1D
}
#[linkage = "weak"]
#[no_mangle]
extern "C" fn relibc_panic(_pi: &::core::panic::PanicInfo) -> ! {
loop {}
}
#[panic_handler]
#[linkage = "weak"]
#[no_mangle]
pub unsafe fn rust_begin_unwind(pi: &::core::panic::PanicInfo) -> ! {
relibc_panic(pi)
}
Subproject commit b7b3b4bc31aa4ae42b2562e6adb151e92aa3a82a
Subproject commit 5992e5a1770452c8073cc0f2c238b5c798429668
Subproject commit 0d996efe5cfe7ce181af35d8817ac4deae644d4a
Subproject commit f9b8c35fe8a58c0216b06d24832408fe2d9c0206
[package]
name = "redox-rt"
authors = ["4lDO2 <4lDO2@protonmail.com>"]
version = "0.1.0"
edition = "2021"
license = "MIT"
description = "Libc-independent runtime for Redox"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitflags = "2"
goblin = { version = "0.7", default-features = false, features = ["elf32", "elf64", "endian_fd"] }
plain = "0.2"
redox_syscall = "0.5.8"
generic-rt = { path = "../generic-rt" }
use core::{mem::offset_of, ptr::NonNull};
use syscall::{data::*, error::*};
use crate::{
proc::{fork_inner, FdGuard},
signal::{inner_c, PosixStackt, RtSigarea, SigStack, PROC_CONTROL_STRUCT},
RtTcb, Tcb,
};
// Set up a stack starting at the very end of the address space and growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 47;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub altstack_top: usize,
pub altstack_bottom: usize,
pub tmp_x1_x2: [usize; 2],
pub tmp_x3_x4: [usize; 2],
pub tmp_x5_x6: [usize; 2],
pub tmp_sp: usize,
pub onstack: u64,
pub disable_signals_depth: u64,
pub pctl: usize, // TODO: remove
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
}
#[repr(C)]
#[derive(Debug, Default)]
pub struct ArchIntRegs {
pub x30: usize,
pub x29: usize,
pub x28: usize,
pub x27: usize,
pub x26: usize,
pub x25: usize,
pub x24: usize,
pub x23: usize,
pub x22: usize,
pub x21: usize,
pub x20: usize,
pub x19: usize,
pub x18: usize,
pub x17: usize,
pub x16: usize,
pub x15: usize,
pub x14: usize,
pub x13: usize,
pub x12: usize,
pub x11: usize,
pub x10: usize,
pub x9: usize,
pub x8: usize,
pub x7: usize,
pub x6: usize,
pub x5: usize,
pub x4: usize,
pub x3: usize,
pub x2: usize,
pub x1: usize,
pub sp: usize,
pub nzcv: usize, // user-accessible PSTATE bits
pub pc: usize,
pub x0: usize,
}
/// Deactivate TLS. Used before exec() on Redox so the target executable is not tricked into
/// thinking TLS is already initialized, as it would be for a new thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.tpidr_el0 = 0;
let _ = syscall::write(*file, &mut env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "C" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "C" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
// TODO: Currently pidfd == threadfd, but this will not be the case later.
RtTcb::current()
.thr_fd
.get()
.write(Some(FdGuard::new(new_pid_fd)));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
stp x29, x30, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x19, x20, [sp, #-16]!
sub sp, sp, #32
//TODO: store floating point regs
mov x0, sp
bl {fork_impl}
add sp, sp, #32
ldp x19, x20, [sp], #16
ldp x21, x22, [sp], #16
ldp x23, x24, [sp], #16
ldp x25, x26, [sp], #16
ldp x27, x28, [sp], #16
ldp x29, x30, [sp], #16
ret
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
ldp x0, x1, [sp]
bl {child_hook}
//TODO: load floating point regs
mov x0, xzr
add sp, sp, #32
ldp x19, x20, [sp], #16
ldp x21, x22, [sp], #16
ldp x23, x24, [sp], #16
ldp x25, x26, [sp], #16
ldp x27, x28, [sp], #16
ldp x29, x30, [sp], #16
ret
"] <= [child_hook = sym child_hook]);
// https://devblogs.microsoft.com/oldnewthing/20220811-00/?p=106963
asmfunction!(__relibc_internal_sigentry: ["
// Clear any active reservation.
clrex
// The old pc and x0 are saved in the sigcontrol struct.
mrs x0, tpidr_el0 // ABI ptr
ldr x0, [x0] // TCB ptr
// Save x1-x6 and sp
stp x1, x2, [x0, #{tcb_sa_off} + {sa_tmp_x1_x2}]
stp x3, x4, [x0, #{tcb_sa_off} + {sa_tmp_x3_x4}]
stp x5, x6, [x0, #{tcb_sa_off} + {sa_tmp_x5_x6}]
mov x1, sp
str x1, [x0, #{tcb_sa_off} + {sa_tmp_sp}]
ldr x6, [x0, #{tcb_sa_off} + {sa_pctl}]
1:
// Load x1 with the thread's bits
add x5, x0, #{tcb_sc_off} + {sc_word}
ldaxr x1, [x5]
// First check if there are standard thread signals,
and x4, x1, x1, lsr #32 // x4 := x1 & (x1 >> 32)
cbnz x4, 3f // jump if x4 != 0
clrex
// and if not, load process pending bitset.
add x5, x6, #{pctl_pending}
ldaxr x2, [x5]
// Check if there are standard proc signals:
lsr x3, x1, #32 // mask
and w3, w2, w3 // pending unblocked proc
cbz w3, 4f // skip 'fetch_andn' step if zero
// If there was one, find which one, and try clearing the bit (last value in x3, addr in x6)
// this picks the MSB rather than the LSB, unlike x86. POSIX does not require any specific
// ordering though.
clz w3, w3
mov w4, #31
sub w3, w4, w3
// x3 now contains the sig_idx
mov x4, #1
lsl x4, x4, x3 // bit to remove
sub x4, x2, x4 // bit was certainly set, so sub is allowed
// x4 is now the new mask to be set
add x5, x6, #{pctl_pending}
add x2, x5, #{pctl_sender_infos}
add x2, x2, w3, uxtb #3
ldar x2, [x2]
// Try clearing the bit, retrying on failure.
stxr w1, x4, [x5] // try setting pending set to x4, set w1 := 0 on success
cbnz w1, 1b // retry everything if this fails
mov x1, x3
b 2f
4:
// Check for realtime signals, thread/proc.
clrex
// Load the pending set again. TODO: optimize this?
add x1, x6, #{pctl_pending}
ldaxr x2, [x1]
lsr x2, x2, #32
add x5, x0, #{tcb_sc_off} + {sc_word} + 8
ldar x1, [x5]
orr x2, x1, x2
and x2, x2, x2, lsr #32
cbz x2, 7f
rbit x3, x2
clz x3, x3
mov x4, #31
sub x2, x4, x3
// x2 now contains sig_idx - 32
// If realtime signal was directed at thread, handle it as an idempotent signal.
lsr x3, x1, x2
tbnz x3, #0, 5f
mov x5, x0
mov x4, x8
mov x8, #{SYS_SIGDEQUEUE}
mov x0, x1
add x1, x0, #{tcb_sa_off} + {sa_tmp_rt_inf}
svc 0
mov x0, x5
mov x8, x4
cbnz x0, 1b
b 2f
5:
// A realtime signal was sent to this thread, try clearing its bit.
// x3 contains last rt signal word, x2 contains rt_idx
clrex
// Calculate the absolute sig_idx
add x1, x3, 32
// Load si_pid and si_uid
add x2, x0, #{tcb_sc_off} + {sc_sender_infos}
add x2, x2, w1, uxtb #3
ldar x2, [x2]
add x3, x0, #{tcb_sc_off} + {sc_word} + 8
ldxr x2, [x3]
// Calculate new mask
mov x4, #1
lsl x4, x4, x2
sub x2, x2, x4 // remove bit
stxr w5, x2, [x3]
cbnz w5, 1b
str x2, [x0, #{tcb_sa_off} + {sa_tmp_id_inf}]
b 2f
3:
// A standard signal was sent to this thread, try clearing its bit.
clz x1, x1
mov x2, #31
sub x1, x2, x1
// Load si_pid and si_uid
add x2, x0, #{tcb_sc_off} + {sc_sender_infos}
add x2, x2, w1, uxtb #3
ldar x2, [x2]
// Clear bit from mask
mov x3, #1
lsl x3, x3, x1
sub x4, x4, x3
// Try updating the mask
stxr w3, x1, [x5]
cbnz w3, 1b
str x2, [x0, #{tcb_sa_off} + {sa_tmp_id_inf}]
2:
ldr x3, [x0, #{tcb_sa_off} + {sa_pctl}]
add x2, x2, {pctl_actions}
add x2, x3, w1, uxtb #4 // actions_base + sig_idx * sizeof Action
// TODO: NOT ATOMIC (tearing allowed between regs)!
ldxp x2, x3, [x2]
clrex
// Calculate new sp wrt redzone and alignment
mov x4, sp
sub x4, x4, {REDZONE_SIZE}
and x4, x4, -{STACK_ALIGN}
mov sp, x4
// skip sigaltstack step if SA_ONSTACK is clear
// tbz x2, #{SA_ONSTACK_BIT}, 2f
ldr x2, [x0, #{tcb_sc_off} + {sc_saved_pc}]
ldr x3, [x0, #{tcb_sc_off} + {sc_saved_x0}]
stp x2, x3, [sp, #-16]!
ldr x2, [x0, #{tcb_sa_off} + {sa_tmp_sp}]
mrs x3, nzcv
stp x2, x3, [sp, #-16]!
ldp x2, x3, [x0, #{tcb_sa_off} + {sa_tmp_x1_x2}]
stp x2, x3, [sp, #-16]!
ldp x3, x4, [x0, #{tcb_sa_off} + {sa_tmp_x3_x4}]
stp x4, x3, [sp, #-16]!
ldp x5, x6, [x0, #{tcb_sa_off} + {sa_tmp_x5_x6}]
stp x6, x5, [sp, #-16]!
stp x8, x7, [sp, #-16]!
stp x10, x9, [sp, #-16]!
stp x12, x11, [sp, #-16]!
stp x14, x13, [sp, #-16]!
stp x16, x15, [sp, #-16]!
stp x18, x17, [sp, #-16]!
stp x20, x19, [sp, #-16]!
stp x22, x21, [sp, #-16]!
stp x24, x23, [sp, #-16]!
stp x26, x25, [sp, #-16]!
stp x28, x27, [sp, #-16]!
stp x30, x29, [sp, #-16]!
str w1, [sp, #-4]
sub sp, sp, #64
mov x0, sp
bl {inner}
add sp, sp, #64
ldp x30, x29, [sp], #16
ldp x28, x27, [sp], #16
ldp x26, x25, [sp], #16
ldp x24, x23, [sp], #16
ldp x22, x21, [sp], #16
ldp x20, x19, [sp], #16
ldp x18, x17, [sp], #16
ldp x16, x15, [sp], #16
ldp x14, x13, [sp], #16
ldp x12, x11, [sp], #16
ldp x10, x9, [sp], #16
ldp x8, x7, [sp], #16
ldp x6, x5, [sp], #16
ldp x4, x3, [sp], #16
ldp x2, x1, [sp], #16
ldr x0, [sp, #8]
msr nzcv, x0
8:
// x18 is reserved by ABI as 'platform register', so clobbering it should be safe.
mov x18, sp
ldr x0, [x18]
mov sp, x0
ldp x18, x0, [x18, #16]
br x18
7:
// Spurious signal, i.e. all bitsets were 0 at the time they were checked
clrex
ldr x1, [x0, #{tcb_sc_off} + {sc_flags}]
and x1, x1, ~1
str x1, [x0, #{tcb_sc_off} + {sc_flags}]
ldp x1, x2, [x0, #{tcb_sa_off} + {sa_tmp_x1_x2}]
ldp x3, x4, [x0, #{tcb_sa_off} + {sa_tmp_x3_x4}]
ldp x5, x6, [x0, #{tcb_sa_off} + {sa_tmp_x5_x6}]
ldr x18, [x0, #{tcb_sc_off} + {sc_saved_pc}]
ldr x0, [x0, #{tcb_sc_off} + {sc_saved_x0}]
br x18
"] <= [
pctl_pending = const (offset_of!(SigProcControl, pending)),
pctl_actions = const (offset_of!(SigProcControl, actions)),
pctl_sender_infos = const (offset_of!(SigProcControl, sender_infos)),
tcb_sc_off = const (offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control)),
tcb_sa_off = const (offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch)),
sa_tmp_x1_x2 = const offset_of!(SigArea, tmp_x1_x2),
sa_tmp_x3_x4 = const offset_of!(SigArea, tmp_x3_x4),
sa_tmp_x5_x6 = const offset_of!(SigArea, tmp_x5_x6),
sa_tmp_sp = const offset_of!(SigArea, tmp_sp),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_pctl = const offset_of!(SigArea, pctl),
sc_saved_pc = const offset_of!(Sigcontrol, saved_ip),
sc_saved_x0 = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
sc_word = const offset_of!(Sigcontrol, word),
sc_flags = const offset_of!(Sigcontrol, control_flags),
inner = sym inner_c,
SA_ONSTACK_BIT = const 58, // (1 << 58) >> 32 = 0x0400_0000
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
STACK_ALIGN = const 16,
REDZONE_SIZE = const 128,
]);
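// Entry glue for threads started via rlct_clone: the new thread's stack has been pre-loaded with
// the entry point and its arguments, which are popped into x8 and x0..x4 here before calling the
// entry point.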
asmfunction!(__relibc_internal_rlct_clone_ret: ["
# Load registers
ldp x8, x0, [sp], #16
ldp x1, x2, [sp], #16
ldp x3, x4, [sp], #16
# Call entry point
blr x8
ret
"] <= []);
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!("mov {}, sp", out(reg) sp);
}
sp
}
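/// Manually enter the signal trampoline: the resume address is stored into `saved_ip`, and
/// control jumps into __relibc_internal_sigentry as if a signal had just been delivered.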
pub unsafe fn manually_enter_trampoline() {
let ctl = &Tcb::current().unwrap().os_specific.control;
ctl.saved_archdep_reg.set(0);
let ip_location = &ctl.saved_ip as *const _ as usize;
core::arch::asm!("
bl 2f
b 3f
2:
str lr, [x0]
b __relibc_internal_sigentry
3:
", inout("x0") ip_location => _, out("lr") _);
}
pub unsafe fn arch_pre(stack: &mut SigStack, os: &mut SigArea) -> PosixStackt {
PosixStackt {
sp: core::ptr::null_mut(), // TODO
size: 0, // TODO
flags: 0, // TODO
}
}
use core::{mem::offset_of, ptr::NonNull, sync::atomic::Ordering};
use syscall::*;
use crate::{
proc::{fork_inner, FdGuard},
signal::{inner_fastcall, PosixStackt, RtSigarea, SigStack, PROC_CONTROL_STRUCT},
RtTcb,
};
// Setup a stack starting from the very end of the address space, and then growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 31;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub altstack_top: usize,
pub altstack_bottom: usize,
pub tmp_eip: usize,
pub tmp_esp: usize,
pub tmp_eax: usize,
pub tmp_ecx: usize,
pub tmp_edx: usize,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
pub tmp_mm0: u64,
pub pctl: usize, // TODO: reference pctl directly
pub disable_signals_depth: u64,
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
}
#[derive(Debug, Default)]
#[repr(C, align(16))]
pub struct ArchIntRegs {
pub fxsave: [u128; 29],
// ensure fxsave region is 16 byte aligned
pub _pad: [usize; 2], // fxsave "available" +0
pub ebp: usize, // fxsave "available" +8
pub esi: usize, // avail +12
pub edi: usize, // avail +16
pub ebx: usize, // avail +20
pub eax: usize, // avail +24
pub ecx: usize, // avail +28
pub edx: usize, // avail +32
pub eflags: usize, // avail +36
pub eip: usize, // avail +40
pub esp: usize, // avail +44
}
/// Deactivate TLS, used before exec() on Redox so the target executable is not tricked into
/// thinking TLS is already initialized as if it were a thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.fsbase = 0;
env.gsbase = 0;
let _ = syscall::write(*file, &mut env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "cdecl" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "cdecl" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
crate::child_hook_common(FdGuard::new(new_pid_fd));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
push ebp
mov ebp, esp
// Push preserved registers
push ebx
push edi
push esi
push ebp
sub esp, 32
//TODO stmxcsr [esp+16]
fnstcw [esp+24]
push esp
call {fork_impl}
pop esp
jmp 2f
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
// Arguments already on the stack
call {child_hook}
//TODO ldmxcsr [esp+16]
fldcw [esp+24]
xor eax, eax
.p2align 4
2:
add esp, 32
// Pop preserved registers
pop ebp
pop esi
pop edi
pop ebx
pop ebp
ret
"] <= [child_hook = sym child_hook]);
asmfunction!(__relibc_internal_sigentry: ["
// Save some registers
mov gs:[{tcb_sa_off} + {sa_tmp_esp}], esp
mov gs:[{tcb_sa_off} + {sa_tmp_eax}], eax
mov gs:[{tcb_sa_off} + {sa_tmp_edx}], edx
mov gs:[{tcb_sa_off} + {sa_tmp_ecx}], ecx
1:
// Read standard signal word - first for this thread
mov edx, gs:[{tcb_sc_off} + {sc_word} + 4]
mov eax, gs:[{tcb_sc_off} + {sc_word}]
and eax, edx
bsf eax, eax
jnz 9f
mov ecx, gs:[{tcb_sa_off} + {sa_pctl}]
// Read standard signal word - for the process
mov eax, [ecx + {pctl_pending}]
and eax, edx
bsf eax, eax
jz 3f
// Read si_pid and si_uid, atomically.
movq gs:[{tcb_sa_off} + {sa_tmp_mm0}], mm0
movq mm0, [ecx + {pctl_sender_infos} + eax * 8]
movq gs:[{tcb_sa_off} + {sa_tmp_id_inf}], mm0
movq mm0, gs:[{tcb_sa_off} + {sa_tmp_mm0}]
// Try clearing the pending bit, otherwise retry if another thread did that first
lock btr [ecx + {pctl_pending}], eax
jnc 1b
jmp 2f
3:
// Read realtime thread and process signal word together
mov edx, [ecx + {pctl_pending} + 4]
mov eax, gs:[{tcb_sc_off} + {sc_word} + 8]
or eax, edx
and eax, gs:[{tcb_sc_off} + {sc_word} + 12]
jz 7f // spurious signal
bsf eax, eax
// If the signal was targeted at this thread specifically, handle it as a thread signal.
bt edx, eax
jc 8f
mov edx, ebx
lea ecx, [eax+32]
mov eax, {SYS_SIGDEQUEUE}
mov edx, gs:[0]
add edx, {tcb_sa_off} + {sa_tmp_rt_inf}
int 0x80
mov ebx, edx
test eax, eax
jnz 1b
mov eax, ecx
jmp 2f
8:
add eax, 32
9:
// Read si_pid and si_uid, atomically.
movq gs:[{tcb_sa_off} + {sa_tmp_mm0}], mm0
movq mm0, gs:[{tcb_sc_off} + {sc_sender_infos} + eax * 8]
movq gs:[{tcb_sa_off} + {sa_tmp_id_inf}], mm0
movq mm0, gs:[{tcb_sa_off} + {sa_tmp_mm0}]
mov edx, eax
shr edx, 5
mov ecx, eax
and ecx, 31
lock btr gs:[{tcb_sc_off} + {sc_word} + edx * 8], ecx
add eax, 64
2:
and esp, -{STACK_ALIGN}
mov edx, eax
add edx, edx
bt dword ptr [{pctl} + {pctl_actions} + edx * 8 + 4], 28
jnc 4f
mov edx, gs:[{tcb_sa_off} + {sa_altstack_top}]
cmp esp, edx
ja 3f
cmp esp, gs:[{tcb_sa_off} + {sa_altstack_bottom}]
jnbe 4f
3:
mov esp, edx
4:
// Now that we have a stack, we can finally start populating the signal stack.
push dword ptr gs:[{tcb_sa_off} + {sa_tmp_esp}]
push dword ptr gs:[{tcb_sc_off} + {sc_saved_eip}]
push dword ptr gs:[{tcb_sc_off} + {sc_saved_eflags}]
push dword ptr gs:[{tcb_sa_off} + {sa_tmp_edx}]
push dword ptr gs:[{tcb_sa_off} + {sa_tmp_ecx}]
push dword ptr gs:[{tcb_sa_off} + {sa_tmp_eax}]
push ebx
push edi
push esi
push ebp
sub esp, 2 * 4 + 29 * 16
fxsave [esp]
mov [esp - 4], eax
sub esp, 48
mov ecx, esp
call {inner}
fxrstor [esp + 48]
add esp, 48 + 29 * 16 + 2 * 4
pop ebp
pop esi
pop edi
pop ebx
pop eax
pop ecx
pop edx
popfd
pop dword ptr gs:[{tcb_sa_off} + {sa_tmp_eip}]
.globl __relibc_internal_sigentry_crit_first
__relibc_internal_sigentry_crit_first:
pop esp
.globl __relibc_internal_sigentry_crit_second
__relibc_internal_sigentry_crit_second:
jmp dword ptr gs:[{tcb_sa_off} + {sa_tmp_eip}]
7:
mov eax, gs:[0]
lea esp, [eax + {tcb_sc_off} + {sc_saved_eflags}]
popfd
mov esp, gs:[{tcb_sa_off} + {sa_tmp_esp}]
mov eax, gs:[{tcb_sc_off} + {sc_saved_eip}]
mov gs:[{tcb_sa_off} + {sa_tmp_eip}], eax
mov eax, gs:[{tcb_sa_off} + {sa_tmp_eax}]
mov ecx, gs:[{tcb_sa_off} + {sa_tmp_ecx}]
mov edx, gs:[{tcb_sa_off} + {sa_tmp_edx}]
and dword ptr gs:[{tcb_sc_off} + {sc_control}], ~1
.globl __relibc_internal_sigentry_crit_third
__relibc_internal_sigentry_crit_third:
jmp dword ptr gs:[{tcb_sa_off} + {sa_tmp_eip}]
"] <= [
inner = sym inner_fastcall,
sa_tmp_eip = const offset_of!(SigArea, tmp_eip),
sa_tmp_esp = const offset_of!(SigArea, tmp_esp),
sa_tmp_eax = const offset_of!(SigArea, tmp_eax),
sa_tmp_ecx = const offset_of!(SigArea, tmp_ecx),
sa_tmp_edx = const offset_of!(SigArea, tmp_edx),
sa_tmp_mm0 = const offset_of!(SigArea, tmp_mm0),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_altstack_top = const offset_of!(SigArea, altstack_top),
sa_altstack_bottom = const offset_of!(SigArea, altstack_bottom),
sa_pctl = const offset_of!(SigArea, pctl),
sc_control = const offset_of!(Sigcontrol, control_flags),
sc_saved_eflags = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_saved_eip = const offset_of!(Sigcontrol, saved_ip),
sc_word = const offset_of!(Sigcontrol, word),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
tcb_sa_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch),
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
pctl_actions = const offset_of!(SigProcControl, actions),
pctl_sender_infos = const offset_of!(SigProcControl, sender_infos),
pctl_pending = const offset_of!(SigProcControl, pending),
pctl = sym PROC_CONTROL_STRUCT,
STACK_ALIGN = const 16,
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
]);
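// Thread entry glue for rlct_clone (i686): pops the entry point, resets the x87 control word to
// its default 0x037F (the MXCSR reset is still a TODO), and calls the entry point.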
asmfunction!(__relibc_internal_rlct_clone_ret -> usize: ["
# Load registers
pop eax
sub esp, 8
mov DWORD PTR [esp], 0x00001F80
# TODO: ldmxcsr [esp]
mov WORD PTR [esp], 0x037F
fldcw [esp]
add esp, 8
# Call entry point
call eax
ret
"] <= []);
extern "C" {
fn __relibc_internal_sigentry_crit_first();
fn __relibc_internal_sigentry_crit_second();
fn __relibc_internal_sigentry_crit_third();
}
pub unsafe fn arch_pre(stack: &mut SigStack, area: &mut SigArea) -> PosixStackt {
if stack.regs.eip == __relibc_internal_sigentry_crit_first as usize {
let stack_ptr = stack.regs.esp as *const usize;
stack.regs.esp = stack_ptr.read();
stack.regs.eip = stack_ptr.sub(1).read();
} else if stack.regs.eip == __relibc_internal_sigentry_crit_second as usize
|| stack.regs.eip == __relibc_internal_sigentry_crit_third as usize
{
stack.regs.eip = area.tmp_eip;
}
PosixStackt {
sp: stack.regs.esp as *mut (),
size: 0, // TODO
flags: 0, // TODO
}
}
#[no_mangle]
pub unsafe fn manually_enter_trampoline() {
let c = &crate::Tcb::current().unwrap().os_specific.control;
c.control_flags.store(
c.control_flags.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
c.saved_archdep_reg.set(0); // TODO: Just reset DF on x86?
core::arch::asm!("
call 2f
jmp 3f
2:
pop dword ptr gs:[{tcb_sc_off} + {sc_saved_eip}]
jmp __relibc_internal_sigentry
3:
",
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
sc_saved_eip = const offset_of!(Sigcontrol, saved_ip),
);
}
/// Get current stack pointer, weak granularity guarantees.
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!("mov {}, esp", out(reg) sp);
}
sp
}
#[cfg(target_arch = "aarch64")]
pub use self::aarch64::*;
#[cfg(target_arch = "aarch64")]
pub mod aarch64;
#[cfg(target_arch = "x86")]
pub use self::i686::*;
#[cfg(target_arch = "x86")]
pub mod i686;
#[cfg(target_arch = "x86_64")]
pub use self::x86_64::*;
#[cfg(target_arch = "x86_64")]
pub mod x86_64;
#[cfg(target_arch = "riscv64")]
pub use self::riscv64::*;
#[cfg(target_arch = "riscv64")]
pub mod riscv64;
use crate::{
proc::{fork_inner, FdGuard},
signal::{get_sigaltstack, inner_c, PosixStackt, RtSigarea, SigStack},
RtTcb, Tcb,
};
use core::{mem::offset_of, ptr::NonNull, sync::atomic::Ordering};
use syscall::{data::*, error::*};
// Setup a stack starting from the very end of the address space, and then growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 47;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub tmp_sp: u64,
pub tmp_t1: u64,
pub tmp_t2: u64,
pub tmp_t3: u64,
pub tmp_t4: u64,
pub tmp_a0: u64,
pub tmp_a1: u64,
pub tmp_a2: u64,
pub tmp_a7: u64,
pub pctl: usize, // TODO: remove
pub tmp_ip: u64,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
pub altstack_top: usize,
pub altstack_bottom: usize,
pub disable_signals_depth: u64,
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
}
#[repr(C)]
#[derive(Debug, Default)]
pub struct ArchIntRegs {
pub int_regs: [u64; 31],
pub pc: u64,
pub fp_regs: [u64; 32],
pub fcsr: u32,
_pad: u32,
}
/// Deactivate TLS, used before exec() on Redox so the target executable is not tricked into
/// thinking TLS is already initialized as if it were a thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.tp = 0;
let _ = syscall::write(*file, &mut env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "C" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "C" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
crate::child_hook_common(FdGuard::new(new_pid_fd));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
.attribute arch, \"rv64gc\" # rust bug 80608
addi sp, sp, -200
sd s0, 0(sp)
sd s1, 8(sp)
sd s2, 16(sp)
sd s3, 24(sp)
sd s4, 32(sp)
sd s5, 40(sp)
sd s6, 48(sp)
sd s7, 56(sp)
sd s8, 64(sp)
sd s9, 72(sp)
sd s10, 80(sp)
sd s11, 88(sp)
sd ra, 96(sp)
fsd fs0, 104(sp)
fsd fs1, 112(sp)
fsd fs2, 120(sp)
fsd fs3, 128(sp)
fsd fs4, 136(sp)
fsd fs5, 144(sp)
fsd fs6, 152(sp)
fsd fs7, 160(sp)
fsd fs8, 168(sp)
fsd fs9, 176(sp)
fsd fs10, 184(sp)
fsd fs11, 192(sp)
addi sp, sp, -32
mv a0, sp
jal {fork_impl}
addi sp, sp, 32
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
ld s3, 24(sp)
ld s4, 32(sp)
ld s5, 40(sp)
ld s6, 48(sp)
ld s7, 56(sp)
ld s8, 64(sp)
ld s9, 72(sp)
ld s10, 80(sp)
ld s11, 88(sp)
ld ra, 96(sp)
fld fs0, 104(sp)
fld fs1, 112(sp)
fld fs2, 120(sp)
fld fs3, 128(sp)
fld fs4, 136(sp)
fld fs5, 144(sp)
fld fs6, 152(sp)
fld fs7, 160(sp)
fld fs8, 168(sp)
fld fs9, 176(sp)
fld fs10, 184(sp)
fld fs11, 192(sp)
addi sp, sp, 200
ret
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
.attribute arch, \"rv64gc\" # rust bug 80608
ld a0, 0(sp)
ld a1, 8(sp)
jal {child_hook}
mv a0, x0
addi sp, sp, 32
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
ld s3, 24(sp)
ld s4, 32(sp)
ld s5, 40(sp)
ld s6, 48(sp)
ld s7, 56(sp)
ld s8, 64(sp)
ld s9, 72(sp)
ld s10, 80(sp)
ld s11, 88(sp)
ld ra, 96(sp)
fld fs0, 104(sp)
fld fs1, 112(sp)
fld fs2, 120(sp)
fld fs3, 128(sp)
fld fs4, 136(sp)
fld fs5, 144(sp)
fld fs6, 152(sp)
fld fs7, 160(sp)
fld fs8, 168(sp)
fld fs9, 176(sp)
fld fs10, 184(sp)
fld fs11, 192(sp)
addi sp, sp, 200
ret
"] <= [child_hook = sym child_hook]);
asmfunction!(__relibc_internal_sigentry: ["
.attribute arch, \"rv64gc\" # rust bug 80608
// Save some registers
ld t0, -8(tp) // Tcb
sd sp, ({tcb_sa_off} + {sa_tmp_sp})(t0)
sd t1, ({tcb_sa_off} + {sa_tmp_t1})(t0)
sd t2, ({tcb_sa_off} + {sa_tmp_t2})(t0)
sd t3, ({tcb_sa_off} + {sa_tmp_t3})(t0)
sd t4, ({tcb_sa_off} + {sa_tmp_t4})(t0)
ld t4, ({tcb_sa_off} + {sa_off_pctl})(t0)
// First, select signal, always pick first available bit
99:
// Read first signal word
ld t1, ({tcb_sc_off} + {sc_word})(t0)
srli t2, t1, 32 // bitset to low word
and t1, t1, t2 // masked bitset in low word
beqz t1, 3f
// Found in first thread signal word
mv t3, x0
2: andi t2, t1, 1
bnez t2, 10f
addi t3, t3, 1
srli t1, t1, 1
j 2b
// If no unblocked thread signal was found, check for process.
// This is competitive; we need to atomically check if *we* cleared the process-wide pending
// bit, otherwise restart.
3: lw t1, {pctl_off_pending}(t4)
and t1, t1, t2
beqz t1, 3f
// Found in first process signal word
li t3, -1
2: andi t2, t1, 1
addi t3, t3, 1
srli t1, t1, 1
beqz t2, 2b
slli t1, t3, 3 // * 8 == size_of SenderInfo
add t1, t1, t4
ld t1, {pctl_off_sender_infos}(t1)
sd t1, ({tcb_sa_off} + {sa_tmp_id_inf})(t0)
li t1, 1
sll t1, t1, t3
not t1, t1
addi t2, t4, {pctl_off_pending}
amoand.w.aq t2, t1, (t2)
and t1, t1, t2
bne t1, t2, 9f
3:
// Read second signal word - both process and thread simultaneously.
// This must be done since POSIX requires low realtime signals to be picked first.
ld t1, ({tcb_sc_off} + {sc_word} + 8)(t0)
lw t2, ({pctl_off_pending} + 4)(t4)
or t4, t1, t2
srli t2, t1, 32
and t4, t2, t4
beqz t4, 7f
li t3, -1
2: andi t2, t4, 1
addi t3, t3, 1
srli t4, t4, 1
beqz t2, 2b
li t2, 1
sll t2, t2, t3
and t1, t1, t2
addi t3, t3, 32
bnez t1, 10f // thread signal
// otherwise, try (competitively) dequeueing realtime signal
sd a0, ({tcb_sa_off} + {sa_tmp_a0})(t0)
sd a1, ({tcb_sa_off} + {sa_tmp_a1})(t0)
sd a2, ({tcb_sa_off} + {sa_tmp_a2})(t0)
sd a7, ({tcb_sa_off} + {sa_tmp_a7})(t0)
li a0, {SYS_SIGDEQUEUE}
addi a1, t3, -32
add a2, t0, {tcb_sa_off} + {sa_tmp_rt_inf} // out pointer of dequeued realtime sig
ecall
bnez a0, 99b // assumes error can only be EAGAIN
j 9f
10: // thread signal. t3 holds signal number
srli t1, t3, 5
bnez t1, 2f // FIXME senderinfo?
slli t2, t3, 3 // * 8 == size_of SenderInfo
add t2, t2, t0
ld t2, ({tcb_sc_off} + {sc_sender_infos})(t2)
sd t2, ({tcb_sa_off} + {sa_tmp_id_inf})(t0)
2: andi t4, t3, 31
li t2, 1
sll t2, t2, t4
not t2, t2
slli t1, t1, 3
add t1, t1, t0
addi t1, t1, {tcb_sc_off} + {sc_word}
amoand.w.aq x0, t2, (t1)
addi t3, t3, 64 // indicate signal was targeted at thread
9: // process signal; t3 holds the signal number
// By now we have selected a signal, stored in t3 (6-bit). We now need to choose whether or
// not to switch to the alternate signal stack. If SA_ONSTACK is clear for this signal, then
// skip the sigaltstack logic.
ld t4, ({tcb_sa_off} + {sa_off_pctl})(t0)
andi t1, t3, 63
slli t1, t1, 4 // * 16 == size_of RawAction
add t1, t1, t4
ld t1, {pctl_off_actions}(t1)
slli t1, t1, 63-58 // SA_ONSTACK in sign bit
bgez t1, 3f
// If current SP is outside the altstack region, switch to altstack
ld t1, ({tcb_sa_off} + {sa_altstack_top})(t0)
bgtu sp, t1, 2f
ld t2, ({tcb_sa_off} + {sa_altstack_bottom})(t0)
bgtu sp, t2, 3f
2: mv sp, t1
3:
// form mcontext on stack
addi sp, sp, -33 * 8
fsd f0, (0 * 8)(sp)
fsd f1, (1 * 8)(sp)
fsd f2, (2 * 8)(sp)
fsd f3, (3 * 8)(sp)
fsd f4, (4 * 8)(sp)
fsd f5, (5 * 8)(sp)
fsd f6, (6 * 8)(sp)
fsd f7, (7 * 8)(sp)
fsd f8, (8 * 8)(sp)
fsd f9, (9 * 8)(sp)
fsd f10, (10 * 8)(sp)
fsd f11, (11 * 8)(sp)
fsd f12, (12 * 8)(sp)
fsd f13, (13 * 8)(sp)
fsd f14, (14 * 8)(sp)
fsd f15, (15 * 8)(sp)
fsd f16, (16 * 8)(sp)
fsd f17, (17 * 8)(sp)
fsd f18, (18 * 8)(sp)
fsd f19, (19 * 8)(sp)
fsd f20, (20 * 8)(sp)
fsd f21, (21 * 8)(sp)
fsd f22, (22 * 8)(sp)
fsd f23, (23 * 8)(sp)
fsd f24, (24 * 8)(sp)
fsd f25, (25 * 8)(sp)
fsd f26, (26 * 8)(sp)
fsd f27, (27 * 8)(sp)
fsd f28, (28 * 8)(sp)
fsd f29, (29 * 8)(sp)
fsd f30, (30 * 8)(sp)
fsd f31, (31 * 8)(sp)
csrr t1, fcsr
sw t1, (32 * 8)(sp)
addi sp, sp, -32 * 8
sd x1, 0(sp)
ld t1, ({tcb_sa_off} + {sa_tmp_sp})(t0)
sd t1, (1 * 8)(sp) // x2 is sp
sd x3, (2 * 8)(sp)
sd x4, (3 * 8)(sp)
ld t1, ({tcb_sc_off} + {sc_saved_t0})(t0)
sd t1, (4 * 8)(sp) // x5 is t0
ld t1, ({tcb_sa_off} + {sa_tmp_t1})(t0)
sd t1, (5 * 8)(sp) // x6 is t1
ld t1, ({tcb_sa_off} + {sa_tmp_t2})(t0)
sd t1, (6 * 8)(sp) // x7 is t2
sd x8, (7 * 8)(sp)
sd x9, (8 * 8)(sp)
sd x10, (9 * 8)(sp)
sd x11, (10 * 8)(sp)
sd x12, (11 * 8)(sp)
sd x13, (12 * 8)(sp)
sd x14, (13 * 8)(sp)
sd x15, (14 * 8)(sp)
sd x16, (15 * 8)(sp)
sd x17, (16 * 8)(sp)
sd x18, (17 * 8)(sp)
sd x19, (18 * 8)(sp)
sd x20, (19 * 8)(sp)
sd x21, (20 * 8)(sp)
sd x22, (21 * 8)(sp)
sd x23, (22 * 8)(sp)
sd x24, (23 * 8)(sp)
sd x25, (24 * 8)(sp)
sd x26, (25 * 8)(sp)
sd x27, (26 * 8)(sp)
ld t1, ({tcb_sa_off} + {sa_tmp_t3})(t0)
sd t1, (27 * 8)(sp) // t3 is x28
ld t1, ({tcb_sa_off} + {sa_tmp_t4})(t0)
sd t1, (28 * 8)(sp) // t4 is x29
sd x30, (29 * 8)(sp)
sd x31, (30 * 8)(sp)
ld t1, ({tcb_sc_off} + {sc_saved_ip})(t0)
sd t1, (31 * 8)(sp)
// form ucontext
addi sp, sp, -64
sw t3, 60(sp)
mv t0, sp
jal {inner}
addi sp, sp, 64
addi t0, sp, 32 * 8
fld f0, (0 * 8)(t0)
fld f1, (1 * 8)(t0)
fld f2, (2 * 8)(t0)
fld f3, (3 * 8)(t0)
fld f4, (4 * 8)(t0)
fld f5, (5 * 8)(t0)
fld f6, (6 * 8)(t0)
fld f7, (7 * 8)(t0)
fld f8, (8 * 8)(t0)
fld f9, (9 * 8)(t0)
fld f10, (10 * 8)(t0)
fld f11, (11 * 8)(t0)
fld f12, (12 * 8)(t0)
fld f13, (13 * 8)(t0)
fld f14, (14 * 8)(t0)
fld f15, (15 * 8)(t0)
fld f16, (16 * 8)(t0)
fld f17, (17 * 8)(t0)
fld f18, (18 * 8)(t0)
fld f19, (19 * 8)(t0)
fld f20, (20 * 8)(t0)
fld f21, (21 * 8)(t0)
fld f22, (22 * 8)(t0)
fld f23, (23 * 8)(t0)
fld f24, (24 * 8)(t0)
fld f25, (25 * 8)(t0)
fld f26, (26 * 8)(t0)
fld f27, (27 * 8)(t0)
fld f28, (28 * 8)(t0)
fld f29, (29 * 8)(t0)
fld f30, (30 * 8)(t0)
fld f31, (31 * 8)(t0)
lw t1, (32 * 8)(t0)
csrw fcsr, t1
ld x1, 0(sp)
// skip sp
// skip gp
ld x4, (3 * 8)(sp)
ld x5, (4 * 8)(sp)
ld x6, (5 * 8)(sp)
ld x7, (6 * 8)(sp)
ld x8, (7 * 8)(sp)
ld x9, (8 * 8)(sp)
ld x10, (9 * 8)(sp)
ld x11, (10 * 8)(sp)
ld x12, (11 * 8)(sp)
ld x13, (12 * 8)(sp)
ld x14, (13 * 8)(sp)
ld x15, (14 * 8)(sp)
ld x16, (15 * 8)(sp)
ld x17, (16 * 8)(sp)
ld x18, (17 * 8)(sp)
ld x19, (18 * 8)(sp)
ld x20, (19 * 8)(sp)
ld x21, (20 * 8)(sp)
ld x22, (21 * 8)(sp)
ld x23, (22 * 8)(sp)
ld x24, (23 * 8)(sp)
ld x25, (24 * 8)(sp)
ld x26, (25 * 8)(sp)
ld x27, (26 * 8)(sp)
ld x28, (27 * 8)(sp)
ld x29, (28 * 8)(sp)
ld x30, (29 * 8)(sp)
ld x31, (30 * 8)(sp)
ld gp, (31 * 8)(sp) // new IP; this clobbers register x3/gp which is ABI reserved
.global __relibc_internal_sigentry_crit_first
__relibc_internal_sigentry_crit_first:
ld sp, (1 * 8)(sp)
.global __relibc_internal_sigentry_crit_second
__relibc_internal_sigentry_crit_second:
jr gp
7:
// A spurious signal occurred. Signals are still disabled here, but will need to be re-enabled.
// restore stack
ld sp, ({tcb_sa_off} + {sa_tmp_sp})(t0)
// move saved IP away from control, allowing arch_pre to save us if interrupted.
ld t1, ({tcb_sc_off} + {sc_saved_ip})(t0)
sd t1, ({tcb_sa_off} + {sa_tmp_ip})(t0)
// restore regs
ld t2, ({tcb_sa_off} + {sa_tmp_t2})(t0)
ld t3, ({tcb_sa_off} + {sa_tmp_t3})(t0)
ld t4, ({tcb_sa_off} + {sa_tmp_t4})(t0)
// move saved t0 away from control as well
mv t1, t0
ld t0, ({tcb_sc_off} + {sc_saved_t0})(t0)
// Re-enable signals. This code can be interrupted by a new signal after this point, so we
// need to define 'crit_third'.
ld gp, ({tcb_sc_off} + {sc_control})(t1)
andi gp, gp, ~1
sd gp, ({tcb_sc_off} + {sc_control})(t1)
.globl __relibc_internal_sigentry_crit_third
__relibc_internal_sigentry_crit_third:
ld gp, ({tcb_sa_off} + {sa_tmp_ip})(t1)
.globl __relibc_internal_sigentry_crit_fourth
__relibc_internal_sigentry_crit_fourth:
ld t1, ({tcb_sa_off} + {sa_tmp_t1})(t1)
.globl __relibc_internal_sigentry_crit_fifth
__relibc_internal_sigentry_crit_fifth:
jr gp
"] <= [
tcb_sc_off = const (offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control)),
sc_word = const offset_of!(Sigcontrol, word),
sc_saved_t0 = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_saved_ip = const offset_of!(Sigcontrol, saved_ip),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
sc_control = const offset_of!(Sigcontrol, control_flags),
tcb_sa_off = const (offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch)),
sa_off_pctl = const offset_of!(SigArea, pctl),
sa_tmp_sp = const offset_of!(SigArea, tmp_sp),
sa_tmp_t1 = const offset_of!(SigArea, tmp_t1),
sa_tmp_t2 = const offset_of!(SigArea, tmp_t2),
sa_tmp_t3 = const offset_of!(SigArea, tmp_t3),
sa_tmp_t4 = const offset_of!(SigArea, tmp_t4),
sa_tmp_a0 = const offset_of!(SigArea, tmp_a0),
sa_tmp_a1 = const offset_of!(SigArea, tmp_a1),
sa_tmp_a2 = const offset_of!(SigArea, tmp_a2),
sa_tmp_a7 = const offset_of!(SigArea, tmp_a7),
sa_tmp_ip = const offset_of!(SigArea, tmp_ip),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_altstack_top = const offset_of!(SigArea, altstack_top),
sa_altstack_bottom = const offset_of!(SigArea, altstack_bottom),
pctl_off_actions = const offset_of!(SigProcControl, actions),
inner = sym inner_c,
pctl_off_pending = const offset_of!(SigProcControl, pending),
pctl_off_sender_infos = const offset_of!(SigProcControl, sender_infos),
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
]);
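// Thread entry glue for rlct_clone (riscv64): pops the entry point into t0 and up to six
// arguments into a0..a5 from the new thread's stack, then calls the entry point.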
asmfunction!(__relibc_internal_rlct_clone_ret: ["
ld t0, 0(sp)
ld a0, 8(sp)
ld a1, 16(sp)
ld a2, 24(sp)
ld a3, 32(sp)
ld a4, 40(sp)
ld a5, 48(sp)
addi sp, sp, 56
jalr t0
ret
"] <= []);
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!(
"mv {}, sp",
out(reg) sp,
options(nomem));
}
sp
}
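/// Manually enter the signal trampoline: inhibit further delivery, store the resume address into
/// `saved_ip`, and jump into __relibc_internal_sigentry as if a signal had just been delivered.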
pub unsafe fn manually_enter_trampoline() {
let ctl = &Tcb::current().unwrap().os_specific.control;
ctl.control_flags.store(
ctl.control_flags.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
ctl.saved_archdep_reg.set(0);
let ip_location = &ctl.saved_ip as *const _ as usize;
core::arch::asm!("
jal 2f
j 3f
2:
sd ra, 0(t0)
la t0, __relibc_internal_sigentry
jalr x0, t0
3:
", inout("t0") ip_location => _, out("ra") _);
}
extern "C" {
fn __relibc_internal_sigentry_crit_first();
fn __relibc_internal_sigentry_crit_second();
fn __relibc_internal_sigentry_crit_third();
fn __relibc_internal_sigentry_crit_fourth();
fn __relibc_internal_sigentry_crit_fifth();
}
pub unsafe fn arch_pre(stack: &mut SigStack, area: &mut SigArea) -> PosixStackt {
// It is impossible to update SP and PC atomically. Instead, we abuse the fact that
// signals are disabled in the prologue of the signal trampoline, which allows us to emulate
// atomicity inside the critical section, consisting of one instruction at each of the
// 'crit_first' through 'crit_fifth' labels; see asm.
if stack.regs.pc == __relibc_internal_sigentry_crit_first as u64 {
// Reexecute 'ld sp, (1 * 8)(sp)'
let stack_ptr = stack.regs.int_regs[1] as *const u64; // x2
stack.regs.int_regs[1] = stack_ptr.add(1).read();
// and 'jr gp' steps.
stack.regs.pc = stack.regs.int_regs[2];
} else if stack.regs.pc == __relibc_internal_sigentry_crit_second as u64
|| stack.regs.pc == __relibc_internal_sigentry_crit_fifth as u64
{
// just reexecute the jump
stack.regs.pc = stack.regs.int_regs[2];
} else if stack.regs.pc == __relibc_internal_sigentry_crit_third as u64 {
// ld gp, ({tcb_sa_off} + {sa_tmp_ip})(t1)
stack.regs.int_regs[2] = area.tmp_ip;
// ld t1, ({tcb_sa_off} + {sa_tmp_t1})(t1)
stack.regs.int_regs[5] = area.tmp_t1;
// jr gp
stack.regs.pc = stack.regs.int_regs[2];
} else if stack.regs.pc == __relibc_internal_sigentry_crit_fourth as u64 {
// ld t1, ({tcb_sa_off} + {sa_tmp_t1})(t1)
stack.regs.int_regs[5] = area.tmp_t1;
// jr gp
stack.regs.pc = stack.regs.int_regs[2];
}
get_sigaltstack(area, stack.regs.int_regs[1] as usize).into()
}
use core::{
mem::offset_of,
ptr::NonNull,
sync::atomic::{AtomicU8, Ordering},
};
use syscall::{
data::{SigProcControl, Sigcontrol},
error::*,
RtSigInfo,
};
use crate::{
proc::{fork_inner, FdGuard},
signal::{
get_sigaltstack, inner_c, PosixStackt, RtSigarea, SigStack, Sigaltstack,
PROC_CONTROL_STRUCT,
},
RtTcb, Tcb,
};
// Setup a stack starting from the very end of the address space, and then growing downwards.
pub(crate) const STACK_TOP: usize = 1 << 47;
pub(crate) const STACK_SIZE: usize = 1024 * 1024;
#[derive(Debug, Default)]
#[repr(C)]
pub struct SigArea {
pub tmp_rip: usize,
pub tmp_rsp: usize,
pub tmp_rax: usize,
pub tmp_rdx: usize,
pub tmp_rdi: usize,
pub tmp_rsi: usize,
pub tmp_rt_inf: RtSigInfo,
pub tmp_id_inf: u64,
pub altstack_top: usize,
pub altstack_bottom: usize,
pub disable_signals_depth: u64,
pub last_sig_was_restart: bool,
pub last_sigstack: Option<NonNull<SigStack>>,
}
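// Note: the fields from `r15` onwards deliberately overlap the last 48 "available" bytes of the
// 512-byte FXSAVE image. FXSAVE64 does not write those bytes, so the trampoline only reserves
// (29 + 16) * 16 bytes and lets the pushed GPRs double as that tail (see the offsets below).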
#[repr(C, align(16))]
#[derive(Debug, Default)]
pub struct ArchIntRegs {
pub ymm_upper: [u128; 16],
pub fxsave: [u128; 29],
pub r15: usize, // fxsave "available" +0
pub r14: usize, // available +8
pub r13: usize, // available +16
pub r12: usize, // available +24
pub rbp: usize, // available +32
pub rbx: usize, // available +40
pub r11: usize, // outside fxsave, and so on
pub r10: usize,
pub r9: usize,
pub r8: usize,
pub rax: usize,
pub rcx: usize,
pub rdx: usize,
pub rsi: usize,
pub rdi: usize,
pub rflags: usize,
pub rip: usize,
pub rsp: usize,
}
/// Deactivate TLS, used before exec() on Redox so the target executable is not tricked into
/// thinking TLS is already initialized as if it were a thread.
pub unsafe fn deactivate_tcb(open_via_dup: usize) -> Result<()> {
let mut env = syscall::EnvRegisters::default();
let file = FdGuard::new(syscall::dup(open_via_dup, b"regs/env")?);
env.fsbase = 0;
env.gsbase = 0;
let _ = syscall::write(*file, &mut env)?;
Ok(())
}
pub fn copy_env_regs(cur_pid_fd: usize, new_pid_fd: usize) -> Result<()> {
// Copy environment registers.
{
let cur_env_regs_fd = FdGuard::new(syscall::dup(cur_pid_fd, b"regs/env")?);
let new_env_regs_fd = FdGuard::new(syscall::dup(new_pid_fd, b"regs/env")?);
let mut env_regs = syscall::EnvRegisters::default();
let _ = syscall::read(*cur_env_regs_fd, &mut env_regs)?;
let _ = syscall::write(*new_env_regs_fd, &env_regs)?;
}
Ok(())
}
unsafe extern "sysv64" fn fork_impl(initial_rsp: *mut usize) -> usize {
Error::mux(fork_inner(initial_rsp))
}
unsafe extern "sysv64" fn child_hook(cur_filetable_fd: usize, new_pid_fd: usize) {
let _ = syscall::close(cur_filetable_fd);
crate::child_hook_common(FdGuard::new(new_pid_fd));
}
asmfunction!(__relibc_internal_fork_wrapper -> usize: ["
push rbp
mov rbp, rsp
push rbx
push rbp
push r12
push r13
push r14
push r15
sub rsp, 32
stmxcsr [rsp+16]
fnstcw [rsp+24]
mov rdi, rsp
call {fork_impl}
add rsp, 80
pop rbp
ret
"] <= [fork_impl = sym fork_impl]);
asmfunction!(__relibc_internal_fork_ret: ["
mov rdi, [rsp]
mov rsi, [rsp + 8]
call {child_hook}
ldmxcsr [rsp + 16]
fldcw [rsp + 24]
xor rax, rax
add rsp, 32
pop r15
pop r14
pop r13
pop r12
pop rbp
pop rbx
pop rbp
ret
"] <= [child_hook = sym child_hook]);
asmfunction!(__relibc_internal_rlct_clone_ret: ["
# Load registers
pop rax
pop rdi
pop rsi
pop rdx
pop rcx
pop r8
pop r9
mov DWORD PTR [rsp - 8], 0x00001F80
ldmxcsr [rsp - 8]
mov WORD PTR [rsp - 8], 0x037F
fldcw [rsp - 8]
# Call entry point
call rax
ret
"] <= []);
asmfunction!(__relibc_internal_sigentry: ["
// Save some registers
mov fs:[{tcb_sa_off} + {sa_tmp_rsp}], rsp
mov fs:[{tcb_sa_off} + {sa_tmp_rax}], rax
mov fs:[{tcb_sa_off} + {sa_tmp_rdx}], rdx
mov fs:[{tcb_sa_off} + {sa_tmp_rdi}], rdi
mov fs:[{tcb_sa_off} + {sa_tmp_rsi}], rsi
// First, select signal, always pick first available bit
1:
// Read standard signal word - first targeting this thread
mov rax, fs:[{tcb_sc_off} + {sc_word}]
mov rdx, rax
shr rdx, 32
and eax, edx
bsf eax, eax
jnz 2f
// If no unblocked thread signal was found, check for process.
// This is competitive; we need to atomically check if *we* cleared the process-wide pending
// bit, otherwise restart.
mov eax, [rip + {pctl} + {pctl_off_pending}]
and eax, edx
bsf eax, eax
jz 8f
lea rdi, [rip + {pctl} + {pctl_off_sender_infos}]
mov rdi, [rdi + rax * 8]
lock btr [rip + {pctl} + {pctl_off_pending}], eax
mov fs:[{tcb_sa_off} + {sa_tmp_id_inf}], rdi
jc 9f
8:
// Read second signal word - both process and thread simultaneously.
// This must be done since POSIX requires low realtime signals to be picked first.
mov edx, fs:[{tcb_sc_off} + {sc_word} + 8]
mov eax, [rip + {pctl} + {pctl_off_pending} + 4]
or eax, edx
and eax, fs:[{tcb_sc_off} + {sc_word} + 12]
bsf eax, eax
jz 7f
bt edx, eax // check if signal was sent to thread specifically
jc 2f // if so, continue as usual
// otherwise, try (competitively) dequeueing realtime signal
mov esi, eax
mov eax, {SYS_SIGDEQUEUE}
mov rdi, fs:[0]
add rdi, {tcb_sa_off} + {sa_tmp_rt_inf} // out pointer of dequeued realtime sig
syscall
test eax, eax
jnz 1b // assumes error can only be EAGAIN
lea eax, [esi + 32]
jmp 9f
2:
mov edx, eax
shr edx, 5
mov rdi, fs:[{tcb_sc_off} + {sc_sender_infos} + eax * 8]
lock btr fs:[{tcb_sc_off} + {sc_word} + edx * 4], eax
mov fs:[{tcb_sa_off} + {sa_tmp_id_inf}], rdi
add eax, 64 // indicate signal was targeted at thread
9:
sub rsp, {REDZONE_SIZE}
and rsp, -{STACK_ALIGN}
// By now we have selected a signal, stored in eax (6-bit). We now need to choose whether or
// not to switch to the alternate signal stack. If SA_ONSTACK is clear for this signal, then
// skip the sigaltstack logic.
lea rdx, [rip + {pctl} + {pctl_off_actions}]
mov ecx, eax
and ecx, 63
// LEA doesn't support 16x, so just do two x8s.
lea rdx, [rdx + 8 * rcx]
lea rdx, [rdx + 8 * rcx]
bt qword ptr [rdx], {SA_ONSTACK_BIT}
jnc 4f
// Otherwise, sigaltstack handling applies to this signal. The sigaltstack being disabled is
// equivalent to setting 'top' to usize::MAX and 'bottom' to 0.
// If current RSP is above altstack region, switch to altstack
mov rdx, fs:[{tcb_sa_off} + {sa_altstack_top}]
cmp rsp, rdx
cmova rsp, rdx
// If current RSP is below altstack region, also switch to altstack
cmp rsp, fs:[{tcb_sa_off} + {sa_altstack_bottom}]
cmovbe rsp, rdx
.p2align 4
4:
// Now that we have a stack, we can finally start initializing the signal stack!
push fs:[{tcb_sa_off} + {sa_tmp_rsp}]
push fs:[{tcb_sc_off} + {sc_saved_rip}]
push fs:[{tcb_sc_off} + {sc_saved_rflags}]
push fs:[{tcb_sa_off} + {sa_tmp_rdi}]
push fs:[{tcb_sa_off} + {sa_tmp_rsi}]
push fs:[{tcb_sa_off} + {sa_tmp_rdx}]
push rcx
push fs:[{tcb_sa_off} + {sa_tmp_rax}]
push r8
push r9
push r10
push r11
push rbx
push rbp
push r12
push r13
push r14
push r15
sub rsp, (29 + 16) * 16 // ymm upper halves + fxsave image minus its 48 'available' bytes
fxsave64 [rsp + 16 * 16]
// TODO: self-modifying?
cmp byte ptr [rip + {supports_avx}], 0
je 5f
// Prefer vextractf128 over vextracti128 since the former only requires AVX version 1.
vextractf128 [rsp + 15 * 16], ymm0, 1
vextractf128 [rsp + 14 * 16], ymm1, 1
vextractf128 [rsp + 13 * 16], ymm2, 1
vextractf128 [rsp + 12 * 16], ymm3, 1
vextractf128 [rsp + 11 * 16], ymm4, 1
vextractf128 [rsp + 10 * 16], ymm5, 1
vextractf128 [rsp + 9 * 16], ymm6, 1
vextractf128 [rsp + 8 * 16], ymm7, 1
vextractf128 [rsp + 7 * 16], ymm8, 1
vextractf128 [rsp + 6 * 16], ymm9, 1
vextractf128 [rsp + 5 * 16], ymm10, 1
vextractf128 [rsp + 4 * 16], ymm11, 1
vextractf128 [rsp + 3 * 16], ymm12, 1
vextractf128 [rsp + 2 * 16], ymm13, 1
vextractf128 [rsp + 16], ymm14, 1
vextractf128 [rsp], ymm15, 1
5:
mov [rsp - 4], eax
sub rsp, 64 // alloc space for ucontext fields
mov rdi, rsp
call {inner}
add rsp, 64
fxrstor64 [rsp + 16 * 16]
cmp byte ptr [rip + {supports_avx}], 0
je 6f
vinsertf128 ymm0, ymm0, [rsp + 15 * 16], 1
vinsertf128 ymm1, ymm1, [rsp + 14 * 16], 1
vinsertf128 ymm2, ymm2, [rsp + 13 * 16], 1
vinsertf128 ymm3, ymm3, [rsp + 12 * 16], 1
vinsertf128 ymm4, ymm4, [rsp + 11 * 16], 1
vinsertf128 ymm5, ymm5, [rsp + 10 * 16], 1
vinsertf128 ymm6, ymm6, [rsp + 9 * 16], 1
vinsertf128 ymm7, ymm7, [rsp + 8 * 16], 1
vinsertf128 ymm8, ymm8, [rsp + 7 * 16], 1
vinsertf128 ymm9, ymm9, [rsp + 6 * 16], 1
vinsertf128 ymm10, ymm10, [rsp + 5 * 16], 1
vinsertf128 ymm11, ymm11, [rsp + 4 * 16], 1
vinsertf128 ymm12, ymm12, [rsp + 3 * 16], 1
vinsertf128 ymm13, ymm13, [rsp + 2 * 16], 1
vinsertf128 ymm14, ymm14, [rsp + 16], 1
vinsertf128 ymm15, ymm15, [rsp], 1
6:
add rsp, (29 + 16) * 16
pop r15
pop r14
pop r13
pop r12
pop rbp
pop rbx
pop r11
pop r10
pop r9
pop r8
pop rax
pop rcx
pop rdx
pop rsi
pop rdi
popfq
pop qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
// x86 lacks atomic instructions for setting both the stack and instruction pointer
// simultaneously, except the slow microcoded IRETQ instruction. Thus, we let the arch_pre
// function emulate atomicity between the pop rsp and indirect jump.
.globl __relibc_internal_sigentry_crit_first
__relibc_internal_sigentry_crit_first:
pop rsp
.globl __relibc_internal_sigentry_crit_second
__relibc_internal_sigentry_crit_second:
jmp qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
7:
// A spurious signal occurred. Signals are still disabled here, but will need to be re-enabled.
// restore flags
mov rax, fs:[0] // load FS base
// TODO: Use lahf/sahf rather than pushfq/popfq?
lea rsp, [rax + {tcb_sc_off} + {sc_saved_rflags}]
popfq
// restore stack
mov rsp, fs:[{tcb_sa_off} + {sa_tmp_rsp}]
// move saved RIP away from control, allowing arch_pre to save us if interrupted.
mov rax, fs:[{tcb_sc_off} + {sc_saved_rip}]
mov fs:[{tcb_sa_off} + {sa_tmp_rip}], rax
// restore regs
mov rax, fs:[{tcb_sa_off} + {sa_tmp_rax}]
mov rdx, fs:[{tcb_sa_off} + {sa_tmp_rdx}]
// Re-enable signals. This code can be interrupted by a new signal after this point, so we
// need to define 'crit_third'.
and qword ptr fs:[{tcb_sc_off} + {sc_control}], ~1
.globl __relibc_internal_sigentry_crit_third
__relibc_internal_sigentry_crit_third:
jmp qword ptr fs:[{tcb_sa_off} + {sa_tmp_rip}]
"] <= [
inner = sym inner_c,
sa_tmp_rip = const offset_of!(SigArea, tmp_rip),
sa_tmp_rsp = const offset_of!(SigArea, tmp_rsp),
sa_tmp_rax = const offset_of!(SigArea, tmp_rax),
sa_tmp_rdx = const offset_of!(SigArea, tmp_rdx),
sa_tmp_rdi = const offset_of!(SigArea, tmp_rdi),
sa_tmp_rsi = const offset_of!(SigArea, tmp_rsi),
sa_tmp_rt_inf = const offset_of!(SigArea, tmp_rt_inf),
sa_tmp_id_inf = const offset_of!(SigArea, tmp_id_inf),
sa_altstack_top = const offset_of!(SigArea, altstack_top),
sa_altstack_bottom = const offset_of!(SigArea, altstack_bottom),
sc_saved_rflags = const offset_of!(Sigcontrol, saved_archdep_reg),
sc_saved_rip = const offset_of!(Sigcontrol, saved_ip),
sc_word = const offset_of!(Sigcontrol, word),
sc_sender_infos = const offset_of!(Sigcontrol, sender_infos),
sc_control = const offset_of!(Sigcontrol, control_flags),
tcb_sa_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, arch),
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
pctl_off_actions = const offset_of!(SigProcControl, actions),
pctl_off_pending = const offset_of!(SigProcControl, pending),
pctl_off_sender_infos = const offset_of!(SigProcControl, sender_infos),
pctl = sym PROC_CONTROL_STRUCT,
supports_avx = sym SUPPORTS_AVX,
REDZONE_SIZE = const 128,
STACK_ALIGN = const 16,
SA_ONSTACK_BIT = const 58, // (1 << 58) >> 32 = 0x0400_0000
SYS_SIGDEQUEUE = const syscall::SYS_SIGDEQUEUE,
]);
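// A minimal sketch (not used by the trampoline; the function name is illustrative only) of the
// selection rule the assembly above implements for one packed signal word: the low 32 bits hold
// pending bits, the high 32 bits hold the unblocked mask, and the lowest deliverable bit wins,
// preferring signals targeted at this thread over process-wide ones.
#[allow(dead_code)]
fn select_std_signal_sketch(thread_word: u64, proc_pending: u32) -> Option<u32> {
let allowed = (thread_word >> 32) as u32;
let thread_deliverable = thread_word as u32 & allowed;
if thread_deliverable != 0 {
// Corresponds to the first `bsf` on the thread word.
return Some(thread_deliverable.trailing_zeros());
}
let proc_deliverable = proc_pending & allowed;
if proc_deliverable != 0 {
// Corresponds to the `bsf` on the process-wide pending word.
return Some(proc_deliverable.trailing_zeros());
}
None
}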
extern "C" {
fn __relibc_internal_sigentry_crit_first();
fn __relibc_internal_sigentry_crit_second();
fn __relibc_internal_sigentry_crit_third();
}
/// Fixes some edge cases, and calculates the value for uc_stack.
pub unsafe fn arch_pre(stack: &mut SigStack, area: &mut SigArea) -> PosixStackt {
// It is impossible to update RSP and RIP atomically on x86_64, without using IRETQ, which is
// almost as slow as calling a SIGRETURN syscall would be. Instead, we abuse the fact that
// signals are disabled in the prologue of the signal trampoline, which allows us to emulate
// atomicity inside the critical section, consisting of one instruction at 'crit_first', one at
// 'crit_second', and one at 'crit_third', see asm.
if stack.regs.rip == __relibc_internal_sigentry_crit_first as usize {
// Reexecute pop rsp and jump steps. This case needs to be different from the one below,
// since rsp has not been overwritten with the previous context's stack, just yet. At this
// point, we know [rsp+0] contains the saved RSP, and [rsp-8] contains the saved RIP.
let stack_ptr = stack.regs.rsp as *const usize;
stack.regs.rsp = stack_ptr.read();
stack.regs.rip = stack_ptr.sub(1).read();
} else if stack.regs.rip == __relibc_internal_sigentry_crit_second as usize
|| stack.regs.rip == __relibc_internal_sigentry_crit_third as usize
{
// Almost finished, just reexecute the jump before tmp_rip is overwritten by this
// deeper-level signal.
stack.regs.rip = area.tmp_rip;
}
get_sigaltstack(area, stack.regs.rsp).into()
}
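/// Whether the CPU supports AVX (nonzero if so). Consulted by the signal trampoline to decide
/// whether the upper halves of ymm0..ymm15 must be saved and restored in addition to the FXSAVE
/// state; expected to be initialized once during startup.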
pub(crate) static SUPPORTS_AVX: AtomicU8 = AtomicU8::new(0);
// __relibc will be prepended to the name, so no_mangle is fine
#[no_mangle]
pub unsafe fn manually_enter_trampoline() {
let c = &Tcb::current().unwrap().os_specific.control;
c.control_flags.store(
c.control_flags.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(),
Ordering::Release,
);
c.saved_archdep_reg.set(0); // TODO: Just reset DF on x86?
core::arch::asm!("
lea rax, [rip + 2f]
mov fs:[{tcb_sc_off} + {sc_saved_rip}], rax
jmp __relibc_internal_sigentry
2:
",
out("rax") _,
tcb_sc_off = const offset_of!(crate::Tcb, os_specific) + offset_of!(RtSigarea, control),
sc_saved_rip = const offset_of!(Sigcontrol, saved_ip),
);
}
/// Get current stack pointer, weak granularity guarantees.
pub fn current_sp() -> usize {
let sp: usize;
unsafe {
core::arch::asm!("mov {}, rsp", out(reg) sp);
}
sp
}