From 434e799f2a884824f0b8b1c942e5de16d0f7929f Mon Sep 17 00:00:00 2001
From: Jeremy Soller <jeremy@system76.com>
Date: Sun, 14 Apr 2019 19:05:25 -0600
Subject: [PATCH] Place TCB at thread-specific location to avoid overlapping
 TCBs

---
 src/arch/x86_64/gdt.rs |  8 +++++-
 src/consts.rs          |  1 +
 src/context/switch.rs  |  1 +
 src/syscall/process.rs | 62 ++++++++++++++++++++++++++++++------------
 4 files changed, 53 insertions(+), 19 deletions(-)

diff --git a/src/arch/x86_64/gdt.rs b/src/arch/x86_64/gdt.rs
index 9228093c..1e56c4e0 100644
--- a/src/arch/x86_64/gdt.rs
+++ b/src/arch/x86_64/gdt.rs
@@ -8,6 +8,8 @@ use x86::shared::dtables::{self, DescriptorTablePointer};
 use x86::shared::segmentation::{self, SegmentDescriptor, SegmentSelector};
 use x86::shared::task;
 
+use paging::PAGE_SIZE;
+
 pub const GDT_NULL: usize = 0;
 pub const GDT_KERNEL_CODE: usize = 1;
 pub const GDT_KERNEL_DATA: usize = 2;
@@ -91,6 +93,10 @@ pub static mut TSS: TaskStateSegment = TaskStateSegment {
     iomap_base: 0xFFFF
 };
 
+pub unsafe fn set_tcb(pid: usize) {
+    GDT[GDT_USER_TLS].set_offset((::USER_TCB_OFFSET + pid * PAGE_SIZE) as u32);
+}
+
 #[cfg(feature = "pti")]
 pub unsafe fn set_tss_stack(stack: usize) {
     use arch::x86_64::pti::{PTI_CPU_STACK, PTI_CONTEXT_STACK};
@@ -141,7 +147,7 @@ pub unsafe fn init_paging(tcb_offset: usize, stack_offset: usize) {
     GDT[GDT_KERNEL_TLS].set_offset(tcb_offset as u32);
 
     // Set the User TLS segment to the offset of the user TCB
-    GDT[GDT_USER_TLS].set_offset(::USER_TCB_OFFSET as u32);
+    set_tcb(0);
 
     // We can now access our TSS, which is a thread local
     GDT[GDT_TSS].set_offset(&TSS as *const _ as u32);
diff --git a/src/consts.rs b/src/consts.rs
index 3bf54f6f..d927ba52 100644
--- a/src/consts.rs
+++ b/src/consts.rs
@@ -32,6 +32,7 @@ pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK)/PML4_SIZE;
 
 /// Offset to user TCB
+/// Each process has 4096 bytes, at an offset of 4096 * PID
 pub const USER_TCB_OFFSET: usize = 0xB000_0000;
 
 /// Offset to user arguments
diff --git a/src/context/switch.rs b/src/context/switch.rs
index 7aa83c30..07df3d2a 100644
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -130,6 +130,7 @@ pub unsafe fn switch() -> bool {
         if let Some(ref stack) = (*to_ptr).kstack {
             gdt::set_tss_stack(stack.as_ptr() as usize + stack.len());
         }
+        gdt::set_tcb((&mut *to_ptr).id.into());
         CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
     }
 
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index 1739dbc3..0890cf8c 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -7,7 +7,7 @@ use core::ops::DerefMut;
 use spin::Mutex;
 
 use memory::allocate_frames;
-use paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress};
+use paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE};
 use paging::entry::EntryFlags;
 use paging::mapper::MapperFlushAll;
 use paging::temporary_page::TemporaryPage;
@@ -386,7 +386,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
 
         // TODO: Clone ksig?
 
-        // Setup heap
+        // Setup image, heap, and grants
         if flags & CLONE_VM == CLONE_VM {
             // Copy user image mapping, if found
             if ! image.is_empty() {
@@ -484,12 +484,30 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                 context.sigstack = Some(sigstack);
             }
 
+            // Set up TCB
+            let tcb_addr = ::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
+            println!("clone: Map TCB {:#x}", tcb_addr);
+            let mut tcb_mem = context::memory::Memory::new(
+                VirtualAddress::new(tcb_addr),
+                PAGE_SIZE,
+                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
+                true
+            );
+
             // Setup user TLS
             if let Some(mut tls) = tls_option {
+                unsafe {
+                    *(tcb_addr as *mut usize) = ::USER_TLS_OFFSET + tls.mem.size();
+                }
+
                 tls.mem.move_to(VirtualAddress::new(::USER_TLS_OFFSET), &mut new_table, &mut temporary_page);
                 context.tls = Some(tls);
             }
+
+            tcb_mem.move_to(VirtualAddress::new(tcb_addr), &mut new_table, &mut temporary_page);
+            context.image.push(tcb_mem.to_shared());
 
             context.name = name;
 
             context.cwd = cwd;
@@ -586,22 +604,24 @@ fn fexec_noreturn(
                 entry = elf.entry();
 
                 // Always map TCB
-                context.image.push(context::memory::Memory::new(
-                    VirtualAddress::new(::USER_TCB_OFFSET),
-                    4096,
+                let tcb_addr = ::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
+                println!("exec: Map TCB {:#x}", tcb_addr);
+                let tcb_mem = context::memory::Memory::new(
+                    VirtualAddress::new(tcb_addr),
+                    PAGE_SIZE,
                     EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                     true
-                ).to_shared());
+                );
 
                 for segment in elf.segments() {
                     match segment.p_type {
                         program_header::PT_LOAD => {
-                            let voff = segment.p_vaddr % 4096;
-                            let vaddr = segment.p_vaddr - voff;
+                            let voff = segment.p_vaddr as usize % PAGE_SIZE;
+                            let vaddr = segment.p_vaddr as usize - voff;
 
                             let mut memory = context::memory::Memory::new(
-                                VirtualAddress::new(vaddr as usize),
-                                segment.p_memsz as usize + voff as usize,
+                                VirtualAddress::new(vaddr),
+                                segment.p_memsz as usize + voff,
                                 EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                                 true
                             );
@@ -635,13 +655,11 @@ fn fexec_noreturn(
                                 ((segment.p_memsz + (segment.p_align - 1))/segment.p_align) * segment.p_align
                             } else {
                                 segment.p_memsz
-                            };
-                            let rounded_size = ((aligned_size + 4095)/4096) * 4096;
+                            } as usize;
+                            let rounded_size = ((aligned_size + PAGE_SIZE - 1)/PAGE_SIZE) * PAGE_SIZE;
                             let rounded_offset = rounded_size - aligned_size;
-                            let tcb_offset = ::USER_TLS_OFFSET + rounded_size as usize;
-                            unsafe { *(::USER_TCB_OFFSET as *mut usize) = tcb_offset; }
 
-                            tls_option = Some(context::memory::Tls {
+                            let tls = context::memory::Tls {
                                 master: VirtualAddress::new(segment.p_vaddr as usize),
                                 file_size: segment.p_filesz as usize,
                                 mem: context::memory::Memory::new(
@@ -651,11 +669,19 @@ fn fexec_noreturn(
                                     true
                                 ),
                                 offset: rounded_offset as usize,
-                            });
+                            };
+
+                            unsafe {
+                                *(tcb_addr as *mut usize) = ::USER_TLS_OFFSET + tls.mem.size();
+                            }
+
+                            tls_option = Some(tls);
                         },
                         _ => (),
                     }
                 }
+
+                context.image.push(tcb_mem.to_shared());
             }
 
             // Data no longer required, can deallocate
@@ -896,8 +922,8 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]>
                 return fexec_kernel(interp_fd, args_vec.into_boxed_slice(), vars);
             },
             program_header::PT_LOAD => {
-                let voff = segment.p_vaddr % 4096;
-                let vaddr = segment.p_vaddr - voff;
+                let voff = segment.p_vaddr as usize % PAGE_SIZE;
+                let vaddr = segment.p_vaddr as usize - voff;
 
                 // Due to the Userspace and kernel TLS bases being located right above 2GB,
                 // limit any loadable sections to lower than that. Eventually we will need
-- 
GitLab
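
Review note: the core of this change is the per-PID TCB address used by set_tcb() and by the clone()/exec paths, so that each process gets its own 4096-byte TCB page instead of every process writing to the single page at USER_TCB_OFFSET. Below is a minimal standalone sketch of that address computation; the constants mirror the values in the patch, while the helper name and the main() harness are purely illustrative and are not kernel code.

    // Illustration only: constants mirrored from the patch (src/consts.rs, paging).
    const USER_TCB_OFFSET: usize = 0xB000_0000;
    const PAGE_SIZE: usize = 4096;

    // Hypothetical helper showing how the patch derives the per-process TCB page.
    fn user_tcb_addr(pid: usize) -> usize {
        USER_TCB_OFFSET + pid * PAGE_SIZE
    }

    fn main() {
        // PID 0 keeps the old fixed address; every other PID lands on a distinct
        // page, so TCBs of contexts sharing an address space no longer overlap.
        assert_eq!(user_tcb_addr(0), 0xB000_0000);
        assert_eq!(user_tcb_addr(1), 0xB000_1000);
        assert_eq!(user_tcb_addr(2), 0xB000_2000);
    }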