From dd0616cc8f682a35bd6123b2d1228314607dc3d4 Mon Sep 17 00:00:00 2001
From: Jeremy Soller <jeremy@system76.com>
Date: Mon, 3 May 2021 12:42:16 -0600
Subject: [PATCH] Use RMM for TLB flushing

---
 src/acpi/dmar/mod.rs                     |  2 +-
 src/acpi/hpet.rs                         |  2 +-
 src/acpi/madt.rs                         |  4 +-
 src/acpi/mod.rs                          |  4 +-
 src/acpi/rsdp.rs                         |  2 +-
 src/allocator/mod.rs                     |  6 +-
 src/arch/x86_64/device/ioapic.rs         |  2 +-
 src/arch/x86_64/device/local_apic.rs     |  2 +-
 src/arch/x86_64/graphical_debug/mod.rs   |  6 +-
 src/arch/x86_64/idt.rs                   |  2 +-
 src/arch/x86_64/paging/mapper.rs         | 93 ++++--------------------
 src/arch/x86_64/paging/mod.rs            |  8 +-
 src/arch/x86_64/paging/temporary_page.rs |  4 +-
 src/context/memory.rs                    | 50 ++++++-------
 src/ptrace.rs                            | 14 ++--
 src/syscall/process.rs                   | 10 +--
 16 files changed, 72 insertions(+), 139 deletions(-)

diff --git a/src/acpi/dmar/mod.rs b/src/acpi/dmar/mod.rs
index 4e5c3636..710357d5 100644
--- a/src/acpi/dmar/mod.rs
+++ b/src/acpi/dmar/mod.rs
@@ -93,7 +93,7 @@ pub struct DmarDrhd {
 impl DmarDrhd {
     pub fn get(&self, active_table: &mut ActivePageTable) -> &'static mut Drhd {
         let result = active_table.identity_map(Frame::containing_address(PhysicalAddress::new(self.base as usize)), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
-        result.flush(active_table);
+        result.flush();
         unsafe { &mut *(self.base as *mut Drhd) }
     }
 }
diff --git a/src/acpi/hpet.rs b/src/acpi/hpet.rs
index 405afa5f..56272f5c 100644
--- a/src/acpi/hpet.rs
+++ b/src/acpi/hpet.rs
@@ -70,7 +70,7 @@ impl GenericAddressStructure {
         let page = Page::containing_address(VirtualAddress::new(self.address as usize));
         let frame = Frame::containing_address(PhysicalAddress::new(self.address as usize));
         let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
-        result.flush(active_table);
+        result.flush();
     }
 
     pub unsafe fn read_u64(&self, offset: usize) -> u64{
diff --git a/src/acpi/madt.rs b/src/acpi/madt.rs
index 7a32a073..2d954107 100644
--- a/src/acpi/madt.rs
+++ b/src/acpi/madt.rs
@@ -59,7 +59,7 @@ impl Madt {
             let trampoline_frame = Frame::containing_address(PhysicalAddress::new(TRAMPOLINE));
             let trampoline_page = Page::containing_address(VirtualAddress::new(TRAMPOLINE));
             let result = active_table.map_to(trampoline_page, trampoline_frame, EntryFlags::PRESENT | EntryFlags::WRITABLE);
-            result.flush(active_table);
+            result.flush();
 
             // Write trampoline, make sure TRAMPOLINE page is free for use
             for i in 0..TRAMPOLINE_DATA.len() {
@@ -150,7 +150,7 @@ impl Madt {
 
             // Unmap trampoline
             let (result, _frame) = active_table.unmap_return(trampoline_page, false);
-            result.flush(active_table);
+            result.flush();
         }
     }
 }
diff --git a/src/acpi/mod.rs b/src/acpi/mod.rs
index 32bc56ec..2212050d 100644
--- a/src/acpi/mod.rs
+++ b/src/acpi/mod.rs
@@ -46,7 +46,7 @@ pub fn get_sdt(sdt_address: usize, active_table: &mut ActivePageTable) -> &'stat
         if active_table.translate_page(page).is_none() {
             let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data()));
             let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE);
-            result.flush(active_table);
+            result.flush();
         }
     }
 
@@ -60,7 +60,7 @@ pub fn get_sdt(sdt_address: usize, active_table: &mut ActivePageTable) -> &'stat
         if active_table.translate_page(page).is_none() {
             let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data()));
             let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE);
-            result.flush(active_table);
+            result.flush();
         }
     }
 }
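The ACPI hunks above show the call-site pattern that repeats through the rest of this patch: mapping operations still return a flush token, but flushing it no longer borrows the active table. A minimal sketch of one such call site after the change; the bindings and flag choice are placeholders, not taken from any single file:

    // Hypothetical call site; `active_table`, `page`, and `frame` stand in for
    // whatever the surrounding function already has in scope.
    let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE);
    // Before this patch this required the table: result.flush(active_table);
    result.flush();
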
diff --git a/src/acpi/rsdp.rs b/src/acpi/rsdp.rs
index c66fd2a5..e64b3882 100644
--- a/src/acpi/rsdp.rs
+++ b/src/acpi/rsdp.rs
@@ -92,7 +92,7 @@ impl RSDP {
         for frame in Frame::range_inclusive(start_frame, end_frame) {
             let page = Page::containing_address(VirtualAddress::new(frame.start_address().data()));
             let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE);
-            result.flush(active_table);
+            result.flush();
         }
     }
 
diff --git a/src/allocator/mod.rs b/src/allocator/mod.rs
index bed1b75d..be3ffff3 100644
--- a/src/allocator/mod.rs
+++ b/src/allocator/mod.rs
@@ -1,6 +1,6 @@
 use crate::paging::{ActivePageTable, Page, VirtualAddress};
 use crate::paging::entry::EntryFlags;
-use crate::paging::mapper::MapperFlushAll;
+use crate::paging::mapper::PageFlushAll;
 
 #[cfg(not(feature="slab"))]
 pub use self::linked_list::Allocator;
@@ -15,7 +15,7 @@ mod linked_list;
 mod slab;
 
 unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usize) {
-    let mut flush_all = MapperFlushAll::new();
+    let flush_all = PageFlushAll::new();
 
     let heap_start_page = Page::containing_address(VirtualAddress::new(offset));
     let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size-1));
@@ -24,7 +24,7 @@ unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usiz
         flush_all.consume(result);
     }
 
-    flush_all.flush(active_table);
+    flush_all.flush();
 }
 
 pub unsafe fn init(active_table: &mut ActivePageTable) {
diff --git a/src/arch/x86_64/device/ioapic.rs b/src/arch/x86_64/device/ioapic.rs
index c9b3d687..d980e359 100644
--- a/src/arch/x86_64/device/ioapic.rs
+++ b/src/arch/x86_64/device/ioapic.rs
@@ -240,7 +240,7 @@ pub unsafe fn handle_ioapic(active_table: &mut ActivePageTable, madt_ioapic: &'s
     assert_eq!(active_table.translate_page(page), None);
     let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::WRITABLE | EntryFlags::NO_CACHE);
-    result.flush(active_table);
+    result.flush();
 
     let ioapic_registers = page.start_address().data() as *const u32;
     let ioapic = IoApic::new(ioapic_registers, madt_ioapic.gsi_base);
diff --git a/src/arch/x86_64/device/local_apic.rs b/src/arch/x86_64/device/local_apic.rs
index d416944a..3c483498 100644
--- a/src/arch/x86_64/device/local_apic.rs
+++ b/src/arch/x86_64/device/local_apic.rs
@@ -50,7 +50,7 @@ impl LocalApic {
             let page = Page::containing_address(VirtualAddress::new(self.address));
             let frame = Frame::containing_address(PhysicalAddress::new(self.address - crate::PHYS_OFFSET));
             let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
-            result.flush(active_table);
+            result.flush();
         }
 
         self.init_ap();
diff --git a/src/arch/x86_64/graphical_debug/mod.rs b/src/arch/x86_64/graphical_debug/mod.rs
index ea2fe047..d3471158 100644
--- a/src/arch/x86_64/graphical_debug/mod.rs
+++ b/src/arch/x86_64/graphical_debug/mod.rs
@@ -3,7 +3,7 @@ use spin::Mutex;
 use crate::memory::Frame;
 use crate::paging::{ActivePageTable, Page, PhysicalAddress, VirtualAddress};
 use crate::paging::entry::EntryFlags;
-use crate::paging::mapper::MapperFlushAll;
+use crate::paging::mapper::PageFlushAll;
 
 pub use self::debug::DebugDisplay;
 use self::display::Display;
@@ -41,7 +41,7 @@ pub fn init(active_table: &mut ActivePageTable) {
     let onscreen = physbaseptr + crate::PHYS_OFFSET;
 
     {
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
         let start_page = Page::containing_address(VirtualAddress::new(onscreen));
         let end_page = Page::containing_address(VirtualAddress::new(onscreen + size * 4));
         for page in Page::range_inclusive(start_page, end_page) {
@@ -67,7 +67,7 @@ pub fn fini(active_table: &mut ActivePageTable) {
     let onscreen = display.onscreen.as_mut_ptr() as usize;
     let size = display.width * display.height;
     {
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
         let start_page = Page::containing_address(VirtualAddress::new(onscreen));
         let end_page = Page::containing_address(VirtualAddress::new(onscreen + size * 4));
         for page in Page::range_inclusive(start_page, end_page) {
diff --git a/src/arch/x86_64/idt.rs b/src/arch/x86_64/idt.rs
index 1e516ade..28304440 100644
--- a/src/arch/x86_64/idt.rs
+++ b/src/arch/x86_64/idt.rs
@@ -193,7 +193,7 @@ pub unsafe fn init_generic(is_bsp: bool, idt: &mut Idt) {
         } else {
             active_table.map_to(page, Frame::containing_address(physical_address), flags)
         };
-        flusher.flush(&mut active_table);
+        flusher.flush();
     }
 
     base_virtual_address
diff --git a/src/arch/x86_64/paging/mapper.rs b/src/arch/x86_64/paging/mapper.rs
index feb57508..fd0a5502 100644
--- a/src/arch/x86_64/paging/mapper.rs
+++ b/src/arch/x86_64/paging/mapper.rs
@@ -1,80 +1,13 @@
-use core::mem;
 use core::ptr::Unique;
 
 use crate::memory::{allocate_frames, deallocate_frames, Frame};
-use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
+use super::{Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
 use super::entry::EntryFlags;
 use super::table::{self, Table, Level4};
+use super::RmmA;
 
-/// In order to enforce correct paging operations in the kernel, these types
-/// are returned on any mapping operation to get the code involved to specify
-/// how it intends to flush changes to a page table
-#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
-pub struct MapperFlush(Page);
-
-impl MapperFlush {
-    /// Create a new page flush promise
-    pub fn new(page: Page) -> MapperFlush {
-        MapperFlush(page)
-    }
-
-    /// Flush this page in the active table
-    pub fn flush(self, table: &mut ActivePageTable) {
-        table.flush(self.0);
-        mem::forget(self);
-    }
-
-    /// Ignore the flush. This is unsafe, and a reason should be provided for use
-    pub unsafe fn ignore(self) {
-        mem::forget(self);
-    }
-}
-
-/// A flush cannot be dropped, it must be consumed
-impl Drop for MapperFlush {
-    fn drop(&mut self) {
-        panic!("Mapper flush was not utilized");
-    }
-}
-
-/// To allow for combining multiple flushes into one, we have a way of flushing
-/// the active table, which can consume `MapperFlush` structs
-#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
-pub struct MapperFlushAll(bool);
-
-impl MapperFlushAll {
-    /// Create a new promise to flush all mappings
-    pub fn new() -> MapperFlushAll {
-        MapperFlushAll(false)
-    }
-
-    /// Consume a single page flush
-    pub fn consume(&mut self, flush: MapperFlush) {
-        self.0 = true;
-        mem::forget(flush);
-    }
-
-    /// Flush the active page table
-    pub fn flush(self, table: &mut ActivePageTable) {
-        if self.0 {
-            table.flush_all();
-        }
-        mem::forget(self);
-    }
-
-    /// Ignore the flush. This is unsafe, and a reason should be provided for use
-    pub unsafe fn ignore(self) {
-        mem::forget(self);
-    }
-}
-
-/// A flush cannot be dropped, it must be consumed
-impl Drop for MapperFlushAll {
-    fn drop(&mut self) {
-        panic!("Mapper flush all was not utilized");
-    }
-}
+pub use rmm::{PageFlush, PageFlushAll};
 
 #[derive(Debug)]
 pub struct Mapper {
@@ -98,7 +31,7 @@ impl Mapper {
     }
 
     /// Map a page to a frame
-    pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) -> MapperFlush {
+    pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) -> PageFlush<RmmA> {
         let p3 = self.p4_mut().next_table_create(page.p4_index());
         let p2 = p3.next_table_create(page.p3_index());
         let p1 = p2.next_table_create(page.p2_index());
@@ -110,27 +43,27 @@ impl Mapper {
             frame.start_address().data(), flags);
         p1.increment_entry_count();
         p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT);
-        MapperFlush::new(page)
+        PageFlush::new(page.start_address())
     }
 
     /// Map a page to the next free frame
-    pub fn map(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
+    pub fn map(&mut self, page: Page, flags: EntryFlags) -> PageFlush<RmmA> {
         let frame = allocate_frames(1).expect("out of frames");
         self.map_to(page, frame, flags)
     }
 
     /// Update flags for a page
-    pub fn remap(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
+    pub fn remap(&mut self, page: Page, flags: EntryFlags) -> PageFlush<RmmA> {
         let p3 = self.p4_mut().next_table_mut(page.p4_index()).expect("failed to remap: no p3");
         let p2 = p3.next_table_mut(page.p3_index()).expect("failed to remap: no p2");
         let p1 = p2.next_table_mut(page.p2_index()).expect("failed to remap: no p1");
         let frame = p1[page.p1_index()].pointed_frame().expect("failed to remap: not mapped");
         p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT);
-        MapperFlush::new(page)
+        PageFlush::new(page.start_address())
     }
 
     /// Identity map a frame
-    pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) -> MapperFlush {
+    pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) -> PageFlush<RmmA> {
         let page = Page::containing_address(VirtualAddress::new(frame.start_address().data()));
         self.map_to(page, frame, flags)
     }
@@ -203,16 +136,16 @@ impl Mapper {
     }
 
     /// Unmap a page
-    pub fn unmap(&mut self, page: Page) -> MapperFlush {
+    pub fn unmap(&mut self, page: Page) -> PageFlush<RmmA> {
         let frame = self.unmap_inner(page, false);
         deallocate_frames(frame, 1);
-        MapperFlush::new(page)
+        PageFlush::new(page.start_address())
     }
 
     /// Unmap a page, return frame without free
-    pub fn unmap_return(&mut self, page: Page, keep_parents: bool) -> (MapperFlush, Frame) {
+    pub fn unmap_return(&mut self, page: Page, keep_parents: bool) -> (PageFlush<RmmA>, Frame) {
         let frame = self.unmap_inner(page, keep_parents);
-        (MapperFlush::new(page), frame)
+        (PageFlush::new(page.start_address()), frame)
     }
 
     pub fn translate_page(&self, page: Page) -> Option<Frame> {
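The mapper.rs hunks above are the core of the change: the kernel-local MapperFlush and MapperFlushAll types are deleted, rmm's PageFlush and PageFlushAll are re-exported in their place, and the mapper methods now return PageFlush<RmmA> built from the page's start address. A sketch of the aggregate-flush pattern as the rest of the patch uses it; the helper name map_range is hypothetical, and only the calls visible in the diffs (PageFlushAll::new, consume, flush, and the existing Mapper and Page APIs) are relied on:

    use crate::paging::entry::EntryFlags;
    use crate::paging::mapper::PageFlushAll;
    use crate::paging::{ActivePageTable, Page, VirtualAddress};

    /// Hypothetical helper mirroring map_heap and Grant::map in this patch.
    unsafe fn map_range(active_table: &mut ActivePageTable, start: usize, size: usize, flags: EntryFlags) {
        // The binding is no longer `mut`, matching the converted call sites.
        let flush_all = PageFlushAll::new();

        let start_page = Page::containing_address(VirtualAddress::new(start));
        let end_page = Page::containing_address(VirtualAddress::new(start + size - 1));
        for page in Page::range_inclusive(start_page, end_page) {
            // Each map returns a PageFlush<RmmA>; hand it to the aggregator.
            let result = active_table.map(page, flags);
            flush_all.consume(result);
        }

        // A single flush covers the whole range, as the removed MapperFlushAll
        // did; rmm's PageFlushAll is assumed to behave the same way.
        flush_all.flush();
    }
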
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index 7ef1b5d7..dd1a8de6 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -9,7 +9,7 @@ use x86::msr;
 use crate::memory::Frame;
 
 use self::entry::EntryFlags;
-use self::mapper::{Mapper, MapperFlushAll};
+use self::mapper::{Mapper, PageFlushAll};
 use self::temporary_page::TemporaryPage;
 
 pub use rmm::{
@@ -94,7 +94,7 @@ unsafe fn init_pat() {
 }
 
 /// Map TSS
-unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> MapperFlushAll {
+unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
     extern "C" {
         /// The starting byte of the thread data segment
         static mut __tdata_start: u8;
@@ -110,7 +110,7 @@ unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> MapperFlushAll {
     let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
     let end = start + size;
 
-    let mut flush_all = MapperFlushAll::new();
+    let flush_all = PageFlushAll::new();
     let start_page = Page::containing_address(VirtualAddress::new(start));
     let end_page = Page::containing_address(VirtualAddress::new(end - 1));
     for page in Page::range_inclusive(start_page, end_page) {
@@ -194,7 +194,7 @@ pub unsafe fn init(
     let mut active_table = ActivePageTable::new_unlocked();
 
     let flush_all = map_tss(cpu_id, &mut active_table);
-    flush_all.flush(&mut active_table);
+    flush_all.flush();
 
     return (active_table, init_tcb(cpu_id));
 }
diff --git a/src/arch/x86_64/paging/temporary_page.rs b/src/arch/x86_64/paging/temporary_page.rs
index 7ffd92c9..a599dcbb 100644
--- a/src/arch/x86_64/paging/temporary_page.rs
+++ b/src/arch/x86_64/paging/temporary_page.rs
@@ -25,7 +25,7 @@ impl TemporaryPage {
     pub fn map(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> VirtualAddress {
         assert!(active_table.translate_page(self.page).is_none(), "temporary page is already mapped");
         let result = active_table.map_to(self.page, frame, flags);
-        result.flush(active_table);
+        result.flush();
         self.page.start_address()
     }
 
@@ -38,6 +38,6 @@ impl TemporaryPage {
     /// Unmaps the temporary page in the active table.
     pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
         let (result, _frame) = active_table.unmap_return(self.page, true);
-        result.flush(active_table);
+        result.flush();
     }
 }
diff --git a/src/context/memory.rs b/src/context/memory.rs
index bd24a24e..c3b9ad93 100644
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@ -17,7 +17,7 @@ use crate::ipi::{ipi, IpiKind, IpiTarget};
 use crate::memory::Frame;
 use crate::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
 use crate::paging::entry::EntryFlags;
-use crate::paging::mapper::MapperFlushAll;
+use crate::paging::mapper::PageFlushAll;
 use crate::paging::temporary_page::TemporaryPage;
 
 /// Round down to the nearest multiple of page size
@@ -314,7 +314,7 @@ impl Grant {
     pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         let start_page = Page::containing_address(to);
         let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
@@ -324,7 +324,7 @@ impl Grant {
             flush_all.consume(result);
         }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
 
         Grant {
             region: Region {
@@ -341,7 +341,7 @@ impl Grant {
     pub fn map(to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         let start_page = Page::containing_address(to);
         let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
@@ -350,7 +350,7 @@ impl Grant {
             flush_all.consume(result);
         }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
 
         Grant {
             region: Region {
@@ -408,7 +408,7 @@ impl Grant {
 
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         let start_page = Page::containing_address(self.region.start);
         let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
@@ -427,14 +427,14 @@
             }
         }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
 
         if self.owned {
             unsafe {
                 intrinsics::copy(self.region.start.data() as *const u8, new_start.data() as *mut u8, self.region.size);
             }
 
-            let mut flush_all = MapperFlushAll::new();
+            let flush_all = PageFlushAll::new();
 
             for page in Page::range_inclusive(start_page, end_page) {
                 //TODO: One function to do both?
@@ -445,7 +445,7 @@
                 flush_all.consume(result);
             }
 
-            flush_all.flush(&mut active_table);
+            flush_all.flush();
         }
 
         Grant {
@@ -465,7 +465,7 @@
 
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         let start_page = Page::containing_address(self.region.start);
         let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
@@ -483,7 +483,7 @@
             });
         }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
 
         self.region.start = new_start;
     }
@@ -501,7 +501,7 @@
 
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         let start_page = Page::containing_address(self.start_address());
         let end_page = Page::containing_address(self.final_address());
@@ -514,7 +514,7 @@
             flush_all.consume(result);
         }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
 
         if let Some(desc) = self.desc_opt.take() {
             println!("Grant::unmap: close desc {:?}", desc);
@@ -705,14 +705,14 @@ impl Memory {
     fn map(&mut self, clear: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         for page in self.pages() {
            let result = active_table.map(page, self.flags);
            flush_all.consume(result);
        }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
 
         if clear {
             assert!(self.flags.contains(EntryFlags::WRITABLE));
@@ -725,14 +725,14 @@
     fn unmap(&mut self) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         for page in self.pages() {
            let result = active_table.unmap(page);
            flush_all.consume(result);
        }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
     }
 
     /// A complicated operation to move a piece of memory to a new page table
@@ -740,7 +740,7 @@
     pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         for page in self.pages() {
             let (result, frame) = active_table.unmap_return(page, false);
@@ -754,7 +754,7 @@
             });
         }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
 
         self.start = new_start;
     }
@@ -762,14 +762,14 @@
     pub fn remap(&mut self, new_flags: EntryFlags) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = MapperFlushAll::new();
+        let flush_all = PageFlushAll::new();
 
         for page in self.pages() {
            let result = active_table.remap(page, new_flags);
            flush_all.consume(result);
        }
 
-        flush_all.flush(&mut active_table);
+        flush_all.flush();
 
         self.flags = new_flags;
     }
@@ -779,7 +779,7 @@
 
         //TODO: Calculate page changes to minimize operations
         if new_size > self.size {
-            let mut flush_all = MapperFlushAll::new();
+            let flush_all = PageFlushAll::new();
 
             let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size));
             let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size - 1));
@@ -790,7 +790,7 @@
                 }
             }
 
-            flush_all.flush(&mut active_table);
+            flush_all.flush();
 
             if clear {
                 unsafe {
@@ -798,7 +798,7 @@
                 }
             }
         } else if new_size < self.size {
-            let mut flush_all = MapperFlushAll::new();
+            let flush_all = PageFlushAll::new();
 
             let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size));
             let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
@@ -809,7 +809,7 @@
                 }
             }
 
-            flush_all.flush(&mut active_table);
+            flush_all.flush();
         }
 
         self.size = new_size;
diff --git a/src/ptrace.rs b/src/ptrace.rs
index 1bbc5ef3..6f9ab012 100644
--- a/src/ptrace.rs
+++ b/src/ptrace.rs
@@ -7,7 +7,7 @@ use crate::{
     interrupt::InterruptStack,
     paging::{
         entry::EntryFlags,
-        mapper::MapperFlushAll,
+        mapper::PageFlushAll,
         temporary_page::TemporaryPage,
         ActivePageTable, InactivePageTable, Page, PAGE_SIZE, VirtualAddress
     }
@@ -486,27 +486,27 @@ where F: FnOnce(*mut u8) -> Result<()>
     // Map all the physical frames into linear pages
     let pages = frames.len();
     let mut page = start;
-    let mut flusher = MapperFlushAll::new();
+    let flush_all = PageFlushAll::new();
     for (frame, mut flags) in frames {
         flags |= EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE;
-        flusher.consume(active_page_table.map_to(page, frame, flags));
+        flush_all.consume(active_page_table.map_to(page, frame, flags));
         page = page.next();
     }
-    flusher.flush(&mut active_page_table);
+    flush_all.flush();
 
     let res = f((start.start_address().data() + offset.data() % PAGE_SIZE) as *mut u8);
 
     // Unmap all the pages (but allow no deallocation!)
     let mut page = start;
-    let mut flusher = MapperFlushAll::new();
+    let flush_all = PageFlushAll::new();
     for _ in 0..pages {
-        flusher.consume(active_page_table.unmap_return(page, true).0);
+        flush_all.consume(active_page_table.unmap_return(page, true).0);
         page = page.next();
     }
-    flusher.flush(&mut active_page_table);
+    flush_all.flush();
 
     res
 }
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index 61642d73..cd405087 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -20,7 +20,7 @@ use crate::interrupt;
 use crate::ipi::{ipi, IpiKind, IpiTarget};
 use crate::memory::allocate_frames;
 use crate::paging::entry::EntryFlags;
-use crate::paging::mapper::MapperFlushAll;
+use crate::paging::mapper::PageFlushAll;
 use crate::paging::temporary_page::TemporaryPage;
 use crate::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE};
 use crate::{ptrace, syscall};
@@ -1316,7 +1316,7 @@ pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result<usize> {
 
     let mut active_table = unsafe { ActivePageTable::new() };
 
-    let mut flush_all = MapperFlushAll::new();
+    let flush_all = PageFlushAll::new();
 
     let start_page = Page::containing_address(VirtualAddress::new(address));
     let end_page = Page::containing_address(VirtualAddress::new(end_address));
@@ -1326,11 +1326,11 @@ pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result<usize> {
         let mut page_flags = if let Some(page_flags) = active_table.translate_page_flags(page) {
             page_flags
         } else {
-            flush_all.flush(&mut active_table);
+            flush_all.flush();
             return Err(Error::new(EFAULT));
         };
 
         if !page_flags.contains(EntryFlags::PRESENT) {
-            flush_all.flush(&mut active_table);
+            flush_all.flush();
             return Err(Error::new(EFAULT));
         }
@@ -1356,7 +1356,7 @@ pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result<usize> {
         flush_all.consume(flush);
     }
 
-    flush_all.flush(&mut active_table);
+    flush_all.flush();
 
     Ok(0)
 }
-- 
GitLab
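One discipline carried over from the deleted types is visible in the mprotect hunks: every flusher is consumed before each early return, because the old MapperFlush and MapperFlushAll were #[must_use] and panicked when dropped. Whether rmm's replacements keep that panic-on-drop behavior is not shown in this patch, so treat it as an assumption; the call sites preserve the discipline either way. A sketch of the early-return pattern, with a hypothetical helper name and error imports assumed to match those used by src/syscall/process.rs:

    use crate::paging::entry::EntryFlags;
    use crate::paging::mapper::PageFlushAll;
    use crate::paging::{ActivePageTable, Page};
    use crate::syscall::error::{Error, Result, EFAULT};

    /// Hypothetical helper mirroring the mprotect loop above.
    fn remap_range(active_table: &mut ActivePageTable, start_page: Page, end_page: Page, flags: EntryFlags) -> Result<usize> {
        let flush_all = PageFlushAll::new();

        for page in Page::range_inclusive(start_page, end_page) {
            if active_table.translate_page_flags(page).is_none() {
                // Flush what has been consumed so far before bailing out, as
                // mprotect does, so the flusher is never dropped on an error path.
                flush_all.flush();
                return Err(Error::new(EFAULT));
            }
            flush_all.consume(active_table.remap(page, flags));
        }

        flush_all.flush();
        Ok(0)
    }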