Verified Commit d6e17976 authored by Jacob Lorentzon

Make Mapper::map fallible.

parent df145ea0
1 merge request: !187 No more recursive mapping
-use crate::paging::{ActivePageTable, Page, PageFlags, VirtualAddress};
-use crate::paging::mapper::PageFlushAll;
+use crate::paging::{ActivePageTable, Page, PageFlags, VirtualAddress, mapper::PageFlushAll, entry::EntryFlags};
 #[cfg(not(feature="slab"))]
 pub use self::linked_list::Allocator;
@@ -19,7 +18,8 @@ unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usize)
     let heap_start_page = Page::containing_address(VirtualAddress::new(offset));
     let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size-1));
     for page in Page::range_inclusive(heap_start_page, heap_end_page) {
-        let result = active_table.map(page, PageFlags::new().write(true));
+        let result = active_table.map(page, PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))))
+            .expect("failed to map kernel heap");
         flush_all.consume(result);
     }
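Two things change at this call site: kernel-heap pages are now marked GLOBAL whenever the pti feature is disabled, and the now-fallible map is unwrapped with an explicit expect. Global pages keep their TLB entries across CR3 reloads, which is exactly what kernel page-table isolation must prevent, hence the cfg! condition. A minimal sketch of the flag construction, with kernel_page_flags as a hypothetical name for illustration:

    /// Hypothetical helper (not in this commit): flags for kernel-global
    /// mappings such as the heap and the percpu areas below.
    fn kernel_page_flags() -> PageFlags<RmmA> {
        PageFlags::new()
            .write(true)
            // Set the arch-specific GLOBAL bit only when PTI is off; with PTI
            // enabled, kernel TLB entries must not survive the switch to the
            // user page tables.
            .custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti")))
    }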
@@ -13,6 +13,7 @@ bitflags! {
     pub struct EntryFlags: usize {
         const NO_CACHE = 1 << 4;
         const HUGE_PAGE = 1 << 7;
+        const GLOBAL = 1 << 8;
     }
 }
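The new constant occupies bit 8, which on x86_64 is the Global (G) bit of a page-table entry. The bitflags!-generated type exposes the raw mask through bits(), which is what PageFlags::custom_flag consumes at the call sites; for example:

    // EntryFlags::GLOBAL.bits() is the raw mask handed to custom_flag.
    assert_eq!(EntryFlags::GLOBAL.bits(), 1 << 8);
    // Flags combine as plain bit-ors.
    assert_eq!((EntryFlags::GLOBAL | EntryFlags::NO_CACHE).bits(), (1 << 8) | (1 << 4));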
-use crate::memory::{allocate_frames, deallocate_frames, Frame};
+use crate::memory::{allocate_frames, deallocate_frames, Enomem, Frame};
 use super::{linear_phys_to_virt, Page, PAGE_SIZE, PageFlags, PhysicalAddress, VirtualAddress};
 use super::RmmA;
 use super::table::{Table, Level4};
@@ -36,8 +36,7 @@ impl<'table> Mapper<'table> {
     /// For this to be safe, the caller must have exclusive access to the frame argument. The frame
     /// must also be valid, and the frame must not outlive the lifetime.
     pub unsafe fn from_p4_unchecked(frame: &mut Frame) -> Self {
-        let phys = frame.start_address();
-        let virt = linear_phys_to_virt(phys)
+        let virt = linear_phys_to_virt(frame.start_address())
             .expect("expected page table frame to fit within linear mapping");
         Self {
@@ -70,9 +69,9 @@
     }

     /// Map a page to the next free frame
-    pub fn map(&mut self, page: Page, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
-        let frame = allocate_frames(1).expect("out of frames");
-        self.map_to(page, frame, flags)
+    pub fn map(&mut self, page: Page, flags: PageFlags<RmmA>) -> Result<PageFlush<RmmA>, Enomem> {
+        let frame = allocate_frames(1).ok_or(Enomem)?;
+        Ok(self.map_to(page, frame, flags))
     }

     /// Update flags for a page
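This is the core of the commit: frame-allocation failure inside Mapper::map now surfaces as Result<PageFlush<RmmA>, Enomem> instead of a panic buried in the mapper. Callers that are themselves fallible can forward the error with ?. A minimal sketch, assuming a Result-returning context; map_range is a hypothetical helper, not part of this commit:

    /// Hypothetical helper: map a page range, forwarding ENOMEM to the caller
    /// instead of panicking.
    fn map_range(
        mapper: &mut Mapper,
        start: Page,
        end: Page,
        flags: PageFlags<RmmA>,
    ) -> Result<(), Enomem> {
        let flush_all = PageFlushAll::new();
        for page in Page::range_inclusive(start, end) {
            // `?` propagates Enomem; on success the returned PageFlush is
            // batched into the single TLB shootdown below.
            flush_all.consume(mapper.map(page, flags)?);
        }
        flush_all.flush();
        Ok(())
    }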
@@ -8,6 +8,7 @@ use x86::msr;
 use crate::memory::Frame;
+use self::entry::EntryFlags;
 use self::mapper::{Mapper, PageFlushAll};
 use self::table::{Level4, Table};
@@ -94,8 +95,8 @@ unsafe fn init_pat() {
     );
 }

-/// Map TSS
-unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
+/// Map percpu
+unsafe fn map_percpu(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
     extern "C" {
         /// The starting byte of the thread data segment
         static mut __tdata_start: u8;
@@ -115,7 +116,11 @@ unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
     let start_page = Page::containing_address(VirtualAddress::new(start));
     let end_page = Page::containing_address(VirtualAddress::new(end - 1));
     for page in Page::range_inclusive(start_page, end_page) {
-        let result = mapper.map(page, PageFlags::new().write(true));
+        let result = mapper.map(
+            page,
+            PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))),
+        )
+            .expect("failed to allocate page table frames while mapping percpu");
         flush_all.consume(result);
     }
     flush_all
@@ -188,7 +193,7 @@ pub unsafe fn init(
     let mut active_table = ActivePageTable::new_unlocked(TableKind::User);

-    let flush_all = map_tss(cpu_id, &mut active_table);
+    let flush_all = map_percpu(cpu_id, &mut active_table);
     flush_all.flush();

     return (active_table, init_tcb(cpu_id));
@@ -205,7 +210,7 @@ pub unsafe fn init_ap(
     let mut new_table = InactivePageTable::from_address(bsp_table);

     {
-        let flush_all = map_tss(cpu_id, &mut new_table.mapper());
+        let flush_all = map_percpu(cpu_id, &mut new_table.mapper());
         // The flush can be ignored as this is not the active table. See later active_table.switch
         flush_all.ignore();
     };
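Note the two flush disciplines at the map_percpu call sites: the BSP maps into the live table, so the batched flush must actually run (flush_all.flush()), while an AP maps into an InactivePageTable, where the flushes can be dropped (flush_all.ignore()) because the later CR3 switch reloads the TLB anyway. Condensed into one hypothetical wrapper for illustration:

    /// Hypothetical wrapper showing the flush discipline used above.
    unsafe fn percpu_into(cpu_id: usize, mapper: &mut Mapper, table_is_active: bool) {
        let flush_all = map_percpu(cpu_id, mapper);
        if table_is_active {
            // Live table: new entries may shadow stale TLB contents, so flush.
            flush_all.flush();
        } else {
            // Inactive table: activating it (a CR3 write) flushes non-global
            // TLB entries, so these flushes are safely ignored.
            flush_all.ignore();
        }
    }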
@@ -339,7 +339,9 @@ impl Grant {
        let start_page = Page::containing_address(to);
        let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
        for page in Page::range_inclusive(start_page, end_page) {
-            let result = active_table.map(page, flags);
+            let result = active_table
+                .map(page, flags)
+                .expect("TODO: handle ENOMEM in Grant::map");
            flush_all.consume(result);
        }
@@ -408,7 +410,8 @@
            let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));

            if self.owned {
-                let result = active_table.map(new_page, PageFlags::new().write(true));
+                let result = active_table.map(new_page, PageFlags::new().write(true))
+                    .expect("TODO: handle ENOMEM in Grant::secret_clone");
                flush_all.consume(result);
            } else {
                let result = active_table.map_to(new_page, frame, flags);
@@ -692,7 +695,9 @@ impl Memory {
        let flush_all = PageFlushAll::new();

        for page in self.pages() {
-            let result = active_table.map(page, self.flags);
+            let result = active_table
+                .map(page, self.flags)
+                .expect("TODO: handle ENOMEM in Memory::map");
            flush_all.consume(result);
        }
@@ -769,7 +774,9 @@
        let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size - 1));
        for page in Page::range_inclusive(start_page, end_page) {
            if active_table.translate_page(page).is_none() {
-                let result = active_table.map(page, self.flags);
+                let result = active_table
+                    .map(page, self.flags)
+                    .expect("TODO: Handle OOM in Memory::resize");
                flush_all.consume(result);
            }
        }
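All four call sites in Grant and Memory still unwrap with expect and a TODO: this commit only makes the mapper itself fallible. Actually handling ENOMEM there would mean rolling back the pages already mapped before reporting the error. A hedged sketch of one possible shape; map_or_rollback is hypothetical, and it assumes the mapper's existing unmap, which also returns a flush:

    /// Hypothetical follow-up: map a range, and on ENOMEM unmap whatever was
    /// already mapped before returning the error.
    fn map_or_rollback(
        mapper: &mut Mapper,
        start: Page,
        end: Page,
        flags: PageFlags<RmmA>,
    ) -> Result<(), Enomem> {
        let flush_all = PageFlushAll::new();
        for page in Page::range_inclusive(start, end) {
            match mapper.map(page, flags) {
                Ok(flush) => flush_all.consume(flush),
                Err(Enomem) => {
                    // Undo the pages mapped so far, stopping at the failing one.
                    for mapped in Page::range_inclusive(start, page)
                        .take_while(|p| p.start_address() != page.start_address())
                    {
                        flush_all.consume(mapper.unmap(mapped));
                    }
                    flush_all.flush();
                    return Err(Enomem);
                }
            }
        }
        flush_all.flush();
        Ok(())
    }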
@@ -118,3 +118,6 @@ impl Iterator for FrameIter {
         }
     }
 }
+
+#[derive(Debug)]
+pub struct Enomem;
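Enomem itself is a zero-sized marker: the mapper has exactly one failure mode, running out of frames. One plausible way to surface it to userspace, sketched as an assumption rather than part of this commit, is a conversion into the syscall error type (this assumes Redox's syscall::error::{Error, ENOMEM}):

    use syscall::error::{Error, ENOMEM};

    impl From<Enomem> for Error {
        fn from(_: Enomem) -> Self {
            // Fold the kernel-internal marker into the errno-style value the
            // syscall layer already returns, letting `?` convert it en route.
            Error::new(ENOMEM)
        }
    }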