From df145ea0a9c57cb34bde3f25c5b0fecc6449c3c1 Mon Sep 17 00:00:00 2001
From: 4lDO2 <4lDO2@protonmail.com>
Date: Fri, 25 Dec 2020 17:52:07 +0100
Subject: [PATCH] Utilize linear_phys_to_virt where applicable.

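Replace the open-coded `phys.data() + crate::KERNEL_OFFSET` arithmetic
with the linear_phys_to_virt helper when obtaining a virtual address
for a page table frame. Unlike the raw addition, the helper returns an
Option, so a frame that falls outside the linear mapping now triggers a
descriptive panic via expect() instead of silently producing a bogus
virtual address.

For reviewers, a rough sketch of the helper's contract (hypothetical
constant names, not the kernel's actual implementation): translation
succeeds only when the physical address fits inside the pre-mapped
linear region, and otherwise yields None.

    // Sketch only; LINEAR_OFFSET and LINEAR_SIZE are placeholder names
    // standing in for however the kernel describes its linear mapping.
    pub fn linear_phys_to_virt(physical: PhysicalAddress) -> Option<VirtualAddress> {
        if physical.data() < LINEAR_SIZE {
            // Inside the pre-mapped range: offset into the linear mapping.
            Some(VirtualAddress::new(physical.data() + LINEAR_OFFSET))
        } else {
            // Outside the range: let the caller decide how to handle it.
            None
        }
    }
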
---
 src/arch/x86_64/paging/mapper.rs | 5 +++--
 src/arch/x86_64/paging/mod.rs    | 3 ++-
 src/arch/x86_64/paging/table.rs  | 5 +++--
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/src/arch/x86_64/paging/mapper.rs b/src/arch/x86_64/paging/mapper.rs
index a89ea678..ab0c5c7f 100644
--- a/src/arch/x86_64/paging/mapper.rs
+++ b/src/arch/x86_64/paging/mapper.rs
@@ -1,5 +1,5 @@
 use crate::memory::{allocate_frames, deallocate_frames, Frame};
-use super::{Page, PAGE_SIZE, PageFlags, PhysicalAddress, VirtualAddress};
+use super::{linear_phys_to_virt, Page, PAGE_SIZE, PageFlags, PhysicalAddress, VirtualAddress};
 
 use super::RmmA;
 use super::table::{Table, Level4};
@@ -37,7 +37,8 @@ impl<'table> Mapper<'table> {
     /// must also be valid, and the frame must not outlive the lifetime.
     pub unsafe fn from_p4_unchecked(frame: &mut Frame) -> Self {
         let phys = frame.start_address();
-        let virt = VirtualAddress::new(phys.data() + crate::KERNEL_OFFSET);
+        let virt = linear_phys_to_virt(phys)
+            .expect("expected page table frame to fit within linear mapping");
 
         Self {
             p4: &mut *(virt.data() as *mut Table<Level4>),
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index eff4acfb..02dac6de 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -310,7 +310,8 @@ impl InactivePageTable {
         // case it is outside the pre-mapped physical address range, or if such a range is too
         // large to fit the whole physical address space in the virtual address space.
         {
-            let table = VirtualAddress::new(frame.start_address().data() + crate::KERNEL_OFFSET);
+            let table = linear_phys_to_virt(frame.start_address())
+                .expect("cannot initialize InactivePageTable (currently) without the frame being linearly mapped");
             // now we are able to zero the table
 
             // SAFETY: The caller must ensure exclusive access to the pointed-to virtual address of
diff --git a/src/arch/x86_64/paging/table.rs b/src/arch/x86_64/paging/table.rs
index ff928225..e6467bf0 100644
--- a/src/arch/x86_64/paging/table.rs
+++ b/src/arch/x86_64/paging/table.rs
@@ -5,7 +5,7 @@ use core::marker::PhantomData;
 use core::ops::{Index, IndexMut};
 
 use crate::memory::allocate_frames;
-use crate::paging::VirtualAddress;
+use crate::paging::{linear_phys_to_virt, VirtualAddress};
 
 use super::{ENTRY_COUNT, PageFlags};
 use super::entry::{Entry, EntryFlags};
@@ -112,7 +112,8 @@ impl<L> Table<L> where L: HierarchicalLevel {
                 return None;
             }
             let next_table_physaddr = next_table_frame.start_address();
-            let next_table_virtaddr = VirtualAddress::new(next_table_physaddr.data() + crate::KERNEL_OFFSET);
+            let next_table_virtaddr = linear_phys_to_virt(next_table_physaddr)
+                .expect("expected page table frame to fit within linear mapping");
 
             Some(next_table_virtaddr)
         })
-- 
GitLab