From 760fc2ba5ae9aac53e73ef913b352365ca6a3a90 Mon Sep 17 00:00:00 2001
From: 4lDO2 <4lDO2@protonmail.com>
Date: Thu, 22 Jun 2023 18:00:41 +0200
Subject: [PATCH] WIP: Global page info structs.
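
Introduce global per-frame metadata. Each memory map area, now exposed
through a new areas() accessor in the rmm code, is split by
memory::init_mm() into Sections of at most 128 MiB, each holding one
PageInfo per frame. Sections are kept sorted by base frame in the
global SECTIONS table so that get_page() can binary search for the
PageInfo of any frame. The per-grant page arrays previously stored in
context::memory::Provider are removed in favor of this global table.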

---
 src/arch/x86_64/rmm.rs   | 24 +++++++---
 src/arch/x86_64/start.rs |  4 +-
 src/context/memory.rs    | 10 +----
 src/memory/mod.rs        | 95 +++++++++++++++++++++++++++++++++++++++-
 4 files changed, 116 insertions(+), 17 deletions(-)

diff --git a/src/arch/x86_64/rmm.rs b/src/arch/x86_64/rmm.rs
index 96e7fc6a..10c24fe7 100644
--- a/src/arch/x86_64/rmm.rs
+++ b/src/arch/x86_64/rmm.rs
@@ -2,7 +2,7 @@ use core::{
     cmp,
     mem,
     slice,
-    sync::atomic::{self, AtomicUsize, Ordering},
+    cell::SyncUnsafeCell,
+    sync::atomic::{self, AtomicUsize, Ordering},
 };
 use rmm::{
     KILOBYTE,
@@ -102,7 +102,7 @@ unsafe fn inner<A: Arch>(
             }
         }
 
-        // Map kernel at KERNEL_OFFSET and identity map too
+        // Map kernel at KERNEL_OFFSET and in the linear mapping too
         for i in 0..kernel_size_aligned / A::PAGE_SIZE {
             let phys = PhysicalAddress::new(kernel_base + i * A::PAGE_SIZE);
             let virt = VirtualAddress::new(crate::KERNEL_OFFSET + i * A::PAGE_SIZE);
@@ -226,10 +226,18 @@ impl core::fmt::Debug for LockedAllocator {
     }
 }
 
-static mut AREAS: [MemoryArea; 512] = [MemoryArea {
+static AREAS: SyncUnsafeCell<[MemoryArea; 512]> = SyncUnsafeCell::new([MemoryArea {
     base: PhysicalAddress::new(0),
     size: 0,
-}; 512];
+}; 512]);
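+// Number of valid entries in AREAS; written once by init() below and never changed afterwards.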
+static AREA_COUNT: SyncUnsafeCell<u16> = SyncUnsafeCell::new(0);
+
+pub fn areas() -> &'static [MemoryArea] {
+    // SAFETY: Both AREAS and AREA_COUNT are initialized once and then never changed.
+    //
+    // TODO: Memory hotplug?
+    unsafe { &(&*AREAS.get())[..AREA_COUNT.get().read().into()] }
+}
 
 pub static FRAME_ALLOCATOR: LockedAllocator = LockedAllocator;
 
@@ -420,13 +428,15 @@ pub unsafe fn init(
             continue;
         }
 
-        AREAS[area_i].base = PhysicalAddress::new(base);
-        AREAS[area_i].size = size;
+        let areas = &mut *AREAS.get();
+        areas[area_i].base = PhysicalAddress::new(base);
+        areas[area_i].size = size;
         area_i += 1;
     }
+    AREA_COUNT.get().write(area_i as u16);
 
     let allocator = inner::<A>(
-        &AREAS,
+        areas(),
         kernel_base, kernel_size_aligned,
         stack_base, stack_size_aligned,
         env_base, env_size_aligned,
diff --git a/src/arch/x86_64/start.rs b/src/arch/x86_64/start.rs
index 71e58fd9..5620c692 100644
--- a/src/arch/x86_64/start.rs
+++ b/src/arch/x86_64/start.rs
@@ -7,7 +7,7 @@ use core::cell::Cell;
 use core::slice;
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 
-use crate::allocator;
+use crate::{allocator, memory};
 #[cfg(feature = "acpi")]
 use crate::acpi;
 use crate::arch::pti;
@@ -263,6 +263,8 @@ pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! {
         // Initialize devices (for AP)
         device::init_ap();
 
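+        // Initialize the global page info structures (see memory::init_mm)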
+        memory::init_mm();
+
         AP_READY.store(true, Ordering::SeqCst);
 
         cpu_id
diff --git a/src/context/memory.rs b/src/context/memory.rs
index 68ebe472..aa219df2 100644
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@ -529,17 +529,11 @@ pub struct GrantInfo {
     pub(crate) provider: Provider,
 }
 
-/// The arch-specific user page tables are throwaway, and this enum contains all required
-/// information to update lazy mappings in the event of page faults.
 #[derive(Debug)]
 pub enum Provider {
     /// The grant was initialized with (lazy) zeroed memory, and any changes will make it owned by
     /// the frame allocator.
-    //
-    // TODO: strong-count-only Arc?
-    //
-    // https://internals.rust-lang.org/t/pre-rfc-rc-and-arc-with-only-strong-count/5828
-    Allocated { pages: Box<[Option<PageInfo>]> },
+    Allocated,
     /// The grant is not owned, but borrowed from physical memory frames that do not belong to the
     /// frame allocator.
     PhysBorrowed { base: Frame },
@@ -547,7 +541,7 @@ pub enum Provider {
     ///
     /// All grants in the specified range must be of type Allocated.
     // TODO: Vec?
-    External { address_space: Arc<RwLock<AddrSpace>>, src_base: Page, cow: bool, pages: Option<Box<[Option<PageInfo>]>> },
+    External { address_space: Arc<RwLock<AddrSpace>>, src_base: Page, cow: bool },
     /// The memory is borrowed from another address space, but managed by a scheme via fmap.
     // TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the
     // ~~context~~ address space?
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index fe8467de..72b77b68 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -3,14 +3,23 @@
 
 use core::cmp;
 use core::num::NonZeroUsize;
+use core::ops::Deref;
+use core::sync::atomic::AtomicUsize;
 
 use crate::arch::rmm::LockedAllocator;
+use crate::common::try_box_slice_new;
 pub use crate::paging::{PAGE_SIZE, PhysicalAddress};
+use crate::rmm::areas;
 
+use alloc::boxed::Box;
+use alloc::collections::BTreeMap;
+use alloc::sync::Arc;
+use alloc::vec::Vec;
 use rmm::{
     FrameAllocator,
     FrameCount,
 };
+use spin::RwLock;
 use crate::syscall::flag::{PartialAllocStrategy, PhysallocFlags};
 use crate::syscall::error::{ENOMEM, Error};
 
@@ -112,11 +121,14 @@ impl Frame {
     pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
         FrameIter { start, end }
     }
-    pub fn next_by(&self, n: usize) -> Self {
+    pub fn next_by(self, n: usize) -> Self {
         Self {
             number: self.number.get().checked_add(n).and_then(NonZeroUsize::new).expect("overflow in Frame::next_by"),
         }
     }
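+    /// The number of frames from `from` up to `self`, i.e. `self - from` in frame numbers.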
+    pub fn offset_from(self, from: Self) -> usize {
+        self.number.get().checked_sub(from.number.get()).expect("underflow in Frame::offset_from")
+    }
 }
 
 pub struct FrameIter {
@@ -176,3 +188,84 @@ impl Drop for RaiiFrame {
         crate::memory::deallocate_frames(self.inner, 1);
     }
 }
+
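+/// Metadata kept for every physical frame covered by the memory map.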
+pub struct PageInfo {
+    refcount: AtomicUsize,
+    cow_refcount: AtomicUsize,
+    flags: FrameFlags,
+    _padding: usize,
+}
+bitflags::bitflags! {
+    struct FrameFlags: usize {
+        const NONE = 0;
+    }
+}
+
+// TODO: Very read-heavy RwLock?
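+/// All page info sections, sorted by base frame so that get_page can binary search them.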
+pub static SECTIONS: RwLock<Vec<&'static Section>> = RwLock::new(Vec::new());
+
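+/// A contiguous run of frames, at most MAX_SECTION_PAGE_COUNT long, with one PageInfo per frame.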
+pub struct Section {
+    base: Frame,
+    frames: Box<[PageInfo]>,
+}
+
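+// Sections are capped at 2^27 bytes = 128 MiB, i.e. 32768 frames with 4 KiB pages.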
+pub const MAX_SECTION_SIZE_BITS: u32 = 27;
+pub const MAX_SECTION_SIZE: usize = 1 << MAX_SECTION_SIZE_BITS;
+pub const MAX_SECTION_PAGE_COUNT: usize = MAX_SECTION_SIZE / PAGE_SIZE;
+
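+/// Build the global SECTIONS table from the memory map, splitting each area into one or more Sections.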
+#[cold]
+pub fn init_mm() {
+    let mut guard = SECTIONS.write();
+    let mut sections = Vec::new();
+
+    for memory_map_area in areas().iter().filter(|area| area.size > 0) {
+        let mut pages_left = memory_map_area.size.div_floor(PAGE_SIZE);
+        let mut base = Frame::containing_address(memory_map_area.base);
+
+        while pages_left > 0 {
+            let section_page_count = core::cmp::min(pages_left, MAX_SECTION_PAGE_COUNT);
+
+            sections.push(Box::leak(Box::new(Section {
+                base,
+                // TODO: zeroed?
+                frames: try_box_slice_new(PageInfo::new, section_page_count).expect("failed to allocate pages array"),
+            })) as &'static Section);
+
+            pages_left -= section_page_count;
+            base = base.next_by(section_page_count);
+        }
+    }
+
+    sections.sort_unstable_by_key(|s| s.base);
+
+    *guard = sections;
+}
+impl PageInfo {
+    pub fn new() -> Self {
+        Self {
+            refcount: AtomicUsize::new(0),
+            cow_refcount: AtomicUsize::new(0),
+            flags: FrameFlags::NONE,
+            _padding: 0,
+        }
+    }
+}
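+/// Look up the PageInfo for a frame, or None if the frame is not covered by any Section.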
+pub fn get_page(frame: Frame) -> Option<&'static PageInfo> {
+    let sections = SECTIONS.read();
+
+    let idx = match sections.binary_search_by_key(&frame, |section| section.base) {
+        // The frame is exactly the first frame of a section.
+        Ok(i) => i,
+        // No section starts at or below this frame.
+        Err(0) => return None,
+        // Otherwise the only candidate is the section starting just below the frame; the
+        // bounds check in `frames.get` below rejects frames past the end of that section.
+        Err(i) => i - 1,
+    };
+
+    let section = sections.get(idx)?;
+
+    section.frames.get(frame.offset_from(section.base))
+
+    /*
+    sections
+        .range(..=frame)
+        .next_back()
+        .filter(|(base, section)| frame <= base.next_by(section.frames.len()))
+        .map(|(base, section)| PageInfoHandle { section, idx: frame.offset_from(*base) })
+    */
+}
-- 
GitLab