From 91fc7c96abb2f5cf38f0f9b08f7f5e64d8a5165a Mon Sep 17 00:00:00 2001
From: Jeremy Soller <jackpot51@gmail.com>
Date: Fri, 16 Sep 2016 17:51:27 -0600
Subject: [PATCH] work on shared memory across threads

---
 context/context.rs |   6 +--
 context/memory.rs  |  36 ++++++++++++++
 elf.rs             |  10 +++-
 syscall/process.rs | 115 +++++++++++++++++++++++++--------------------
 4 files changed, 111 insertions(+), 56 deletions(-)

diff --git a/context/context.rs b/context/context.rs
index 8ed1becd..4ab62a49 100644
--- a/context/context.rs
+++ b/context/context.rs
@@ -3,7 +3,7 @@ use collections::Vec;
 
 use arch;
 use super::file::File;
-use super::memory::Memory;
+use super::memory::{Memory, SharedMemory};
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum Status {
@@ -26,9 +26,9 @@ pub struct Context {
     /// Kernel stack
     pub kstack: Option<Box<[u8]>>,
     /// Executable image
-    pub image: Vec<Memory>,
+    pub image: Vec<SharedMemory>,
     /// User heap
-    pub heap: Option<Memory>,
+    pub heap: Option<SharedMemory>,
     /// User stack
     pub stack: Option<Memory>,
     /// The open files in the scheme
diff --git a/context/memory.rs b/context/memory.rs
index 71ed47c6..34e69a30 100644
--- a/context/memory.rs
+++ b/context/memory.rs
@@ -1,3 +1,6 @@
+use alloc::arc::{Arc, Weak};
+use spin::Mutex;
+
 use arch::externs::memset;
 use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, VirtualAddress};
 use arch::paging::entry::{self, EntryFlags};
@@ -10,6 +13,35 @@ pub struct Memory {
     flags: EntryFlags
 }
 
+#[derive(Debug)]
+pub enum SharedMemory {
+    Owned(Arc<Mutex<Memory>>),
+    Borrowed(Weak<Mutex<Memory>>)
+}
+
+impl SharedMemory {
+    pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
+        match *self {
+            SharedMemory::Owned(ref memory_lock) => {
+                let mut memory = memory_lock.lock();
+                f(&mut *memory)
+            },
+            SharedMemory::Borrowed(ref memory_weak) => {
+                let memory_lock = memory_weak.upgrade().expect("SharedMemory::Borrowed no longer valid");
+                let mut memory = memory_lock.lock();
+                f(&mut *memory)
+            }
+        }
+    }
+
+    pub fn borrow(&self) -> SharedMemory {
+        match *self {
+            SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)),
+            SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone())
+        }
+    }
+}
+
 impl Memory {
     pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, flush: bool, clear: bool) -> Self {
         let mut memory = Memory {
@@ -23,6 +55,10 @@ impl Memory {
         memory
     }
 
+    pub fn to_shared(self) -> SharedMemory {
+        SharedMemory::Owned(Arc::new(Mutex::new(self)))
+    }
+
     pub fn start_address(&self) -> VirtualAddress {
         self.start
     }
diff --git a/elf.rs b/elf.rs
index 7fb7f9a8..180ff23d 100644
--- a/elf.rs
+++ b/elf.rs
@@ -97,10 +97,18 @@ impl<'a> Elf<'a> {
 
                     memory.remap(flags, true);
 
-                    context.image.push(memory);
+                    context.image.push(memory.to_shared());
                 }
             }
 
+            context.heap = Some(context::memory::Memory::new(
+                VirtualAddress::new(arch::USER_HEAP_OFFSET),
+                0,
+                entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
+                true,
+                true
+            ).to_shared());
+
             // Map stack
             context.stack = Some(context::memory::Memory::new(
                 VirtualAddress::new(arch::USER_STACK_OFFSET),
diff --git a/syscall/process.rs b/syscall/process.rs
index 3ac8fc74..64e5a072 100644
--- a/syscall/process.rs
+++ b/syscall/process.rs
@@ -15,30 +15,29 @@ use syscall::{self, Error, Result};
 pub fn brk(address: usize) -> Result<usize> {
     let contexts = context::contexts();
     let context_lock = contexts.current().ok_or(Error::NoProcess)?;
-    let mut context = context_lock.write();
+    let context = context_lock.read();
+
+    let current = if let Some(ref heap_shared) = context.heap {
+        heap_shared.with(|heap| {
+            heap.start_address().get() + heap.size()
+        })
+    } else {
+        panic!("user heap not initialized");
+    };
 
-    let mut current = arch::USER_HEAP_OFFSET;
-    if let Some(ref heap) = context.heap {
-        current = heap.start_address().get() + heap.size();
-    }
     if address == 0 {
         //println!("Brk query {:X}", current);
         Ok(current)
     } else if address >= arch::USER_HEAP_OFFSET {
         //TODO: out of memory errors
-        if let Some(ref mut heap) = context.heap {
-            heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
-            return Ok(address);
+        if let Some(ref heap_shared) = context.heap {
+            heap_shared.with(|heap| {
+                heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
+            });
+        } else {
+            panic!("user heap not initialized");
         }
 
-        context.heap = Some(context::memory::Memory::new(
-            VirtualAddress::new(arch::USER_HEAP_OFFSET),
-            address - arch::USER_HEAP_OFFSET,
-            entry::WRITABLE | entry::NO_EXECUTE | entry::USER_ACCESSIBLE,
-            true,
-            true
-        ));
-
         Ok(address)
     } else {
         //TODO: Return correct error
@@ -85,38 +84,46 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
             if flags & CLONE_VM == CLONE_VM {
                 panic!("unimplemented: CLONE_VM");
             } else {
-                for memory in context.image.iter() {
-                    let mut new_memory = context::memory::Memory::new(
-                        VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
-                        memory.size(),
-                        entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                        true,
-                        false
-                    );
-                    unsafe {
-                        arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
-                                              memory.start_address().get() as *const u8,
-                                              memory.size());
-                    }
-                    new_memory.remap(memory.flags(), true);
-                    image.push(new_memory);
+                for memory_shared in context.image.iter() {
+                    memory_shared.with(|memory| {
+                        let mut new_memory = context::memory::Memory::new(
+                            VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
+                            memory.size(),
+                            entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                            true,
+                            false
+                        );
+
+                        unsafe {
+                            arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
+                                                  memory.start_address().get() as *const u8,
+                                                  memory.size());
+                        }
+
+                        new_memory.remap(memory.flags(), true);
+                        image.push(new_memory.to_shared());
+                    });
                 }
 
-                if let Some(ref heap) = context.heap {
-                    let mut new_heap = context::memory::Memory::new(
-                        VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
-                        heap.size(),
-                        entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                        true,
-                        false
-                    );
-                    unsafe {
-                        arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
-                                              heap.start_address().get() as *const u8,
-                                              heap.size());
-                    }
-                    new_heap.remap(heap.flags(), true);
-                    heap_option = Some(new_heap);
+                if let Some(ref heap_shared) = context.heap {
+                    heap_shared.with(|heap| {
+                        let mut new_heap = context::memory::Memory::new(
+                            VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
+                            heap.size(),
+                            entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                            true,
+                            false
+                        );
+
+                        unsafe {
+                            arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
+                                                  heap.start_address().get() as *const u8,
+                                                  heap.size());
+                        }
+
+                        new_heap.remap(heap.flags(), true);
+                        heap_option = Some(new_heap.to_shared());
+                    });
                 }
             }
 
@@ -220,15 +227,19 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                 context.kstack = Some(stack);
             }
 
-            for memory in image.iter_mut() {
-                let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
-                memory.move_to(start, &mut new_table, &mut temporary_page, true);
+            for memory_shared in image.iter_mut() {
+                memory_shared.with(|memory| {
+                    let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
+                    memory.move_to(start, &mut new_table, &mut temporary_page, true);
+                });
             }
             context.image = image;
 
-            if let Some(mut heap) = heap_option.take() {
-                heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
-                context.heap = Some(heap);
+            if let Some(heap_shared) = heap_option.take() {
+                heap_shared.with(|heap| {
+                    heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
+                });
+                context.heap = Some(heap_shared);
             }
 
             if let Some(mut stack) = stack_option.take() {
-- 
GitLab