Compare commits

..

47 Commits

Author SHA1 Message Date
EDiasAlberto
7965c007c8 feat: add general SPT lock for operations across threads 2024-12-06 06:03:22 +00:00
Themis Demetriades
65da1659e5 feat: merged shared-read-only-executables with the rest of VM 2024-12-06 04:15:13 +00:00
Themis Demetriades
3897e83963 fix: use correct page_get function within page eviction 2024-12-06 01:43:41 +00:00
Demetriades, Themis
96b350d623 Merge branch 'vm/mmap-write-back-on-eviction' into 'vm/virtual-memory/themis-synch'
Write back mmap file pages to file upon eviction

See merge request lab2425_autumn/pintos_22!59
2024-12-06 01:01:50 +00:00
Themis Demetriades
31403ac7cb fix: obtain correct page table entry when performing eviction 2024-12-06 00:56:03 +00:00
1da0c7d48c fix: properly assign frame owners and deallocate in all required places 2024-12-06 00:29:57 +00:00
Demetriades, Themis
8220b931a9 Merge branch 'vm/virtual-memory/frame-synch/saleh' into 'vm/virtual-memory/themis-synch'
Merge frame pinning to themis-synch

See merge request lab2425_autumn/pintos_22!60
2024-12-06 00:21:02 +00:00
sBubshait
1efa1fef9a Merge frame pinning into themis-synch 2024-12-05 23:56:25 +00:00
sBubshait
fc088a19ac Merge remote-tracking branch 'origin/vm/frame-pinning' into vm/virtual-memory/frame-synch/saleh
# Conflicts:
#	src/userprog/syscall.c
2024-12-05 23:48:52 +00:00
sBubshait
a34bbbed08 Update frame: When evicting an mmapped file page, write it back to the file if it is dirty 2024-12-05 22:53:48 +00:00
833c1b0520 fix: only swap out shared pages once 2024-12-05 22:37:14 +00:00
9aa9cdb91e feat: implement proper destruction of pages, including for shared pages 2024-12-05 22:23:50 +00:00
Themis Demetriades
2811ea0eb3 fix: SPT never removes entries until process termination or special case 2024-12-05 22:05:02 +00:00
dd46200256 feat: initial shared file page management and initialization 2024-12-05 21:46:49 +00:00
sBubshait
6da855fe47 Implement validation of pointers and strings in syscalls with pinning and unpinning to protect against eviction 2024-12-05 21:12:31 +00:00
4dd6b6e928 fix: do not leak when inserting the same page twice, just update 2024-12-05 19:38:27 +00:00
0f1f7b9a6f refactor: extract init_pages 2024-12-05 19:35:39 +00:00
sBubshait
e03273756d Update frame table to add a pinned flag and protect those from being evicted 2024-12-05 17:52:01 +00:00
Themis Demetriades
7860f3863f fix: add check to mmap to ensure file isn't mapped over stack segment (ed1223) 2024-12-05 17:11:02 +00:00
Themis Demetriades
d03e253046 feat: implement synchronisation to protect access to PTEs of SPTs during eviction 2024-12-05 16:51:15 +00:00
EDiasAlberto
5cf79b5389 fix: add check to mmap to ensure file isn't mapped over stack segment 2024-12-05 16:05:08 +00:00
EDiasAlberto
e779e8ac7c fix: modify stack growth to use frame allocation to allow for page swapping 2024-12-05 04:39:50 +00:00
sBubshait
16db01d3d8 Refactor: Check if page is in a swap in fetch_page instead of the page fault handler 2024-12-05 03:17:18 +00:00
sBubshait
c12cd95093 Fix issues with merging, duplicate references and definition of VM 2024-12-05 02:27:48 +00:00
sBubshait
f13fd435cd Merge remote-tracking branch 'origin/vm/page-swap-synch' into vm/virtual-memory/saleh
# Conflicts:
#	.gitlab-ci.yml
#	src/Makefile.build
#	src/threads/thread.c
#	src/userprog/exception.c
#	src/userprog/process.c
#	src/vm/frame.c
#	src/vm/page.c
#	src/vm/page.h
#	src/vm/stackgrowth.c
#	src/vm/stackgrowth.h
2024-12-05 02:21:53 +00:00
EDiasAlberto
ac31fb1e1e feat: set accessed bit to allocated frames in page_load and get_usr_kpage 2024-12-05 01:41:23 +00:00
sBubshait
1a8eb1bbe5 Merge branch 'vm/memory-mapped-files' into vm/virtual-memory/saleh 2024-12-05 01:24:50 +00:00
sBubshait
52ec8fe779 Fix Bug: Grow stack if necessary in case of a page fault in the kernel context 2024-12-05 01:15:46 +00:00
sBubshait
f171a05108 Merge branch 'vm/stack-growth/saleh' into vm/virtual-memory/saleh
# Conflicts:
#	src/userprog/exception.c
#	src/userprog/process.c
#	src/userprog/syscall.c
#	src/vm/frame.c
#	src/vm/page.c
#	src/vm/page.h
2024-12-05 00:51:03 +00:00
sBubshait
5265fed288 Refactor stack growth to be helper functions in exception for easier merging 2024-12-05 00:27:40 +00:00
sBubshait
61f6374006 Update gitlab CI to only run the tests associated with this feature (mmapped-files). 2024-12-04 22:06:09 +00:00
sBubshait
26a2d40325 Implement implicitly unmapping all mmapped files when a process exits. Refactor to reduce duplication 2024-12-04 22:00:59 +00:00
sBubshait
806d6bc19e Refactor: Move destroying mmap data into process_exit instead of thread 2024-12-04 21:59:38 +00:00
sBubshait
ecbb4e74a5 Implement the unmap system call, writing back to the file if a page is dirty before removing from SPT 2024-12-04 19:07:28 +00:00
sBubshait
02b79d1934 Update mmap to add temporarily page_set_swap until swap is implemented 2024-12-04 18:13:07 +00:00
sBubshait
857cae3578 Update mmap to add a get helper function to find a mmap entry from its mapping 2024-12-04 18:08:05 +00:00
sBubshait
941e1e067a Update SPT page entry to change type from EXECUTABLE to PAGE_FILE to capture mmaps in addition to executables 2024-12-04 17:51:30 +00:00
sBubshait
ad6e4b4059 Implement syscall_mmap to validate and then map all file data into a user address in memory 2024-12-04 17:42:53 +00:00
sBubshait
72fa0c1bbb Fix Bug: Initialise the mmap table for the newly created thread rather than the current thread 2024-12-04 17:41:14 +00:00
sBubshait
67f16cb2a6 Update syscall.c to allow mmap and unmap system calls through helper handler functions for each 2024-12-04 15:31:53 +00:00
sBubshait
6b0f708d8f Update mmap to add an insert helper function to allocate and add new mmap entries to the hash table 2024-12-04 15:26:00 +00:00
sBubshait
6e838aa06a Fix Bug in thread.c: Only initialise and destroy mmap files table if VM is defined 2024-12-04 15:24:11 +00:00
sBubshait
a2f46f3b72 Add a mmap destroy function to cleanup all mmap hash table entries upon thread exit 2024-12-04 15:14:02 +00:00
sBubshait
1ce09a49a1 Add helper functions to initialise the memory-mapped files table and counter 2024-12-04 15:08:43 +00:00
sBubshait
b3042b5aa6 Update thread structure to add mmap files table and a counter for mappings of the thread 2024-12-04 14:56:52 +00:00
sBubshait
85aabd86cd Update gitlab ci file to include mmap tests in the automated testing pipeline 2024-12-04 14:55:28 +00:00
sBubshait
acc768e177 Add mmap module in vm defining mmap_entry structure and some helper functions 2024-12-04 13:01:01 +00:00
18 changed files with 1083 additions and 718 deletions

View File

@@ -32,9 +32,10 @@ test_userprog:
extends: .pintos_tests
variables:
DIR: userprog
IGNORE: (tests/userprog/no-vm/multi-oom)
test_vm:
extends: .pintos_tests
variables:
DIR: vm
IGNORE: (tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-read|tests/vm/mmap-close|tests/vm/mmap-overlap|tests/vm/mmap-twice|tests/vm/mmap-write|tests/vm/mmap-exit|tests/vm/mmap-shuffle|tests/vm/mmap-clean|tests/vm/mmap-inherit|tests/vm/mmap-misalign|tests/vm/mmap-null|tests/vm/mmap-over-code|tests/vm/mmap-over-data|tests/vm/mmap-over-stk|tests/vm/mmap-remove)
IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-over-stk)

View File

@@ -64,8 +64,8 @@ userprog_SRC += userprog/tss.c # TSS management.
# Virtual memory code.
vm_SRC += vm/frame.c # Frame table manager.
vm_SRC += vm/page.c # Page table manager.
vm_SRC += vm/mmap.c # Memory-mapped files.
vm_SRC += devices/swap.c # Swap block manager.
vm_SRC += vm/stackgrowth.c # Stack growth functions.
#vm_SRC = vm/file.c # Some other file.
# Filesystem code.

View File

@@ -31,9 +31,9 @@
#else
#include "tests/threads/tests.h"
#endif
#include "vm/page.h"
#ifdef VM
#include "vm/frame.h"
#include "vm/page.h"
#include "devices/swap.h"
#endif
#ifdef FILESYS
@@ -105,6 +105,7 @@ main (void)
paging_init ();
#ifdef VM
frame_init ();
shared_file_pages_init ();
#endif
/* Segmentation. */
@@ -122,7 +123,6 @@ main (void)
exception_init ();
syscall_init ();
#endif
shared_files_init ();
/* Start thread scheduler and enable interrupts. */
thread_start ();

View File

@@ -15,10 +15,13 @@
#include "threads/switch.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
#include "vm/page.h"
#ifdef USERPROG
#include "userprog/process.h"
#include "userprog/syscall.h"
#include "vm/page.h"
#endif
#ifdef VM
#include "vm/mmap.h"
#endif
/* Random value for struct thread's `magic' member.
@@ -262,13 +265,22 @@ thread_create (const char *name, int priority,
#ifdef USERPROG
/* Initialize the thread's file descriptor table. */
t->fd_counter = MINIMUM_USER_FD;
bool success = hash_init (&t->open_files, fd_hash, fd_less, NULL);
success = success && hash_init (&t->child_results, process_result_hash,
process_result_less, t);
if (success)
{
success = hash_init (&t->child_results, process_result_hash,
process_result_less, t);
if (!success)
hash_destroy (&t->open_files, NULL);
#ifdef VM
success = success && init_pages (t);
else
{
success = init_pages (&t->pages);
if (!success)
hash_destroy (&t->child_results, NULL);
}
#endif
}
if (!success)
{
@@ -278,6 +290,10 @@ thread_create (const char *name, int priority,
}
#endif
#ifdef VM
mmap_init (t);
#endif
/* Prepare thread for first run by initializing its stack.
Do this atomically so intermediate values for the 'stack'
member cannot be observed. */
@@ -720,6 +736,10 @@ init_thread (struct thread *t, const char *name, int nice, int priority,
t->recent_cpu = recent_cpu;
t->priority = t->base_priority;
#ifdef VM
lock_init (&t->spt_lock);
#endif
old_level = intr_disable ();
list_push_back (&all_list, &t->allelem);
intr_set_level (old_level);

View File

@@ -135,6 +135,12 @@ struct thread
/* Shared between thread.c and synch.c. */
struct list_elem elem; /* List element. */
struct hash pages; /* Table of open user pages. */
/* Memory mapped files for user virtual memory. */
struct hash mmap_files; /* List of memory mapped files. */
unsigned int mmap_counter; /* Counter for memory mapped files. */
#ifdef USERPROG
/* Owned by userprog/process.c. */
uint32_t *pagedir; /* Page directory. */
@@ -143,9 +149,7 @@ struct thread
struct hash open_files; /* Hash Table of FD -> Struct File. */
#endif
#ifdef VM
struct hash pages; /* Table of open user pages. */
#endif
struct lock spt_lock;
void *curr_esp;

View File

@@ -3,11 +3,9 @@
#include <stdio.h>
#include "stdbool.h"
#include "userprog/gdt.h"
#include "userprog/pagedir.h"
#include "threads/interrupt.h"
#include "threads/thread.h"
#ifdef VM
#include "vm/stackgrowth.h"
#include "vm/frame.h"
#include "vm/page.h"
#include "devices/swap.h"
@@ -15,12 +13,18 @@
#include "userprog/pagedir.h"
#endif
#define MAX_STACK_SIZE (8 * 1024 * 1024) // 8MB
#define MAX_STACK_OFFSET 32 // 32 bytes offset below stack pointer (ESP)
/* Number of page faults processed. */
static long long page_fault_cnt;
static void kill (struct intr_frame *);
static void page_fault (struct intr_frame *);
static bool try_fetch_page (void *upage, bool write);
static bool is_valid_stack_access (const void *fault_addr, const void *esp);
static bool grow_stack (void *upage);
bool fetch_page (void *upage, bool write);
/* Registers handlers for interrupts that can be caused by user
programs.
@@ -156,48 +160,33 @@ page_fault (struct intr_frame *f)
write = (f->error_code & PF_W) != 0;
user = (f->error_code & PF_U) != 0;
#ifdef VM
/* Select the appropriate stack pointer based on the context of the fault. */
void *esp = user ? f->esp : thread_current()->curr_esp;
/* If the fault address is in a user page that is not present, then it might
be just that the stack needs to grow or that it needs to be lazily loaded.
So we attempt to grow the stack. If this does not work, we check our SPT to
see if the page is expected to have data loaded in memory. */
void *upage = pg_round_down (fault_addr);
if (not_present && is_user_vaddr(upage))
if (not_present && is_user_vaddr (upage) && upage != NULL)
{
struct thread *t = thread_current ();
void *esp = user ? f->esp : t->curr_esp;
/* Check if the non-present user page is in the swap partition.
If so, swap it back into main memory, updating the PTE for
the faulted virtual address to point to the newly allocated
frame. */
if (page_in_swap (t, fault_addr))
{
size_t swap_slot = page_get_swap (t, fault_addr);
void *kpage = frame_alloc (0, upage, t);
swap_in (kpage, swap_slot);
bool writeable = pagedir_is_writable (t->pagedir, upage);
if (pagedir_set_page (t->pagedir, upage, kpage, writeable)) return;
}
/* Handle user page faults that need to be resolved by dynamic
stack growth by checking if this is such a fault and responding
accordingly. */
if (handle_stack_fault (fault_addr, esp)) return;
/* Handle user page faults that need to be resolved by lazy loading
of executable files by checking if they contain entries in the
SPT hash map and responding accordingly. */
if (try_fetch_page (upage, write))
if (fetch_page (upage, write))
return;
if (is_valid_stack_access (fault_addr, esp))
if (grow_stack (upage))
return;
}
/* Allows for page faults within a kernel context to communicate with
user pages for sending error codes. */
/* If the page fault occurred in kernel mode, then we intentionally indicate
a fault (for get_user() etc). */
if (!user)
{
f->eip = (void *)f->eax;
f->eax = 0xffffffff;
return;
}
#endif
{
f->eip = (void *)f->eax;
f->eax = 0xffffffff;
return;
}
/* To implement virtual memory, delete the rest of the function
body, and replace it with code that brings in the page to
@@ -210,16 +199,88 @@ page_fault (struct intr_frame *f)
kill (f);
}
#ifdef VM
/* Validates whether the fault address is a valid stack access. Access is a
valid stack access under the following two conditions:
1. The fault address must be within MAX_STACK_OFFSET (32) bytes below
the current stack pointer. (Accounts for both PUSH and PUSHA instructions)
2. Growing this stack to this address does not cause it to exceed the
MAX_STACK_SIZE (8MB) limit.
Returns true if both conditions are met, false otherwise.
Pre: fault_addr is a valid user virtual address (so also not NULL). */
static bool
try_fetch_page (void *upage, bool write)
is_valid_stack_access (const void *fault_addr, const void *esp)
{
uint32_t new_stack_size = PHYS_BASE - pg_round_down (fault_addr);
uint32_t *lowest_valid_push_addr = (uint32_t *)esp - MAX_STACK_OFFSET;
bool is_within_push_range = (uint32_t *)fault_addr >= lowest_valid_push_addr;
return is_within_push_range && new_stack_size <= MAX_STACK_SIZE;
}
/* Attempts to grow the stack by allocating and mapping a new page.
This involves:
1. Allocating a zeroed page from the user pool
2. Installing it into the page table with write permissions
Returns true if the stack was successfully grown, false if either
allocation or installation fails.
Pre: upage is a valid page-aligned address (so also not NULL). */
static bool
grow_stack (void *upage)
{
/* Allocate new page for stack */
void *new_page = frame_alloc (PAL_ZERO, upage, thread_current ());
if (new_page == NULL)
return false;
/* Install the page into user page table */
if (!pagedir_set_page (thread_current ()->pagedir, upage, new_page, true))
{
frame_free (new_page);
return false;
}
return true;
}
bool
fetch_page (void *upage, bool write)
{
/* Check if the page is in the supplemental page table. That is, it is a page
that is expected to be in memory. */
struct page_entry *page = page_get (upage);
struct page_entry *page = page_get (thread_current (), upage);
if (page == NULL)
return false;
/* Check if the non-present user page is in the swap partition.
If so, swap it back into main memory, updating the PTE for
the faulted virtual address to point to the newly allocated
frame. */
struct thread *t = thread_current ();
if (page_in_swap (t, upage))
{
/* NOTE: This code should be refactored and moved into helper functions
within 'page.c'.*/
void *kpage = frame_alloc (0, upage, t);
lock_acquire (&page->lock);
size_t swap_slot = page_get_swap (t, upage);
swap_in (kpage, swap_slot);
lock_release (&page->lock);
bool writeable = pagedir_is_writable (t->pagedir, upage);
/* TODO: When this returns false we should quit the page fault,
but currently we continue and check the stack conditions in the
page fault handler. */
return pagedir_set_page (t->pagedir, upage, kpage, writeable);
}
/* An attempt to write to a non-writeable should fail. */
if (write && !page->writable)
return false;
@@ -227,13 +288,18 @@ try_fetch_page (void *upage, bool write)
/* Load the page into memory based on the type of data it is expecting. */
bool success = false;
switch (page->type) {
case PAGE_EXECUTABLE:
success = page_load (page);
case PAGE_MMAP:
case PAGE_FILE:
case PAGE_SHARED:
success = page_load_file (page);
break;
default:
return false;
}
}
if (success && page->writable &&
!pagedir_is_writable(thread_current()->pagedir, upage))
pagedir_set_writable(thread_current()->pagedir, upage, true);
return success;
}
#endif

View File

@@ -10,5 +10,7 @@
void exception_init (void);
void exception_print_stats (void);
bool
try_fetch_page (void *upage, bool write);
#endif /* userprog/exception.h */

View File

@@ -2,14 +2,12 @@
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include "devices/swap.h"
#include "threads/init.h"
#include "threads/pte.h"
#include "threads/palloc.h"
#ifdef VM
#include "threads/thread.h"
#include "vm/frame.h"
#include "vm/page.h"
#endif
static uint32_t *active_pd (void);
@@ -45,19 +43,12 @@ pagedir_destroy (uint32_t *pd)
for (pte = pt; pte < pt + PGSIZE / sizeof *pte; pte++)
{
#ifdef VM
if (*pte & PTE_P)
{
void *page = pte_get_page (*pte);
frame_owner_remove (page, thread_current ());
// frame_free (page);
palloc_free_page (page);
}
page_cleanup_swap (pte);
#else
if (*pte & PTE_P)
palloc_free_page (pte_get_page (*pte));
#endif
if (page_is_shared_pte (pte))
continue;
else if (page_in_swap_pte (pte))
swap_drop (page_get_swap_pte (pte));
else if (*pte & PTE_P)
frame_free (pte_get_page (*pte));
}
palloc_free_page (pt);
}

View File

@@ -25,6 +25,7 @@
#include "threads/synch.h"
#include "devices/timer.h"
#include "vm/page.h"
#include "vm/mmap.h"
#ifdef VM
#include "vm/frame.h"
#endif
@@ -172,9 +173,8 @@ start_process (void *proc_start_data)
to store the command that executed the process. */
if (data->success)
{
data->success = use_shared_file (exec_file)
&& process_init_stack (data->cmd_saveptr, &if_.esp,
data->file_name);
data->success =
process_init_stack (data->cmd_saveptr, &if_.esp, data->file_name);
}
/* Signal that the process has finished attempting to load. */
@@ -364,12 +364,14 @@ process_exit (void)
struct thread *cur = thread_current ();
uint32_t *pd;
/* Unmap all memory mapped files */
mmap_destroy ();
/* Clean up all open files */
hash_destroy (&cur->open_files, fd_cleanup);
#ifdef VM
/* Clean up the thread's supplemental page table. */
hash_destroy (&cur->pages, page_cleanup);
unuse_shared_file (cur->exec_file);
#endif
/* Close the executable file, implicitly allowing it to be written to. */
if (cur->exec_file != NULL)
@@ -703,7 +705,6 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
ASSERT (pg_ofs (upage) == 0);
ASSERT (ofs % PGSIZE == 0);
#ifdef VM
while (read_bytes > 0 || zero_bytes > 0)
{
/* Calculate how to fill this page.
@@ -713,8 +714,8 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
size_t page_zero_bytes = PGSIZE - page_read_bytes;
/* Add the page metadata to the SPT to be lazy loaded later on */
if (page_insert (file, ofs, upage, page_read_bytes, page_zero_bytes,
writable, PAGE_EXECUTABLE) == NULL)
if (page_insert_file (file, ofs, upage, page_read_bytes, page_zero_bytes,
writable, PAGE_FILE) == NULL)
return false;
/* Advance. */
@@ -724,58 +725,6 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
upage += PGSIZE;
}
return true;
#else
file_seek (file, ofs);
while (read_bytes > 0 || zero_bytes > 0)
{
/* Calculate how to fill this page.
We will read PAGE_READ_BYTES bytes from FILE
and zero the final PAGE_ZERO_BYTES bytes. */
size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE;
size_t page_zero_bytes = PGSIZE - page_read_bytes;
/* Check if virtual page already allocated */
struct thread *t = thread_current ();
uint8_t *kpage = pagedir_get_page (t->pagedir, upage);
if (kpage == NULL){
/* Get a new page of memory. */
kpage = get_usr_kpage (0, upage);
if (kpage == NULL){
return false;
}
/* Add the page to the process's address space. */
if (!install_page (upage, kpage, writable))
{
free_usr_kpage (kpage);
return false;
}
} else {
/* Check if writable flag for the page should be updated */
if(writable && !pagedir_is_writable(t->pagedir, upage)){
pagedir_set_writable(t->pagedir, upage, writable);
}
}
/* Load data into the page. */
if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes){
return false;
}
memset (kpage + page_read_bytes, 0, page_zero_bytes);
/* Advance. */
read_bytes -= page_read_bytes;
zero_bytes -= page_zero_bytes;
ofs += PGSIZE;
upage += PGSIZE;
}
return true;
#endif
}
/* Create a minimal stack by mapping a zeroed page at the top of
@@ -814,6 +763,7 @@ get_usr_kpage (enum palloc_flags flags, void *upage)
return NULL;
else
page = frame_alloc (flags, upage, t);
pagedir_set_accessed (t->pagedir, upage, true);
#else
page = palloc_get_page (flags | PAL_USER);
#endif
@@ -826,7 +776,6 @@ static void
free_usr_kpage (void *kpage)
{
#ifdef VM
frame_owner_remove (kpage, thread_current ());
frame_free (kpage);
#else
palloc_free_page (kpage);

View File

@@ -1,5 +1,4 @@
#include "userprog/syscall.h"
#include "userprog/exception.h"
#include "devices/shutdown.h"
#include "devices/input.h"
#include "filesys/file.h"
@@ -11,12 +10,16 @@
#include "threads/synch.h"
#include "userprog/process.h"
#include "userprog/pagedir.h"
#include "vm/frame.h"
#include "vm/page.h"
#include "vm/mmap.h"
#include <stdio.h>
#include <stdbool.h>
#include <syscall-nr.h>
#define MAX_SYSCALL_ARGS 3
#define EXIT_FAILURE -1
#define MMAP_FAILURE -1
struct open_file
{
@@ -46,11 +49,18 @@ static int syscall_write (int fd, const void *buffer, unsigned size);
static void syscall_seek (int fd, unsigned position);
static unsigned syscall_tell (int fd);
static void syscall_close (int fd);
static mapid_t syscall_mmap (int fd, void *addr);
static void syscall_munmap (mapid_t mapping);
static struct open_file *fd_get_file (int fd);
static void validate_user_pointer (const void *ptr, size_t size,
bool check_write);
static void validate_user_string (const char *str, bool check_write);
static void validate_user_ptr (const void *start, size_t size,
bool write);
static void validate_and_pin_user_ptr (const void *start, size_t size,
bool write);
static void validate_and_pin_user_str (const char *ptr);
static void unpin_user_ptr (const void *start, size_t size);
static void unpin_user_str (const char *ptr);
static int get_user (const uint8_t *);
static bool put_user (uint8_t *, uint8_t);
@@ -78,6 +88,8 @@ static const struct syscall_arguments syscall_lookup[] =
[SYS_SEEK] = {(syscall_function) syscall_seek, 2},
[SYS_TELL] = {(syscall_function) syscall_tell, 1},
[SYS_CLOSE] = {(syscall_function) syscall_close, 1},
[SYS_MMAP] = {(syscall_function) syscall_mmap, 2},
[SYS_MUNMAP] = {(syscall_function) syscall_munmap, 1}
};
/* The number of syscall functions (i.e, number of elements) within the
@@ -101,7 +113,7 @@ static void
syscall_handler (struct intr_frame *f)
{
/* First, read the system call number from the stack. */
validate_user_pointer (f->esp, sizeof (uintptr_t), false);
validate_user_ptr (f->esp, sizeof (uintptr_t), false);
uintptr_t syscall_number = *(int *)f->esp;
thread_current ()->curr_esp = f->esp;
@@ -112,7 +124,7 @@ syscall_handler (struct intr_frame *f)
struct syscall_arguments syscall = syscall_lookup[syscall_number];
/* Next, read and copy the arguments from the stack pointer. */
validate_user_pointer (f->esp + sizeof (uintptr_t),
validate_user_ptr (f->esp + sizeof (uintptr_t),
syscall.arity * sizeof (uintptr_t), false);
uintptr_t args[MAX_SYSCALL_ARGS] = { 0 };
for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++)
@@ -145,9 +157,11 @@ syscall_exit (int status)
static pid_t
syscall_exec (const char *cmd_line)
{
validate_user_string (cmd_line, false);
validate_and_pin_user_str (cmd_line);
pid_t pid = process_execute (cmd_line);
unpin_user_str (cmd_line);
return process_execute (cmd_line); /* Returns the PID of the new process */
return pid;
}
/* Handles the syscall of wait. Effectively a wrapper for process_wait as the
@@ -164,13 +178,15 @@ syscall_wait (pid_t pid)
static bool
syscall_create (const char *file, unsigned initial_size)
{
validate_user_string (file, false);
validate_and_pin_user_str (file);
/* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
bool status = filesys_create (file, initial_size);
lock_release (&filesys_lock);
unpin_user_str (file);
/* Return the status of the file creation. */
return status;
}
@@ -181,13 +197,15 @@ syscall_create (const char *file, unsigned initial_size)
static bool
syscall_remove (const char *file)
{
validate_user_string (file, false);
validate_and_pin_user_str (file);
/* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
bool status = filesys_remove (file);
lock_release (&filesys_lock);
unpin_user_str (file);
/* Return the status of the file removal. */
return status;
}
@@ -199,13 +217,15 @@ syscall_remove (const char *file)
static int
syscall_open (const char *file)
{
validate_user_string (file, false);
validate_and_pin_user_str (file);
/* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
struct file *ptr = filesys_open (file);
lock_release (&filesys_lock);
unpin_user_str (file);
/* If the file could not be opened, return failure. */
if (ptr == NULL)
return EXIT_FAILURE;
@@ -265,10 +285,11 @@ syscall_read (int fd, void *buffer, unsigned size)
if (fd < STDIN_FILENO || fd == STDOUT_FILENO)
return EXIT_FAILURE;
validate_user_pointer (buffer, size, true);
if (fd == STDIN_FILENO)
{
/* Validate the user buffer. */
validate_user_ptr (buffer, size, true);
/* Reading from the console. */
char *write_buffer = buffer;
for (unsigned i = 0; i < size; i++)
@@ -286,13 +307,19 @@ syscall_read (int fd, void *buffer, unsigned size)
if (file_info == NULL)
return EXIT_FAILURE;
/* Validate the user buffer, and pin the pages to prevent eviction. */
validate_and_pin_user_ptr (buffer, size, true);
/* Acquire the file system lock to prevent race-conditions. */
lock_acquire (&filesys_lock);
int bytes_written = file_read (file_info->file, buffer, size);
int bytes_read = file_read (file_info->file, buffer, size);
lock_release (&filesys_lock);
/* Unpin the pages to allow eviction. */
unpin_user_ptr (buffer, size);
/* Return the number of bytes read. */
return bytes_written;
return bytes_read;
}
}
@@ -308,10 +335,11 @@ syscall_write (int fd, const void *buffer, unsigned size)
if (fd <= 0)
return 0;
validate_user_pointer (buffer, size, false);
if (fd == STDOUT_FILENO)
{
/* Validate the user buffer. */
validate_user_ptr (buffer, size, false);
/* Writing to the console. */
putbuf (buffer, size);
@@ -327,13 +355,19 @@ syscall_write (int fd, const void *buffer, unsigned size)
if (file_info == NULL)
return 0;
/* Validate the user buffer, and pin the pages to prevent eviction. */
validate_and_pin_user_ptr (buffer, size, false);
/* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
int bytes = file_write (file_info->file, buffer, size);
int bytes_written = file_write (file_info->file, buffer, size);
lock_release (&filesys_lock);
/* Unpin the pages to allow eviction. */
unpin_user_ptr (buffer, size);
/* Return the number of bytes written. */
return bytes;
return bytes_written;
}
}
@@ -392,6 +426,83 @@ syscall_close (int fd)
}
}
/* Handles the syscall for memory mapping a file. */
static mapid_t
syscall_mmap (int fd, void *addr)
{
/* Ensure the FD is for a file in the filesystem (not STDIN or STDOUT). */
if (fd == STDOUT_FILENO || fd == STDIN_FILENO)
return MMAP_FAILURE;
/* Validate that there is a file associated with the given FD. */
struct open_file *file_info = fd_get_file (fd);
if (file_info == NULL)
return MMAP_FAILURE;
/* Ensure that the address is page-aligned and it's neither NULL nor zero. */
if (addr == 0 || addr == NULL || pg_ofs (addr) != 0)
return MMAP_FAILURE;
/* Reopen the file to obtain a separate and independent reference to the file
for the mapping. */
struct file *file = file_reopen (file_info->file);
if (file == NULL)
return MMAP_FAILURE;
/* Get the size of the file. Mmap fails if the file is empty. */
off_t file_size = file_length (file);
if (file_size == 0)
return MMAP_FAILURE;
/* ensures the page for mmap does not overlap with the stack */
if (addr >= (thread_current ()->curr_esp - PGSIZE))
return MMAP_FAILURE;
/* Check and ensure that there is enough space in the user virtual memory to
hold the entire file. */
for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
{
if (page_get (thread_current (), addr + ofs) != NULL)
return MMAP_FAILURE;
}
/* Map the file data into the user virtual memory starting from addr. */
for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
{
off_t read_bytes = file_size - ofs < PGSIZE ? file_size - ofs : PGSIZE;
off_t zero_bytes = PGSIZE - read_bytes;
if (page_insert_file (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
PAGE_FILE) == NULL)
return MMAP_FAILURE;
}
/* Create a new mapping for the file. */
struct mmap_entry *mmap = mmap_insert (file, addr);
if (mmap == NULL)
return MMAP_FAILURE;
return mmap->mapping;
}
/* Handles the syscall for unmapping a memory mapped file.
Pre: mapping is a valid mapping identifier returned by mmap syscall. */
static void
syscall_munmap (mapid_t mapping)
{
/* Get the mmap entry from the mapping identifier. */
struct mmap_entry *mmap = mmap_get (mapping);
/* Delete the mmap entry from the hash table. */
hash_delete (&thread_current ()->mmap_files, &mmap->elem);
/* Unmap the mmap entry: free the pages and write back to the file if
necessary. NOTE. freeing and cleaning up is also handled by mmap_unmap. */
mmap_unmap (mmap);
}
/* Hashing function needed for the open_file table. Returns a hash for an entry,
based on its FD. */
unsigned
@@ -450,69 +561,171 @@ fd_get_file (int fd)
return hash_entry (e, struct open_file, elem);
}
/* Helper function that validates a block of memory and optionally pins frames.
thread_exit() if the memory is invalid. Used only by the two helper functions
validate_user_ptr and validate_and_pin_user_ptr. See the comments for those
functions for more details on each. */
static void
validate_user_ptr_helper (const void *start, size_t size, bool write, bool pin)
{
if (size == 0)
return;
/* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a
valid user virtual memory address. */
void *end = start + size - 1;
if (!is_user_vaddr (end))
syscall_exit (EXIT_FAILURE);
for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
{
int result;
/* Check read access to pointer. */
if ((result = get_user (ptr)) == -1)
syscall_exit (EXIT_FAILURE);
/* Check write access to pointer (if required). */
if (write && !put_user ((uint8_t *)ptr, result))
syscall_exit (EXIT_FAILURE);
/* If pin is set, pin the frame to prevent eviction. */
if (pin)
{
void *kpage = pagedir_get_page(thread_current()->pagedir, ptr);
if (kpage == NULL)
{
// If it was evicted, try to load it back in.
ptr -= PGSIZE;
continue;
}
frame_pin(kpage);
}
}
}
/* Validates if a block of memory starting at PTR and of size SIZE bytes is
fully contained within valid user virtual memory. thread_exit () if the
memory is invalid.
If the size is 0, the function does no checks and returns PTR. */
/* NOTE(review): this span is diff residue — the rendered compare page shows
   both the pre-merge validate_user_pointer (signature + while-loop body) and
   its post-merge replacement validate_user_ptr (signature + one-line
   delegation to validate_user_ptr_helper) without +/- markers.  Only the
   new version should survive a clean checkout; resolve before building. */
static void
validate_user_pointer (const void *ptr, size_t size, bool check_write)
validate_user_ptr (const void *start, size_t size, bool write)
{
  if (size == 0)
    return;
  /* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a
     valid user virtual memory address. */
  void *last = ptr + size - 1;
  if (!is_user_vaddr (last))
    syscall_exit (EXIT_FAILURE);
  ptr = pg_round_down (ptr);
  while (ptr <= last)
    {
      int result;
      /* Check read access to pointer. */
      if ((result = get_user (ptr)) == -1)
        syscall_exit (EXIT_FAILURE);
      /* Check write access to pointer (if required). */
      if (check_write && !put_user (ptr, result))
        syscall_exit (EXIT_FAILURE);
      ptr += PGSIZE;
    }
  /* Post-merge body: all checking is delegated, without pinning. */
  validate_user_ptr_helper (start, size, write, false);
}
/* Validates if a block of memory starting at PTR and of size SIZE bytes is
fully contained within valid user virtual memory. thread_exit () if the
memory is invalid. The function also checks if the memory is writable if
WRITE flag is set.
The function attempts to preload the pages in case they are not in memory
yet (e.g., in a swap, lazy loading). If this is successful, the frame pages
are pinned to prevent eviction prior to access.
As such, a call to this function MUST be followed by a call to
unpin_user_ptr (START, SIZE) to unpin the pages and allow eviction.
If the size is 0, the function does no checks and returns PTR. */
static void
validate_and_pin_user_ptr (const void *start, size_t size, bool write)
{
  /* Same checks as validate_user_ptr, but with pin = true so each touched
     frame is pinned against eviction until unpin_user_ptr is called. */
  validate_user_ptr_helper (start, size, write, true);
}
/* Unpins all the pages containing a block of memory starting at START and of
size SIZE bytes.
Pre: The pages were previously pinned by validate_and_pin_user_ptr (START,
SIZE). */
/* Unpins all the pages containing a block of memory starting at START and of
   size SIZE bytes.
   Pre: The pages were previously pinned by validate_and_pin_user_ptr (START,
   SIZE). */
static void
unpin_user_ptr (const void *start, size_t size)
{
  /* Mirror validate_and_pin_user_ptr: SIZE == 0 pinned nothing, so there is
     nothing to unpin.  Without this guard END underflows to START - 1 and,
     for a non-page-aligned START, we would unpin (and possibly ASSERT on) a
     frame this call never pinned. */
  if (size == 0)
    return;
  const void *end = start + size - 1;
  /* No validity checks needed: this function is only called after a
     successful validate_and_pin_user_ptr on the same range. */
  /* Go through all pages in the block range, unpinning the frames. */
  for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
    {
      void *kpage = pagedir_get_page (thread_current ()->pagedir, ptr);
      /* The frame was pinned, so it cannot have been evicted. */
      ASSERT (kpage != NULL);
      frame_unpin (kpage);
    }
}
/* Validates of a C-string starting at ptr is fully contained within valid
   user virtual memory. thread_exit () if the memory is invalid. */
/* NOTE(review): diff residue — the old validate_user_string and the new
   validate_and_pin_user_str are fused below (both signatures, both access
   checks, and a duplicated inner scan loop).  Only the pinning version
   should survive a clean checkout; resolve before building. */
static void
validate_user_string (const char *ptr, bool check_write)
validate_and_pin_user_str (const char *ptr)
{
  size_t offset = (uintptr_t) ptr % PGSIZE;
  for (;;)
    {
      void *page = pg_round_down (ptr);
      if (!is_user_vaddr (page))
        syscall_exit (EXIT_FAILURE);
      if (!is_user_vaddr (ptr))
        syscall_exit (EXIT_FAILURE);
      int result;
      if ((result = get_user ((const uint8_t *)ptr)) == -1)
        syscall_exit (EXIT_FAILURE);
      if (check_write && !put_user ((uint8_t *)ptr, result))
      if (get_user ((const uint8_t *)ptr) == -1)
        syscall_exit (EXIT_FAILURE);
      /* Pin the frame to prevent eviction. */
      void *page = pg_round_down (ptr);
      void *kpage = pagedir_get_page (thread_current ()->pagedir, page);
      if (kpage == NULL)
        {
          // If it was evicted, attempt to reload.
          ptr -= PGSIZE;
          continue;
        }
      frame_pin (kpage);
      while (offset < PGSIZE)
        {
          if (*ptr == '\0')
            return; /* We reached the end of the string without issues. */
          {
            if (*ptr == '\0')
              return; /* We reached the end of the string without issues. */
            ptr++;
            offset++;
          }
          ptr++;
          offset++;
        }
      offset = 0;
    }
}
/* Unpins all the pages containing a C-string starting at PTR.
Pre: The pages were previously pinned by validate_and_pin_user_str (PTR).
PTR points to a valid C string that ends with '\0'. */
static void
unpin_user_str (const char *ptr)
{
size_t offset = (uintptr_t)ptr % PGSIZE;
const char *str_ptr = ptr;
for (;;)
{
void *page = pg_round_down(str_ptr);
void *kpage = pagedir_get_page(thread_current()->pagedir, page);
ASSERT(kpage != NULL);
frame_unpin (kpage);
/* Scan until end of string or page */
while (offset < PGSIZE)
{
if (*str_ptr == '\0')
return; /* Found end of string */
str_ptr++;
offset++;
}
offset = 0;
}
}
/* PROVIDED BY SPEC.
Reads a byte at user virtual address UADDR.
UADDR must be below PHYS_BASE.

View File

@@ -2,15 +2,14 @@
#include <hash.h>
#include <list.h>
#include <string.h>
#include "frame.h"
#include "page.h"
#include "filesys/file.h"
#include "threads/malloc.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "userprog/syscall.h"
#include "threads/synch.h"
#include "devices/swap.h"
/* Hash table that maps every active frame's kernel virtual address
to its corresponding 'frame_metadata'.*/
@@ -33,18 +32,13 @@ struct list_elem *next_victim = NULL;
/* Protects access to 'lru_list'. */
struct lock lru_lock;
struct frame_owner
{
struct thread *thread; /* Pointer to the thread referenced by the owner.*/
struct list_elem elem; /* List element for the owners list in
frame_metadata. */
};
struct frame_metadata
{
void *frame; /* The kernel virtual address holding the frame. */
void *upage; /* The user virtual address pointing to the frame. */
struct list owners; /* List of owners of the frame. */
struct list owners; /* List of threads that own the frame. */
bool pinned; /* Indicates wheter the frame should be
considered as an eviction candidate.*/
struct hash_elem hash_elem; /* Tracks the position of the frame metadata
within 'frame_table', whose key is the
kernel virtual address of the frame. */
@@ -56,11 +50,12 @@ struct frame_metadata
hash_hash_func frame_metadata_hash;
hash_less_func frame_metadata_less;
static struct frame_metadata *frame_metadata_find (void *frame);
static struct list_elem *lru_next (struct list_elem *e);
static struct list_elem *lru_prev (struct list_elem *e);
static struct frame_metadata *frame_metadata_get (void *frame);
static struct frame_metadata *get_victim (void);
static void free_owners (struct list *owners);
static struct frame_metadata *frame_metadata_find (void *frame);
/* Initialize the frame system by initializing the frame (hash) table with
the frame_metadata hashing and comparison functions, as well as initializing
@@ -86,7 +81,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
{
struct frame_metadata *frame_metadata;
flags |= PAL_USER;
lock_acquire (&lru_lock);
void *frame = palloc_get_page (flags);
@@ -102,53 +97,28 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
struct frame_metadata *victim = get_victim ();
ASSERT (victim != NULL); /* get_victim () should never return null. */
/* 2. Swap out victim into disk. */
/* Mark page as 'not present' and flag the page directory as having
been modified *before* eviction begins to prevent the owner of the
victim page from accessing/modifying it mid-eviction. */
struct list_elem *e;
struct file *file = NULL;
for (e = list_begin (&victim->owners); e != list_end (&victim->owners);
e = list_next (e))
/* 2. Handle victim page writing based on its type. */
struct page_entry *victim_page = page_get (thread_current (), victim->upage);
if (victim_page != NULL && victim_page->type == PAGE_MMAP)
{
struct frame_owner *frame_owner
= list_entry (e, struct frame_owner, elem);
file = frame_owner->thread->exec_file;
pagedir_clear_page (frame_owner->thread->pagedir, victim->upage);
}
/* If file is found then it must be the same for all owners, and might
have a single shared page entry. */
struct shared_page_entry *shared_page = NULL;
if (file != NULL)
{
lock_acquire (&shared_files_lock);
shared_page = shared_page_get (file, victim->upage);
ASSERT (shared_page == NULL || shared_page->frame != NULL);
if (shared_page == NULL)
lock_release (&shared_files_lock);
}
// TODO: Lock PTE of victim page for victim process.
size_t swap_slot = swap_out (victim->frame);
/* If frame had a shared page, unsign it, and set the swap slot.
Otherwise, set the swap slot in the pagedir of the owners threads. */
if (shared_page != NULL)
{
shared_page->frame = NULL;
shared_page->swap_slot = swap_slot;
lock_release (&shared_files_lock);
/* If it was a memory-mapped file page, we just write it back
to the file if it was dirty. */
if (pagedir_is_dirty(owner->pagedir, victim->upage))
{
lock_acquire (&filesys_lock);
file_write_at (victim_page->file, victim->upage,
victim_page->read_bytes, victim_page->offset);
lock_release (&filesys_lock);
}
}
else
for (e = list_begin (&victim->owners); e != list_end (&victim->owners);
e = list_next (e))
{
struct frame_owner *frame_owner
= list_entry (e, struct frame_owner, elem);
page_set_swap (frame_owner->thread, victim->upage, swap_slot);
}
{
/* Otherwise, insert the page into swap. */
page_insert_swapped (victim->upage, victim->frame, &victim->owners);
}
/* Free victim's owners. */
free_owners (&victim->owners);
/* If zero flag is set, zero out the victim page. */
if (flags & PAL_ZERO)
@@ -169,6 +139,8 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
/* Must own lru_lock here, as otherwise there is a race condition
with next_victim either being NULL or uninitialized. */
frame_metadata = malloc (sizeof (struct frame_metadata));
if (frame_metadata == NULL)
PANIC ("Couldn't allocate memory for frame metadata!\n");
frame_metadata->frame = frame;
/* Newly allocated frames are pushed to the back of the circular queue
@@ -188,57 +160,38 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
hash_insert (&frame_table, &frame_metadata->hash_elem);
}
frame_metadata->upage = upage;
list_init (&frame_metadata->owners);
struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
frame_owner->thread = owner;
if (frame_owner == NULL)
PANIC ("Couldn't allocate memory for frame owner!\n");
frame_owner->owner = owner;
list_init (&frame_metadata->owners);
list_push_back (&frame_metadata->owners, &frame_owner->elem);
frame_metadata->upage = upage;
frame_metadata->pinned = false;
lock_release (&lru_lock);
return frame_metadata->frame;
}
/* Add a thread to a frame's frame_metadata owners list. */
bool
frame_owner_insert (void *frame, struct thread *owner)
{
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
if (frame_metadata == NULL)
return false;
struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
if (frame_owner == NULL)
return false;
frame_owner->thread = owner;
list_push_back (&frame_metadata->owners, &frame_owner->elem);
return true;
}
/* Remove and deallocate a frame owner from the frame_metadata owners list. */
void
frame_owner_remove (void *frame, struct thread *owner)
frame_pin (void *frame)
{
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
struct frame_metadata *frame_metadata = frame_metadata_get (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to remove an owner from a frame at kernel "
"address %p, but this address is not allocated!\n",
PANIC ("Attempted to pin a frame at an unallocated kernel address '%p'\n",
frame);
struct list_elem *oe;
for (oe = list_begin (&frame_metadata->owners);
oe != list_end (&frame_metadata->owners);)
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
oe = list_next (oe);
if (frame_owner->thread == owner)
{
list_remove (&frame_owner->elem);
free (frame_owner);
return;
}
}
NOT_REACHED ();
frame_metadata->pinned = true;
}
/* Marks the frame at kernel address FRAME as unpinned, making it an
   eviction candidate again.  Panics if FRAME is not a registered frame. */
void
frame_unpin (void *frame)
{
  struct frame_metadata *metadata = frame_metadata_get (frame);
  if (metadata == NULL)
    PANIC ("Attempted to unpin a frame at an unallocated kernel address '%p'\n",
           frame);
  metadata->pinned = false;
}
/* Attempt to deallocate a frame for a user process by removing it from the
@@ -249,13 +202,13 @@ frame_free (void *frame)
{
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to free a frame at kernel "
"address %p, but this address is not allocated!\n",
PANIC ("Attempted to free a frame at kernel address %p, "
"but this address is not allocated!\n",
frame);
ASSERT (list_empty (&frame_metadata->owners));
free_owners (&frame_metadata->owners);
lock_acquire (&lru_lock);
hash_delete (&frame_table, &frame_metadata->hash_elem);
list_remove (&frame_metadata->list_elem);
/* If we're freeing the frame marked as the next victim, update
@@ -274,6 +227,50 @@ frame_free (void *frame)
palloc_free_page (frame);
}
/* Registers OWNER as an additional owning thread of the frame at kernel
   address FRAME.  Returns true on success; false if FRAME is not a
   registered frame or the owner node cannot be allocated. */
bool
frame_owner_insert (void *frame, struct thread *owner)
{
  struct frame_metadata *metadata = frame_metadata_find (frame);
  if (metadata == NULL)
    return false;
  struct frame_owner *node = malloc (sizeof *node);
  if (node == NULL)
    return false;
  node->owner = owner;
  list_push_back (&metadata->owners, &node->elem);
  return true;
}
/* Remove and deallocate a frame owner from the frame_metadata owners list.
*/
void
frame_owner_remove (void *frame, struct thread *owner)
{
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to remove an owner from a frame at kernel "
"address %p, but this address is not allocated!\n",
frame);
struct list_elem *oe;
for (oe = list_begin (&frame_metadata->owners);
oe != list_end (&frame_metadata->owners);)
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
oe = list_next (oe);
if (frame_owner->owner == owner)
{
list_remove (&frame_owner->elem);
free (frame_owner);
return;
}
}
NOT_REACHED ();
}
/* Find a frame_metadata entry in the frame table. */
static struct frame_metadata *
frame_metadata_find (void *frame)
@@ -299,18 +296,23 @@ get_victim (void)
while (!found)
{
frame_metadata = list_entry (ve, struct frame_metadata, list_elem);
void *upage = frame_metadata->upage;
ve = lru_next (ve);
/* Check whether any owner thread has accessed the page. */
found = true;
struct list_elem *oe;
/* Skip pinned frames */
if (frame_metadata->pinned)
continue;
/* Returns once a frame that was not accessed by any owner is found. */
found = true;
for (oe = list_begin (&frame_metadata->owners);
oe != list_end (&frame_metadata->owners); oe = list_next (oe))
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
uint32_t *pd = frame_owner->thread->pagedir;
uint32_t *pd = frame_owner->owner->pagedir;
void *upage = frame_metadata->upage;
if (pagedir_is_accessed (pd, upage))
{
found = false;
@@ -323,6 +325,19 @@ get_victim (void)
return frame_metadata;
}
/* Deallocates every frame_owner node in OWNERS, leaving the list empty. */
static void
free_owners (struct list *owners)
{
  struct list_elem *e = list_begin (owners);
  while (e != list_end (owners))
    {
      struct frame_owner *node = list_entry (e, struct frame_owner, elem);
      /* list_remove yields the successor, keeping iteration valid after
         the node is freed. */
      e = list_remove (e);
      free (node);
    }
}
/* Hash function for frame metadata, used for storing entries in the
frame table. */
unsigned
@@ -340,14 +355,26 @@ frame_metadata_hash (const struct hash_elem *e, void *aux UNUSED)
/* NOTE(review): diff residue — two copies of this comparator's body are
   fused below (pre- and post-reindent versions of the same code).  Keep
   exactly one body on a clean checkout. */
bool
frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
                     void *aux UNUSED)
{
  struct frame_metadata *a =
      hash_entry (a_, struct frame_metadata, hash_elem);
  struct frame_metadata *b =
      hash_entry (b_, struct frame_metadata, hash_elem);
  {
    struct frame_metadata *a =
        hash_entry (a_, struct frame_metadata, hash_elem);
    struct frame_metadata *b =
        hash_entry (b_, struct frame_metadata, hash_elem);
    return a->frame < b->frame;
  }
  return a->frame < b->frame;
}
static struct frame_metadata *
frame_metadata_get (void *frame)
{
struct frame_metadata key_metadata;
key_metadata.frame = frame;
struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
if (e == NULL) return NULL;
return hash_entry (e, struct frame_metadata, hash_elem);
}
/* Returns the next recently used element after the one provided, which
is achieved by iterating through lru_list like a circular queue

View File

@@ -4,8 +4,16 @@
#include "threads/thread.h"
#include "threads/palloc.h"
/* A node tying one owning thread to a frame; a shared frame carries one
   such node per owner in frame_metadata's owners list. */
struct frame_owner
  {
    struct thread *owner; /* The thread that owns the frame. */
    struct list_elem elem; /* List element for the list of owners. */
  };
void frame_init (void);
void *frame_alloc (enum palloc_flags, void *, struct thread *);
void frame_pin (void *frame);
void frame_unpin (void *frame);
void frame_free (void *frame);
bool frame_owner_insert (void *frame, struct thread *owner);

129
src/vm/mmap.c Normal file
View File

@@ -0,0 +1,129 @@
#include "mmap.h"
#include "page.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#include "threads/malloc.h"
#include "userprog/syscall.h"
#include "userprog/pagedir.h"
#include <stdio.h>
static unsigned mmap_hash (const struct hash_elem *e, void *aux);
static bool mmap_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux);
static void mmap_cleanup(struct hash_elem *e, void *aux);
/* Initializes the mmap table for the given thread, setting the mmap counter to
   0 and initializing the hash table.  Returns false if the hash table could
   not be initialized. */
bool
mmap_init (struct thread *t)
{
  /* Mapping identifiers are handed out sequentially starting from 0. */
  t->mmap_counter = 0;
  return hash_init (&t->mmap_files, mmap_hash, mmap_less, NULL);
}
/* Looks up the current thread's mmap entry with identifier MAPPING.
   Returns NULL when no such mapping exists. */
struct mmap_entry *
mmap_get (mapid_t mapping)
{
  struct mmap_entry key = { .mapping = mapping };
  struct hash_elem *found
      = hash_find (&thread_current ()->mmap_files, &key.elem);
  return found != NULL ? hash_entry (found, struct mmap_entry, elem) : NULL;
}
/* Inserts a new mmap entry into the mmap table for the current thread. Upage
is the start address of the file data in the user VM. */
struct mmap_entry *
mmap_insert (struct file *file, void *upage)
{
if (file == NULL || upage == NULL)
return NULL;
struct mmap_entry *mmap = malloc (sizeof (struct mmap_entry));
if (mmap == NULL)
return NULL;
mmap->mapping = thread_current ()->mmap_counter++;
mmap->file = file;
mmap->upage = upage;
hash_insert (&thread_current ()->mmap_files, &mmap->elem);
return mmap;
}
/* Unmaps the given mmap entry from the current thread's mmap table. */
void
mmap_unmap (struct mmap_entry *mmap)
{
if (mmap == NULL)
return;
/* Free all the pages associated with the mapping, writing back to the file
if necessary. */
off_t length = file_length (mmap->file);
for (off_t ofs = 0; ofs < length; ofs += PGSIZE)
{
void *upage = mmap->upage + ofs;
/* Get the SPT page entry for this page. */
struct page_entry *page = page_get(thread_current (), upage);
if (page == NULL)
continue;
/* Write the page back to the file if it is dirty. */
if (pagedir_is_dirty (thread_current ()->pagedir, upage))
{
lock_acquire (&filesys_lock);
file_write_at (mmap->file, upage, page->read_bytes, ofs);
lock_release (&filesys_lock);
}
/* Remove the page from the supplemental page table. */
hash_delete (&thread_current ()->pages, &page->elem);
}
file_close (mmap->file);
free (mmap);
}
/* Destroys the mmap table for the current thread. Frees all the memory
   allocated for the mmap entries. */
void
mmap_destroy (void)
{
  /* mmap_cleanup unmaps each entry (write-back + free) as the table is
     torn down. */
  hash_destroy (&thread_current ()->mmap_files, mmap_cleanup);
}
/* Hash function for the mmap table: the mapping identifier itself serves
   as the hash value. */
static unsigned
mmap_hash (const struct hash_elem *e, void *aux UNUSED)
{
  const struct mmap_entry *entry = hash_entry (e, struct mmap_entry, elem);
  return entry->mapping;
}
/* Orders mmap table entries by ascending mapping identifier. */
static bool
mmap_less (const struct hash_elem *a_, const struct hash_elem *b_,
           void *aux UNUSED)
{
  mapid_t a = hash_entry (a_, struct mmap_entry, elem)->mapping;
  mapid_t b = hash_entry (b_, struct mmap_entry, elem)->mapping;
  return a < b;
}
/* hash_destroy callback: unmaps the mapping held by E, writing dirty
   pages back and freeing all associated memory via mmap_unmap. */
static void
mmap_cleanup (struct hash_elem *e, void *aux UNUSED)
{
  mmap_unmap (hash_entry (e, struct mmap_entry, elem));
}

27
src/vm/mmap.h Normal file
View File

@@ -0,0 +1,27 @@
#ifndef VM_MMAP_H
#define VM_MMAP_H
#include <hash.h>
#include "threads/thread.h"
#include "filesys/file.h"
/* A mapping identifier type. */
typedef unsigned mapid_t;
/* A structure to represent a memory mapped file. */
struct mmap_entry {
  mapid_t mapping; /* The mapping identifier of the mapped file. */
  struct file *file; /* A pointer to the file that is being mapped. */
  void *upage; /* The start address of the file data in the user VM. */
  struct hash_elem elem; /* An elem for the hash table. */
};
bool mmap_init (struct thread *t);
struct mmap_entry *mmap_get (mapid_t mapping);
struct mmap_entry *mmap_insert (struct file *file, void *upage);
void mmap_unmap (struct mmap_entry *mmap);
/* NOTE(review): likely a typo for "mmap_unmap_all"; no matching definition
   appears in mmap.c — implement or remove before anything links against it. */
void mmap_umap_all (void);
void mmap_destroy (void);
#endif /* vm/mmap.h */

View File

@@ -1,57 +1,57 @@
#include "page.h"
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "devices/swap.h"
#include "filesys/file.h"
#include "filesys/filesys.h"
#include "threads/pte.h"
#include "threads/malloc.h"
#include "threads/palloc.h"
#include "threads/pte.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "threads/synch.h"
#include "devices/swap.h"
#include "userprog/process.h"
#include "userprog/pagedir.h"
#include "vm/frame.h"
#define SWAP_FLAG_BIT 9
#define SHARED_FLAG_BIT 10
#define ADDR_START_BIT 12
struct hash shared_files;
struct hash shared_file_pages;
struct lock shared_file_pages_lock;
static unsigned page_hash (const struct hash_elem *e, void *aux UNUSED);
static bool page_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux UNUSED);
static struct shared_file_entry *shared_file_insert (struct file *file);
static struct shared_page_entry *shared_page_insert (struct file *file,
void *upage, void *frame);
static struct shared_file_entry *shared_file_get (struct file *file);
static unsigned shared_file_hash (const struct hash_elem *e, void *aux UNUSED);
static bool shared_file_less (const struct hash_elem *a_,
const struct hash_elem *b_, void *aux UNUSED);
static unsigned shared_page_hash (const struct hash_elem *e, void *aux UNUSED);
static bool shared_page_less (const struct hash_elem *a_,
const struct hash_elem *b_, void *aux UNUSED);
static void shared_page_cleanup (struct hash_elem *e, void *aux UNUSED);
static void page_unset_swap (struct thread *owner, void *upage);
/* Initialise a thread's supplemental pages table. */
static void page_flag_shared (struct thread *owner, void *upage, bool shared);
static unsigned shared_file_page_hash (const struct hash_elem *e,
void *aux UNUSED);
static bool shared_file_page_less (const struct hash_elem *a_,
const struct hash_elem *b_,
void *aux UNUSED);
static struct shared_file_page *shared_file_page_get (struct file *file,
void *upage);
/* Initialise a supplementary page table. */
bool
init_pages (struct thread *t)
init_pages (struct hash *pages)
{
return hash_init (&t->pages, page_hash, page_less, NULL);
ASSERT (pages != NULL);
return hash_init (pages, page_hash, page_less, NULL);
}
/* Hashing function needed for the SPT table. Returns a hash for an entry,
based on its upage. */
unsigned
static unsigned
page_hash (const struct hash_elem *e, void *aux UNUSED)
{
struct page_entry *page = hash_entry (e, struct page_entry, elem);
return hash_ptr(page->upage);
return hash_ptr (page->upage);
}
/* Comparator function for the SPT table. Compares two entries based on their
upages. */
bool
static bool
page_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux UNUSED)
{
@@ -61,13 +61,80 @@ page_less (const struct hash_elem *a_, const struct hash_elem *b_,
return a->upage < b->upage;
}
/* Allocate and insert a new page entry into the thread's page table. */
static void page_flag_swap (uint32_t *pte, bool set);
static void page_set_swap (struct thread *owner, uint32_t *pte,
size_t swap_slot);
// TODO: Deal with NULL malloc returns
/* Swap out 'owner' process's 'upage' stored at 'kpage'. Then, allocate and
   insert a new page entry into the user process thread's SPT representing
   this swapped out page.  OWNERS is the frame's owner list: a single owner
   for a private page, several for a shared read-only executable page.
   Returns true on success, false if a page entry could not be allocated. */
bool
page_insert_swapped (void *upage, void *kpage, struct list *owners)
{
  struct file *exec_file = NULL;
  struct list_elem *e;
  for (e = list_begin (owners); e != list_end (owners); e = list_next (e))
    {
      struct thread *owner = list_entry (e, struct frame_owner, elem)->owner;
      uint32_t *pte = lookup_page (owner->pagedir, upage, false);
      /* Shared pages: just unmap from every owner; the swap-out itself is
         recorded once in the shared_file_pages table after the loop. */
      if (exec_file != NULL || page_is_shared_pte (pte))
        {
          ASSERT (page_is_shared_pte (pte));
          pagedir_clear_page (owner->pagedir, upage);
          exec_file = owner->exec_file;
          ASSERT (exec_file != NULL);
          continue;
        }
      /* A non-shared page has exactly one owner. */
      ASSERT (list_size (owners) == 1);
      /* 1. Initialize swapped page entry. */
      struct page_entry *page = page_get (owner, upage);
      if (page == NULL)
        {
          page = malloc (sizeof (struct page_entry));
          if (page == NULL)
            return false; /* Was 'return NULL' — wrong for a bool function. */
          page->upage = upage;
          lock_init (&page->lock);
          hash_insert (&owner->pages, &page->elem);
        }
      /* Mark page as 'swapped' and flag the page directory as having
         been modified *before* eviction begins to prevent the owner of the
         victim page from accessing/modifying it mid-eviction. */
      /* TODO: We need to stop the process from destroying pagedir mid-eviction,
         as this could render the page table entry invalid. */
      page_flag_swap (pte, true);
      lock_acquire (&page->lock);
      pagedir_clear_page (owner->pagedir, upage);
      size_t swap_slot = swap_out (kpage);
      page_set_swap (owner, pte, swap_slot);
      lock_release (&page->lock);
    }
  /* Shared page: record the swap slot once in the global table so every
     owner faults it back from the same place. */
  if (exec_file != NULL)
    {
      lock_acquire (&shared_file_pages_lock);
      struct shared_file_page *sfp = shared_file_page_get (exec_file, upage);
      sfp->frame = NULL;
      sfp->swap_slot = swap_out (kpage);
      lock_release (&shared_file_pages_lock);
    }
  return true;
}
/* Allocate and insert a new page entry into the user process thread's
SPT representing a file page. */
struct page_entry *
page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes,
uint32_t zero_bytes, bool writable, enum page_type type)
page_insert_file (struct file *file, off_t ofs, void *upage,
uint32_t read_bytes, uint32_t zero_bytes, bool writable,
enum page_type type)
{
/* If page exists, just update it. */
struct page_entry *existing = page_get (upage);
struct thread *t = thread_current ();
struct page_entry *existing = page_get (t, upage);
if (existing != NULL)
{
ASSERT (existing->read_bytes == read_bytes);
@@ -75,19 +142,20 @@ page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes,
existing->writable = existing->writable || writable;
return existing;
}
/* Otherwise allocate a new one. */
struct page_entry *page = malloc(sizeof (struct page_entry));
if (page == NULL)
return NULL;
page->type = type;
page->file = file;
page->offset = ofs;
page->upage = upage;
page->read_bytes = read_bytes;
page->zero_bytes = zero_bytes;
page->writable = writable;
page->shared = false;
page->type = type;
lock_init (&page->lock);
hash_insert (&thread_current ()->pages, &page->elem);
return page;
}
@@ -95,14 +163,15 @@ page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes,
/* Gets a page_entry from the starting address of the page. Returns NULL if no
such page_entry exists in the hash map.*/
struct page_entry *
page_get (void *upage)
page_get (struct thread *thread, void *upage)
{
lock_acquire (&thread->spt_lock);
struct page_entry fake_page_entry;
fake_page_entry.upage = upage;
struct hash_elem *e
= hash_find (&thread_current ()->pages, &fake_page_entry.elem);
= hash_find (&thread->pages, &fake_page_entry.elem);
lock_release (&thread->spt_lock);
if (e == NULL)
return NULL;
@@ -110,79 +179,113 @@ page_get (void *upage)
}
bool
page_load (struct page_entry *page)
page_load_file (struct page_entry *page)
{
struct thread *t = thread_current ();
/* If the page is read-only, we want to check if it is a shared page already
loaded into memory. If it is, we can just map the page to the frame. */
if (!page->writable)
{
lock_acquire (&shared_files_lock);
struct shared_page_entry *shared_page =
shared_page_get (page->file, page->upage);
/* Mark page as shared and install the shared frame. */
if (shared_page != NULL)
{
if (shared_page->frame == NULL)
{
void *frame = frame_alloc (PAL_USER, page->upage, t);
if (frame == NULL)
return false;
shared_page->frame = frame;
}
lock_release (&shared_files_lock);
if (!install_page (page->upage, shared_page->frame, false))
return false;
page->shared = true;
return true;
}
}
/* Allocate a frame for the page. If a frame allocation fails, then
frame_alloc should try to evict a page. If it is still NULL, the OS
panics as this should not happen if eviction is working correctly. */
struct thread *t = thread_current ();
bool shareable = !page->writable && file_compare (page->file, t->exec_file);
if (shareable)
{
lock_acquire (&shared_file_pages_lock);
struct shared_file_page *sfp
= shared_file_page_get (page->file, page->upage);
if (sfp != NULL)
{
/* Frame exists, just install it. */
if (sfp->frame != NULL)
{
if (!install_page (page->upage, sfp->frame, page->writable))
{
lock_release (&shared_file_pages_lock);
return false;
}
/* First time adding the shared page, so add thread as owner. */
if (page->type != PAGE_SHARED)
{
frame_owner_insert (sfp->frame, t);
}
}
/* Shared page is in swap. Load it. */
else
{
void *frame = frame_alloc (PAL_USER, page->upage, t);
if (frame == NULL)
PANIC (
"Could not allocate a frame to load page into memory.");
swap_in (frame, sfp->swap_slot);
if (!install_page (page->upage, frame, false))
{
frame_free (frame);
lock_release (&shared_file_pages_lock);
return false;
}
}
page_flag_shared (t, page->upage, true);
if (page->type != PAGE_SHARED)
{
sfp->ref_count++;
page->type = PAGE_SHARED;
}
lock_release (&shared_file_pages_lock);
return true;
}
}
void *frame = frame_alloc (PAL_USER, page->upage, t);
pagedir_set_accessed (t->pagedir, page->upage, true);
if (frame == NULL)
PANIC ("Could not allocate a frame to load page into memory.");
/* Ensure page is not marked as shared while it doesn't exist in the
shared_files table, to avoid memory leaks. */
page->shared = false;
/* Map the page to the frame. */
if (!install_page (page->upage, frame, page->writable))
goto fail;
{
if (shareable)
lock_release (&shared_file_pages_lock);
frame_free (frame);
return false;
}
/* Move the file pointer to the correct location in the file. Then, read the
data from the file into the frame. Checks that we were able to read the
expected number of bytes. */
file_seek (page->file, page->offset);
if (file_read (page->file, frame, page->read_bytes) != (int) page->read_bytes)
goto fail;
{
if (shareable)
lock_release (&shared_file_pages_lock);
frame_free (frame);
return false;
}
/* Zero out the remaining bytes in the frame. */
memset (frame + page->read_bytes, 0, page->zero_bytes);
/* If the page is read-only, we need to add it to the shared pages table. */
if (!page->writable)
/* If file page is read-only, make it shared. */
if (shareable)
{
struct shared_page_entry *shared_page
= shared_page_insert (page->file, page->upage, frame);
if (shared_page == NULL)
goto fail;
lock_release (&shared_files_lock);
page->shared = true;
struct shared_file_page *sfp = malloc (sizeof (struct shared_file_page));
if (sfp == NULL)
{
lock_release (&shared_file_pages_lock);
frame_free (frame);
return false;
}
sfp->file = page->file;
sfp->upage = page->upage;
sfp->frame = frame;
sfp->swap_slot = 0;
sfp->ref_count = 1;
hash_insert (&shared_file_pages, &sfp->elem);
page_flag_shared (t, page->upage, true);
page->type = PAGE_SHARED;
lock_release (&shared_file_pages_lock);
}
/* Mark the page as loaded successfully. */
return true;
fail:
if (!page->writable)
lock_release (&shared_files_lock);
frame_owner_remove (frame, t);
frame_free (frame);
return false;
}
/* Function to clean up a page_entry. Given the elem of that page_entry, frees
@@ -190,217 +293,47 @@ fail:
/* hash_action_func used when destroying a process's supplemental page
   table.  Frees the page_entry and, for shared pages, detaches the page
   from this process and drops one reference on the global
   shared_file_page, destroying its frame or swap slot when this process
   was the last user. */
void
page_cleanup (struct hash_elem *e, void *aux UNUSED)
{
  struct thread *t = thread_current ();
  lock_acquire (&thread_current ()->spt_lock);
  struct page_entry *page = hash_entry (e, struct page_entry, elem);
  /* If page is shared then mark it as not present and not in swap, to avoid
   * being freed. */
  uint32_t *pd = t->pagedir;
  /* NOTE(review): the two stacked conditions below look like a merge
     artifact — the shared-page branch only runs when BOTH the old test
     (pd != NULL && page->shared) and the new test
     (page->type == PAGE_SHARED) hold.  Confirm which is intended. */
  if (pd != NULL && page->shared)
    if (page->type == PAGE_SHARED)
      {
        /* Detach the page from this process's page directory first. */
        frame_owner_remove (pagedir_get_page (pd, page->upage), t);
        pagedir_clear_page (pd, page->upage);
        page_unset_swap (t, page->upage);
        lock_acquire (&shared_file_pages_lock);
        struct shared_file_page *sfp
            = shared_file_page_get (page->file, page->upage);
        ASSERT (sfp != NULL);
        if (sfp->frame != NULL)
          frame_owner_remove (sfp->frame, thread_current ());
        sfp->ref_count--;
        if (sfp->ref_count == 0)
          {
            /* Last sharer: release the backing frame or swap slot. */
            hash_delete (&shared_file_pages, &sfp->elem);
            if (sfp->frame != NULL)
              frame_free (sfp->frame);
            else
              swap_drop (sfp->swap_slot);
            free (sfp);
          }
        lock_release (&shared_file_pages_lock);
      }
  free (page);
  lock_release (&thread_current ()->spt_lock);
}
/* Initialises the shared_files table and the lock protecting it.
   Panics the kernel if the table cannot be created. */
void
shared_files_init ()
{
  lock_init (&shared_files_lock);
  bool created = hash_init (&shared_files, shared_file_hash,
                            shared_file_less, NULL);
  if (!created)
    PANIC ("Failed to initialise shared_files table.");
}
/* Registers one additional user of FILE in the shared_files table,
   creating the table entry on first use.  Returns false iff a new entry
   was required but could not be allocated. */
bool
use_shared_file (struct file *file)
{
  bool success = true;
  lock_acquire (&shared_files_lock);
  struct shared_file_entry *entry = shared_file_get (file);
  if (entry == NULL)
    entry = shared_file_insert (file);
  if (entry != NULL)
    entry->ref_count++;
  else
    success = false;
  lock_release (&shared_files_lock);
  return success;
}
/* Drops one user of FILE from the shared_files table.  When the reference
   count reaches zero, the entry and all of its cached shared pages are
   destroyed.  Returns false if FILE has no entry, true otherwise. */
bool
unuse_shared_file (struct file *file)
{
  lock_acquire (&shared_files_lock);
  struct shared_file_entry *shared_file = shared_file_get (file);
  if (shared_file == NULL)
    {
      lock_release (&shared_files_lock);
      return false;
    }
  shared_file->ref_count--;
  if (shared_file->ref_count <= 0)
    {
      /* Last user: free every shared page, then the entry itself. */
      hash_destroy (&shared_file->pages, shared_page_cleanup);
      hash_delete (&shared_files, &shared_file->elem);
      free (shared_file);
    }
  lock_release (&shared_files_lock);
  /* BUG FIX: the function is declared bool but previously fell off the
     end here, returning garbage — undefined behavior if the caller uses
     the result. */
  return true;
}
/* Flags the provided page table entry as representing a swapped out page
   (SET true) or clears that flag (SET false).
   FIX: the return type was lost (implicit int is invalid C); restored as
   'static void' to match the sibling helper page_flag_shared. */
static void
page_flag_swap (uint32_t *pte, bool set)
{
  if (set)
    *pte |= (1 << SWAP_FLAG_BIT);
  else
    *pte &= ~(1 << SWAP_FLAG_BIT);
}
/* hash_action_func that destroys a shared_page_entry: releases its swap
   slot if the page currently lives on disk, otherwise frees its frame.
   FIX: an interleaved signature line for page_set_swap (a diff/merge
   artifact) was removed — it made this definition invalid C. */
static void
shared_page_cleanup (struct hash_elem *e, void *aux UNUSED)
{
  struct shared_page_entry *shared_page
      = hash_entry (e, struct shared_page_entry, elem);
  if (shared_page->frame == NULL)
    swap_drop (shared_page->swap_slot);
  else
    /* Note: ref_count <= 0, so it is guaranteed that the frame is unused. */
    frame_free (shared_page->frame);
  free (shared_page);
}
/* hash_func for the shared_files table: keys an entry on its file. */
static unsigned
shared_file_hash (const struct hash_elem *e, void *aux UNUSED)
{
  const struct shared_file_entry *entry
      = hash_entry (e, struct shared_file_entry, elem);
  return file_hash (entry->file);
}
/* less_func for the shared_files table.
   NOTE(review): presumably file_compare returns non-zero when the files
   differ, so this yields "true iff equal" — confirm against file_compare
   and how lib/kernel/hash.c consumes less functions. */
static bool
shared_file_less (const struct hash_elem *a_, const struct hash_elem *b_,
                  void *aux UNUSED)
{
  const struct shared_file_entry *x = hash_entry (a_, struct shared_file_entry,
                                                  elem);
  const struct shared_file_entry *y = hash_entry (b_, struct shared_file_entry,
                                                  elem);
  return !file_compare (x->file, y->file);
}
/* hash_func for a shared file's pages table: keys an entry on its user
   virtual address (upage). */
static unsigned
shared_page_hash (const struct hash_elem *e, void *aux UNUSED)
{
  const struct shared_page_entry *entry
      = hash_entry (e, struct shared_page_entry, elem);
  return hash_ptr (entry->upage);
}
/* less_func for a shared file's pages table: orders entries by their
   user virtual address. */
static bool
shared_page_less (const struct hash_elem *a_, const struct hash_elem *b_,
                  void *aux UNUSED)
{
  const struct shared_page_entry *x
      = hash_entry (a_, struct shared_page_entry, elem);
  const struct shared_page_entry *y
      = hash_entry (b_, struct shared_page_entry, elem);
  return x->upage < y->upage;
}
static struct shared_file_entry *
shared_file_get (struct file *file)
{
struct shared_file_entry fake_shared_file;
fake_shared_file.file = file;
struct hash_elem *e = hash_find (&shared_files, &fake_shared_file.elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct shared_file_entry, elem);
}
/* Gets a shared_page_entry from the shared_pages table using the file and upage
of the page. Returns NULL if no such page_entry exists in the hash map.*/
struct shared_page_entry *
shared_page_get (struct file *file, void *upage)
{
/* Search first for the file within the shared_pages structure */
struct shared_file_entry *shared_file = shared_file_get (file);
if (shared_file == NULL)
return NULL;
/* Search for the page within the shared_file's hash table */
struct shared_page_entry fake_shared_page_entry;
fake_shared_page_entry.upage = upage;
struct hash_elem *e = hash_find (&shared_file->pages, &fake_shared_page_entry.elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct shared_page_entry, elem);
}
static struct shared_file_entry *
shared_file_insert (struct file *file)
{
struct shared_file_entry *shared_file
= malloc (sizeof (struct shared_file_entry));
if (shared_file == NULL)
return NULL;
shared_file->file = file;
shared_file->ref_count = 0;
if (!hash_init (&shared_file->pages, shared_page_hash, shared_page_less,
NULL))
{
free (shared_file);
return NULL;
}
hash_insert (&shared_files, &shared_file->elem);
return shared_file;
}
/* Allocates a shared_page_entry mapping UPAGE of FILE to FRAME and inserts
   it into FILE's shared pages table, creating the file's shared_file_entry
   if needed.  Returns the new entry, or NULL on allocation failure. */
static struct shared_page_entry *
shared_page_insert (struct file *file, void *upage, void *frame)
{
  struct shared_file_entry *shared_file = shared_file_get (file);
  /* Allocate a new shared_page_entry first for easier error handling. */
  struct shared_page_entry *shared_page
      = malloc (sizeof (struct shared_page_entry));
  if (shared_page == NULL)
    return NULL;
  /* If shared file doesn't exist in table, also create it. */
  if (shared_file == NULL)
    {
      shared_file = shared_file_insert (file);
      if (shared_file == NULL)
        {
          free (shared_page);
          return NULL;
        }
    }
  shared_page->upage = upage;
  shared_page->frame = frame;
  /* FIX: swap_slot was previously left indeterminate; shared_page_cleanup
     reads it when frame == NULL, so initialise it like the analogous
     shared_file_page initialisation does. */
  shared_page->swap_slot = 0;
  hash_insert (&shared_file->pages, &shared_page->elem);
  return shared_page;
}
/* Updates the 'owner' thread's page table entry for virtual address 'upage'
to flag the page as being stored in swap, and stores the specified swap slot
value in the entry at the address bits for later retrieval from disk. */
void
page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
{
uint32_t *pte = lookup_page (owner->pagedir, upage, false);
/* Store the provided swap slot in the address bits of the page table
entry, truncating excess bits. */
*pte |= (1 << SWAP_FLAG_BIT);
@@ -410,53 +343,117 @@ page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
invalidate_pagedir (owner->pagedir);
}
/* Updates the page table entry for virtual address 'upage' to flag the
   page as NOT being stored in swap, and clears the stored swap slot from
   the address bits. */
static void
page_unset_swap (struct thread *owner, void *upage)
{
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
  /* Drop the swap flag, then the slot stored in the address bits. */
  *pte &= ~(1 << SWAP_FLAG_BIT);
  *pte &= ~PTE_ADDR;
  invalidate_pagedir (owner->pagedir);
}
/* Returns true iff the page with user address 'upage' owned by 'owner'
   is flagged to be in the swap disk via the owner's page table.
   FIX: a leftover early 'return' (merge artifact) exited while still
   holding owner->spt_lock, deadlocking the next acquirer, and made the
   lock_release and page_in_swap_pte call unreachable.  The result is now
   computed under the lock and the lock is always released. */
bool
page_in_swap (struct thread *owner, void *upage)
{
  lock_acquire (&owner->spt_lock);
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
  bool in_swap = page_in_swap_pte (pte);
  lock_release (&owner->spt_lock);
  return in_swap;
}
/* Returns true iff the page table entry is marked to be in the swap disk.
   A NULL entry is never in swap. */
bool
page_in_swap_pte (uint32_t *pte)
{
  if (pte == NULL)
    return false;
  return (*pte & (1 << SWAP_FLAG_BIT)) != 0;
}
/* Given that the page with user address 'upage' owned by 'owner' is flagged
to be in the swap disk via the owner's page table, returns its stored
swap slot. Otherwise panics the kernel. */
swap slot and marks the PTE as not being in swap. */
size_t
page_get_swap (struct thread *owner, void *upage)
{
lock_acquire (&owner->spt_lock);
uint32_t *pte = lookup_page (owner->pagedir, upage, false);
ASSERT (pte != NULL);
ASSERT ((*pte & PTE_P) == 0);
/* Masks the address bits and returns truncated value. */
page_flag_swap (pte, false);
lock_release (&owner->spt_lock);
return ((*pte & PTE_ADDR) >> ADDR_START_BIT);
}
/* If the swap bit is set for a page table entry, drop the swap. */
bool
page_cleanup_swap (uint32_t *pte)
/* Returns the swap slot stored in a PTE. */
size_t
page_get_swap_pte (uint32_t *pte)
{
if ((*pte & (1 << SWAP_FLAG_BIT)) != 0)
{
size_t swap_slot = ((*pte & PTE_ADDR) >> ADDR_START_BIT);
swap_drop (swap_slot);
return true;
}
return false;
}
ASSERT (pte != NULL);
ASSERT ((*pte & PTE_P) == 0);
return ((*pte & PTE_ADDR) >> ADDR_START_BIT);
}
/* Flags (SHARED true) or unflags (SHARED false) the page table entry of
   OWNER for UPAGE as representing a shared page. */
static void
page_flag_shared (struct thread *owner, void *upage, bool shared)
{
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
  ASSERT (pte != NULL);
  uint32_t mask = 1 << SHARED_FLAG_BIT;
  if (shared)
    *pte |= mask;
  else
    *pte &= ~mask;
}
/* Returns true iff the page table entry is marked as shared.
   A NULL entry is never shared. */
bool
page_is_shared_pte (uint32_t *pte)
{
  if (pte == NULL)
    return false;
  return (*pte & (1 << SHARED_FLAG_BIT)) != 0;
}
/* Initializes the global shared file pages hash table and its lock.
   Panics the kernel if the table cannot be created. */
void
shared_file_pages_init ()
{
  bool created = hash_init (&shared_file_pages, shared_file_page_hash,
                            shared_file_page_less, NULL);
  if (!created)
    PANIC ("Failed to initialize shared file pages hash table.");
  lock_init (&shared_file_pages_lock);
}
/* hash_func for the shared file pages table: keys an entry on the pair
   (file inode, user virtual address). */
static unsigned
shared_file_page_hash (const struct hash_elem *e, void *aux UNUSED)
{
  struct shared_file_page *sfp = hash_entry (e, struct shared_file_page, elem);
  /* Hash the two key pointers as one contiguous byte buffer. */
  void *key[2];
  key[0] = file_get_inode (sfp->file);
  key[1] = sfp->upage;
  return hash_bytes (key, sizeof key);
}
/* less_func for the shared file pages table: compares entries by
   (file, upage).
   NOTE(review): '!file_compare || upage <' is not a strict weak order;
   presumably the hash table only uses this as an equality test within a
   bucket — confirm against lib/kernel/hash.c before relying on ordering. */
static bool
shared_file_page_less (const struct hash_elem *a_, const struct hash_elem *b_,
                       void *aux UNUSED)
{
  const struct shared_file_page *x
      = hash_entry (a_, struct shared_file_page, elem);
  const struct shared_file_page *y
      = hash_entry (b_, struct shared_file_page, elem);
  if (!file_compare (x->file, y->file))
    return true;
  return x->upage < y->upage;
}
static struct shared_file_page *
shared_file_page_get (struct file *file, void *upage)
{
struct shared_file_page fake_sfp;
fake_sfp.file = file;
fake_sfp.upage = upage;
struct hash_elem *e = hash_find (&shared_file_pages, &fake_sfp.elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct shared_file_page, elem);
}

View File

@@ -5,59 +5,60 @@
#include "threads/synch.h"
#include "filesys/off_t.h"
struct lock shared_files_lock;
enum page_type {
PAGE_EXECUTABLE,
PAGE_EMPTY
enum page_type
{
PAGE_FILE,
PAGE_MMAP,
PAGE_EMPTY,
PAGE_SHARED
};
struct page_entry {
struct page_entry
{
enum page_type type; /* Type of Data that should go into the page */
void *upage; /* Start Address of the User Page (Key of hash table). */
/* Data for swapped pages */
struct lock lock; /* Enforces mutual exclusion in accessing the page
referenced by the entry between its owning process
and any thread performing page eviction. */
/* File Data */
struct file *file; /* Pointer to the file for executables. */
off_t offset; /* Offset of the page content within the file. */
uint32_t read_bytes; /* Number of bytes to read within the page. */
uint32_t zero_bytes; /* Number of bytes to zero within the page. */
bool writable; /* Flag for whether this page is writable or not. */
bool shared; /* Flag for whether this page is shared or not. */
struct hash_elem elem; /* An elem for the hash table. */
};
struct shared_file_entry {
struct file *file; /* Pointer to the file. */
struct hash pages;
struct shared_file_page
{
struct file *file;
void *upage;
void *frame;
size_t swap_slot;
int ref_count;
struct hash_elem elem;
};
struct shared_page_entry {
void *upage;
void *frame;
size_t swap_slot;
bool init_pages (struct hash *pages);
bool page_insert_swapped (void *upage, void *kpage, struct list *owners);
struct page_entry *page_insert_file (struct file *file, off_t ofs, void *upage,
uint32_t read_bytes, uint32_t zero_bytes,
bool writable, enum page_type);
struct page_entry *page_get (struct thread *thread, void *upage);
bool page_load_file (struct page_entry *page);
void page_cleanup (struct hash_elem *e, void *aux);
struct hash_elem elem;
};
bool init_pages (struct thread *t);
struct page_entry *page_insert (struct file *file, off_t ofs, void *upage,
uint32_t read_bytes, uint32_t zero_bytes,
bool writable, enum page_type type);
struct page_entry *page_get (void *upage);
bool page_load (struct page_entry *page);
void page_cleanup (struct hash_elem *e, void *aux UNUSED);
void page_set_swap (struct thread *, void *, size_t);
bool page_in_swap (struct thread *, void *);
size_t page_get_swap (struct thread *, void *);
bool page_cleanup_swap (uint32_t *pte);
bool page_in_swap_pte (uint32_t *pte);
size_t page_get_swap (struct thread *owner, void *upage);
size_t page_get_swap_pte (uint32_t *pte);
void shared_files_init ();
bool use_shared_file (struct file *file);
bool unuse_shared_file (struct file *file);
struct shared_page_entry *shared_page_get (struct file *file, void *upage);
bool page_is_shared_pte (uint32_t *pte);
void shared_file_pages_init (void);
#endif /* vm/frame.h */
#endif

View File

@@ -1,60 +0,0 @@
#include <stdio.h>
#include "stackgrowth.h"
#include "frame.h"
#include "threads/palloc.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#define MAX_STACK_ACCESS_DIST 32
static bool is_stack_fault (const void *addr, const void *esp);
static bool grow_stack (const void *addr);
/* Determine whether a particular page fault occured due to a stack
   access below the stack pointer that should induce stack growth, and
   if so grow the stack by a single page (capped at MAX_STACK_SIZE). */
bool
handle_stack_fault (const void *ptr, const void *esp)
{
  if (!is_stack_fault (ptr, esp))
    return false;
  return grow_stack (ptr);
}
/* Determines whether a particular page fault appears to be caused by
a stack access that should induce dynamic stack growth. Stack size
is capped at MAX_STACK_SIZE. */
static bool
is_stack_fault (const void *addr, const void *esp)
{
return ((uint32_t*)addr >= ((uint32_t*)esp - MAX_STACK_ACCESS_DIST) &&
((PHYS_BASE - pg_round_down (addr)) <= MAX_STACK_SIZE));
}
/* Grows the stack of the process running inside the current thread by a
   single page, given a user virtual address inside the page wherein the
   new section of the stack should be allocated.  Returns true on
   success, false if a frame or mapping could not be obtained. */
static bool
grow_stack (const void *addr)
{
  struct thread *cur = thread_current ();
  void *page_base = pg_round_down (addr);
  /* This function should only be called when dealing with a faulting stack
     access that induces stack growth, so the provided address shouldn't be
     present in a page within the current thread's page directory. */
  ASSERT (pagedir_get_page (cur->pagedir, page_base) == NULL);
  uint8_t *kpage = frame_alloc (PAL_ZERO, page_base, cur);
  if (kpage == NULL)
    return false;
  if (pagedir_set_page (cur->pagedir, page_base, kpage, true))
    return true;
  /* Mapping failed: disown and release the freshly allocated frame. */
  frame_owner_remove (kpage, cur);
  frame_free (kpage);
  return false;
}

View File

@@ -1,10 +0,0 @@
#ifndef VM_GROWSTACK_H
#define VM_GROWSTACK_H
#include <stdio.h>
/* Maximum size, in bytes, a process stack may grow to. */
#define MAX_STACK_SIZE 8388608 // (8MB)
/* Grows the stack by one page if the fault at PTR looks like a stack
   access relative to ESP; returns true on successful growth. */
bool handle_stack_fault (const void *ptr, const void *esp);
#endif /* vm/stackgrowth.h */