Compare commits


4 Commits

7 changed files with 337 additions and 418 deletions

View File

@@ -2,6 +2,7 @@
 #include <inttypes.h>
 #include <stdio.h>
 #include "stdbool.h"
+#include "threads/synch.h"
 #include "userprog/gdt.h"
 #include "userprog/pagedir.h"
 #include "userprog/process.h"
@@ -249,6 +250,12 @@ grow_stack (void *upage)
 bool
 fetch_page (void *upage, bool write)
 {
+  /* Check if the page is in the supplemental page table. That is, it is a page
+     that is expected to be in memory. */
+  struct page_entry *page = page_get (upage);
+  if (page == NULL)
+    return false;
+
   /* Check if the non-present user page is in the swap partition.
      If so, swap it back into main memory, updating the PTE for
      the faulted virtual address to point to the newly allocated
@@ -256,25 +263,23 @@ fetch_page (void *upage, bool write)
   struct thread *t = thread_current ();
   if (page_in_swap (t, upage))
     {
-      size_t swap_slot = page_get_swap (t, upage);
+      /* NOTE: This code should be refactored and moved into helper functions
+         within 'page.c'. */
       void *kpage = frame_alloc (0, upage, t);
+
+      lock_acquire (&page->lock);
+      size_t swap_slot = page_get_swap (t, upage);
       swap_in (kpage, swap_slot);
-      bool writeable = pagedir_is_writable (t->pagedir, upage);
-      if (pagedir_set_page (t->pagedir, upage, kpage, writeable))
-        {
-          struct page_entry *page = page_get (upage);
-          if (page != NULL)
-            page->frame = kpage;
-          return true;
-        }
-    }
+      lock_release (&page->lock);
 
-  /* Check if the page is in the supplemental page table. That is, it is a page
-     that is expected to be in memory. */
-  struct page_entry *page = page_get (upage);
-  if (page == NULL)
-    return false;
+      bool writeable = pagedir_is_writable (t->pagedir, upage);
+
+      /* TODO: When this returns false we should quit the page fault,
+         but currently we continue and check the stack conditions in the
+         page fault handler. */
+      return pagedir_set_page (t->pagedir, upage, kpage, writeable);
+    }
 
   /* An attempt to write to a non-writeable should fail. */
   if (write && !page->writable)
@@ -283,8 +288,9 @@ fetch_page (void *upage, bool write)
   /* Load the page into memory based on the type of data it is expecting. */
   bool success = false;
   switch (page->type) {
+    case PAGE_MMAP:
     case PAGE_FILE:
-      success = page_load (page, page->writable);
+      success = page_load_file (page, page->writable);
       break;
     default:
       return false;

View File

@@ -714,7 +714,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
       size_t page_zero_bytes = PGSIZE - page_read_bytes;
 
       /* Add the page metadata to the SPT to be lazy loaded later on */
-      if (page_insert (file, ofs, upage, page_read_bytes, page_zero_bytes,
+      if (page_insert_file (file, ofs, upage, page_read_bytes, page_zero_bytes,
                        writable, PAGE_FILE) == NULL)
         return false;

View File

@@ -10,7 +10,6 @@
 #include "threads/synch.h"
 #include "userprog/process.h"
 #include "userprog/pagedir.h"
-#include "vm/frame.h"
 #include "vm/page.h"
 #include "vm/mmap.h"
 #include <stdio.h>
@@ -53,14 +52,9 @@ static mapid_t syscall_mmap (int fd, void *addr);
 static void syscall_munmap (mapid_t mapping);
 static struct open_file *fd_get_file (int fd);
-static void validate_user_ptr (const void *start, size_t size,
-                               bool write);
-static void validate_and_pin_user_ptr (const void *start, size_t size,
-                                       bool write);
-static void validate_and_pin_user_str (const char *ptr);
-static void unpin_user_ptr (const void *start, size_t size);
-static void unpin_user_str (const char *ptr);
+static void validate_user_pointer (const void *ptr, size_t size,
+                                   bool check_write);
+static void validate_user_string (const char *str, bool check_write);
 static int get_user (const uint8_t *);
 static bool put_user (uint8_t *, uint8_t);
@@ -113,7 +107,7 @@
 syscall_handler (struct intr_frame *f)
 {
   /* First, read the system call number from the stack. */
-  validate_user_ptr (f->esp, sizeof (uintptr_t), false);
+  validate_user_pointer (f->esp, sizeof (uintptr_t), false);
   uintptr_t syscall_number = *(int *)f->esp;
 
   thread_current ()->curr_esp = f->esp;
@@ -124,7 +118,7 @@ syscall_handler (struct intr_frame *f)
   struct syscall_arguments syscall = syscall_lookup[syscall_number];
 
   /* Next, read and copy the arguments from the stack pointer. */
-  validate_user_ptr (f->esp + sizeof (uintptr_t),
+  validate_user_pointer (f->esp + sizeof (uintptr_t),
                      syscall.arity * sizeof (uintptr_t), false);
   uintptr_t args[MAX_SYSCALL_ARGS] = { 0 };
   for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++)
@@ -157,11 +151,9 @@ syscall_exit (int status)
 static pid_t
 syscall_exec (const char *cmd_line)
 {
-  validate_and_pin_user_str (cmd_line);
-  pid_t pid = process_execute (cmd_line);
-  unpin_user_str (cmd_line);
-  return pid;
+  validate_user_string (cmd_line, false);
+
+  return process_execute (cmd_line); /* Returns the PID of the new process */
 }
 
 /* Handles the syscall of wait. Effectively a wrapper for process_wait as the
@@ -178,15 +170,13 @@ syscall_wait (pid_t pid)
 static bool
 syscall_create (const char *file, unsigned initial_size)
 {
-  validate_and_pin_user_str (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
   bool status = filesys_create (file, initial_size);
   lock_release (&filesys_lock);
 
-  unpin_user_str (file);
-
   /* Return the status of the file creation. */
   return status;
 }
@@ -197,15 +187,13 @@ syscall_create (const char *file, unsigned initial_size)
 static bool
 syscall_remove (const char *file)
 {
-  validate_and_pin_user_str (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
   bool status = filesys_remove (file);
   lock_release (&filesys_lock);
 
-  unpin_user_str (file);
-
   /* Return the status of the file removal. */
   return status;
 }
@@ -217,15 +205,13 @@ syscall_remove (const char *file)
 static int
 syscall_open (const char *file)
 {
-  validate_and_pin_user_str (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
   struct file *ptr = filesys_open (file);
   lock_release (&filesys_lock);
 
-  unpin_user_str (file);
-
   /* If the file could not be opened, return failure. */
   if (ptr == NULL)
     return EXIT_FAILURE;
@@ -285,11 +271,10 @@ syscall_read (int fd, void *buffer, unsigned size)
   if (fd < STDIN_FILENO || fd == STDOUT_FILENO)
     return EXIT_FAILURE;
 
+  validate_user_pointer (buffer, size, true);
+
   if (fd == STDIN_FILENO)
     {
-      /* Validate the user buffer. */
-      validate_user_ptr (buffer, size, true);
-
       /* Reading from the console. */
       char *write_buffer = buffer;
       for (unsigned i = 0; i < size; i++)
@@ -307,19 +292,13 @@ syscall_read (int fd, void *buffer, unsigned size)
       if (file_info == NULL)
         return EXIT_FAILURE;
 
-      /* Validate the user buffer, and pin the pages to prevent eviction. */
-      validate_and_pin_user_ptr (buffer, size, true);
-
       /* Acquire the file system lock to prevent race-conditions. */
       lock_acquire (&filesys_lock);
-      int bytes_read = file_read (file_info->file, buffer, size);
+      int bytes_written = file_read (file_info->file, buffer, size);
       lock_release (&filesys_lock);
 
-      /* Unpin the pages to allow eviction. */
-      unpin_user_ptr (buffer, size);
-
       /* Return the number of bytes read. */
-      return bytes_read;
+      return bytes_written;
     }
 }
@@ -335,11 +314,10 @@ syscall_write (int fd, const void *buffer, unsigned size)
   if (fd <= 0)
     return 0;
 
+  validate_user_pointer (buffer, size, false);
+
   if (fd == STDOUT_FILENO)
     {
-      /* Validate the user buffer. */
-      validate_user_ptr (buffer, size, false);
-
       /* Writing to the console. */
       putbuf (buffer, size);
@@ -355,19 +333,13 @@ syscall_write (int fd, const void *buffer, unsigned size)
       if (file_info == NULL)
         return 0;
 
-      /* Validate the user buffer, and pin the pages to prevent eviction. */
-      validate_and_pin_user_ptr (buffer, size, false);
-
       /* Acquire the file system lock to prevent race conditions. */
       lock_acquire (&filesys_lock);
-      int bytes_written = file_write (file_info->file, buffer, size);
+      int bytes = file_write (file_info->file, buffer, size);
      lock_release (&filesys_lock);
 
-      /* Unpin the pages to allow eviction. */
-      unpin_user_ptr (buffer, size);
-
       /* Return the number of bytes written. */
-      return bytes_written;
+      return bytes;
     }
 }
@@ -454,8 +426,8 @@ syscall_mmap (int fd, void *addr)
   if (file_size == 0)
     return MMAP_FAILURE;
 
-  /* ensures the page for mmap does not overlap with the stack */
-  if (addr >= (thread_current ()->curr_esp - PGSIZE))
+  /* Ensure that the mmap page doesn't overlap with the stack. */
+  if (addr >= (thread_current ()->curr_esp) - PGSIZE)
     return MMAP_FAILURE;
 
   /* Check and ensure that there is enough space in the user virtual memory to
@@ -472,8 +444,8 @@ syscall_mmap (int fd, void *addr)
       off_t read_bytes = file_size - ofs < PGSIZE ? file_size - ofs : PGSIZE;
       off_t zero_bytes = PGSIZE - read_bytes;
-      if (page_insert (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
-                       PAGE_FILE) == NULL)
+      if (page_insert_file (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
+                            PAGE_MMAP) == NULL)
         return MMAP_FAILURE;
     }
@@ -482,7 +454,6 @@ syscall_mmap (int fd, void *addr)
   if (mmap == NULL)
     return MMAP_FAILURE;
-
   return mmap->mapping;
 }
@@ -561,127 +532,54 @@ fd_get_file (int fd)
   return hash_entry (e, struct open_file, elem);
 }
 
-/* Helper function that validates a block of memory and optionally pins frames.
-   thread_exit() if the memory is invalid. Used only by the two helper functions
-   validate_user_ptr and validate_and_pin_user_ptr. See the comments for those
-   functions for more details on each. */
-static void
-validate_user_ptr_helper (const void *start, size_t size, bool write, bool pin)
-{
-  if (size == 0)
-    return;
-
-  /* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a
-     valid user virtual memory address. */
-  void *end = start + size - 1;
-  if (!is_user_vaddr (end))
-    syscall_exit (EXIT_FAILURE);
-
-  for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
-    {
-      int result;
-      /* Check read access to pointer. */
-      if ((result = get_user (ptr)) == -1)
-        syscall_exit (EXIT_FAILURE);
-
-      /* Check write access to pointer (if required). */
-      if (write && !put_user ((uint8_t *)ptr, result))
-        syscall_exit (EXIT_FAILURE);
-
-      /* If pin is set, pin the frame to prevent eviction. */
-      if (pin)
-        {
-          void *kpage = pagedir_get_page (thread_current ()->pagedir, ptr);
-          if (kpage == NULL)
-            {
-              // If it was evicted, try to load it back in.
-              ptr -= PGSIZE;
-              continue;
-            }
-          frame_pin (kpage);
-        }
-    }
-}
-
 /* Validates if a block of memory starting at PTR and of size SIZE bytes is
    fully contained within valid user virtual memory. thread_exit () if the
    memory is invalid.
 
    If the size is 0, the function does no checks and returns PTR. */
 static void
-validate_user_ptr (const void *start, size_t size, bool write)
-{
-  validate_user_ptr_helper (start, size, write, false);
-}
-
-/* Validates if a block of memory starting at PTR and of size SIZE bytes is
-   fully contained within valid user virtual memory. thread_exit () if the
-   memory is invalid. The function also checks if the memory is writable if
-   WRITE flag is set.
-
-   The function attempts to preload the pages in case they are not in memory
-   yet (e.g., in a swap, lazy loading). If this is successful, the frame pages
-   are pinned to prevent eviction prior to access.
-
-   As such, a call to this function MUST be followed by a call to
-   unpin_user_ptr (START, SIZE) to unpin the pages and allow eviction.
-
-   If the size is 0, the function does no checks and returns PTR. */
-static void
-validate_and_pin_user_ptr (const void *start, size_t size, bool write)
+validate_user_pointer (const void *ptr, size_t size, bool check_write)
 {
-  validate_user_ptr_helper (start, size, write, true);
-}
+  if (size == 0)
+    return;
 
-/* Unpins all the pages containing a block of memory starting at START and of
-   size SIZE bytes.
-
-   Pre: The pages were previously pinned by validate_and_pin_user_ptr (START,
-   SIZE). */
-static void
-unpin_user_ptr (const void *start, size_t size)
-{
-  void *end = start + size - 1;
-
-  /* We don't need to do any checks as this function is always called after
-     validate_and_pin_user_ptr. */
-  /* Go through all pages in the block range, unpinning the frames. */
-  for (void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
+  /* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a
+     valid user virtual memory address. */
+  void *last = ptr + size - 1;
+  if (!is_user_vaddr (last))
+    syscall_exit (EXIT_FAILURE);
+
+  ptr = pg_round_down (ptr);
+  while (ptr <= last)
     {
-      void *kpage = pagedir_get_page (thread_current ()->pagedir, ptr);
-      ASSERT (kpage != NULL);
-      frame_unpin (kpage);
+      int result;
+      /* Check read access to pointer. */
+      if ((result = get_user (ptr)) == -1)
+        syscall_exit (EXIT_FAILURE);
+
+      /* Check write access to pointer (if required). */
+      if (check_write && !put_user (ptr, result))
+        syscall_exit (EXIT_FAILURE);
+
+      ptr += PGSIZE;
     }
 }
 
 /* Validates of a C-string starting at ptr is fully contained within valid
    user virtual memory. thread_exit () if the memory is invalid. */
 static void
-validate_and_pin_user_str (const char *ptr)
+validate_user_string (const char *ptr, bool check_write)
 {
   size_t offset = (uintptr_t) ptr % PGSIZE;
   for (;;)
     {
+      void *page = pg_round_down (ptr);
+      if (!is_user_vaddr (page))
+        syscall_exit (EXIT_FAILURE);
+
       if (!is_user_vaddr (ptr))
         syscall_exit (EXIT_FAILURE);
 
-      if (get_user ((const uint8_t *)ptr) == -1)
+      int result;
+      if ((result = get_user ((const uint8_t *)ptr)) == -1)
+        syscall_exit (EXIT_FAILURE);
+      if (check_write && !put_user ((uint8_t *)ptr, result))
         syscall_exit (EXIT_FAILURE);
 
-      /* Pin the frame to prevent eviction. */
-      void *page = pg_round_down (ptr);
-      void *kpage = pagedir_get_page (thread_current ()->pagedir, page);
-      if (kpage == NULL)
-        {
-          // If it was evicted, attempt to reload.
-          ptr -= PGSIZE;
-          continue;
-        }
-      frame_pin (kpage);
-
       while (offset < PGSIZE)
         {
@@ -693,36 +591,7 @@ validate_and_pin_user_str (const char *ptr)
         }
       offset = 0;
     }
 }
-
-/* Unpins all the pages containing a C-string starting at PTR.
-
-   Pre: The pages were previously pinned by validate_and_pin_user_str (PTR).
-   PTR points to a valid C string that ends with '\0'. */
-static void
-unpin_user_str (const char *ptr)
-{
-  size_t offset = (uintptr_t)ptr % PGSIZE;
-  const char *str_ptr = ptr;
-
-  for (;;)
-    {
-      void *page = pg_round_down (str_ptr);
-      void *kpage = pagedir_get_page (thread_current ()->pagedir, page);
-      ASSERT (kpage != NULL);
-      frame_unpin (kpage);
-
-      /* Scan until end of string or page */
-      while (offset < PGSIZE)
-        {
-          if (*str_ptr == '\0')
-            return; /* Found end of string */
-          str_ptr++;
-          offset++;
-        }
-      offset = 0;
-    }
-}
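
Note: validate_user_pointer and validate_user_string rely on the fault-based accessors get_user () and put_user (), which are only declared in this diff. For reference, a sketch in the style of the Pintos manual's examples (the definitions already present in syscall.c may differ in detail):

/* Reads a byte at user virtual address UADDR. UADDR must be below PHYS_BASE.
   Returns the byte value if successful, -1 if a segfault occurred. */
static int
get_user (const uint8_t *uaddr)
{
  int result;
  asm ("movl $1f, %0; movzbl %1, %0; 1:"
       : "=&a" (result) : "m" (*uaddr));
  return result;
}

/* Writes BYTE to user address UDST. UDST must be below PHYS_BASE.
   Returns true if successful, false if a segfault occurred. */
static bool
put_user (uint8_t *udst, uint8_t byte)
{
  int error_code;
  asm ("movl $1f, %0; movb %b2, %1; 1:"
       : "=&a" (error_code), "=m" (*udst) : "q" (byte));
  return error_code != -1;
}

Both depend on the page fault handler resuming execution at the 1: label with EAX set to 0xffffffff on a faulting access, which is why the validators above treat a -1 result as an invalid address.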

View File

@@ -2,74 +2,115 @@
 #include <hash.h>
 #include <list.h>
 #include <string.h>
+#include <stdio.h>
 #include "frame.h"
 #include "page.h"
+#include "filesys/file.h"
 #include "threads/malloc.h"
 #include "threads/vaddr.h"
 #include "userprog/pagedir.h"
+#include "userprog/syscall.h"
 #include "threads/synch.h"
+#include "devices/swap.h"
 
-struct frame_entry
+/* Hash table that maps every active frame's kernel virtual address
+   to its corresponding 'frame_metadata'. */
+struct hash frame_table;
+
+/* Linked list used to represent the circular queue in the 'clock'
+   algorithm for page eviction. Iterating from the element that is
+   currently pointed at by 'next_victim' yields an ordering of the entries
+   from oldest to newest (in terms of when they were added or checked
+   for having been referenced by a process). */
+struct list lru_list;
+
+/* The next element in lru_list to be considered for eviction (oldest added
+   or referenced page in the circular queue). If this page has an
+   'accessed' bit of 0 when considering eviction, then it will be the next
+   victim. Otherwise, the next element in the queue is similarly considered. */
+struct list_elem *next_victim = NULL;
+
+/* Synchronisation variables. */
+/* Protects access to 'lru_list'. */
+struct lock lru_lock;
+
+struct frame_metadata
 {
-  void *frame;
-  void *upage;
-  struct thread *owner;
-  bool pinned;
-
-  struct hash_elem hash_elem;
-  struct list_elem list_elem;
+  void *frame;                /* The kernel virtual address holding the frame. */
+  void *upage;                /* The user virtual address pointing to the frame. */
+  struct thread *owner;       /* Pointer to the thread that owns the frame. */
+  struct hash_elem hash_elem; /* Tracks the position of the frame metadata
+                                 within 'frame_table', whose key is the
+                                 kernel virtual address of the frame. */
+  struct list_elem list_elem; /* Tracks the position of the frame metadata
+                                 in either the 'active' or 'inactive' list,
+                                 so a victim can be chosen for eviction. */
 };
 
-struct hash frame_table;
-struct lock frame_lock;
-struct list lru_list;
-struct list_elem *next_victim;
-
-hash_hash_func frame_hash;
-hash_less_func frame_less;
-
-static struct frame_entry *frame_get (void *frame);
-static struct frame_entry *get_victim (void);
+hash_hash_func frame_metadata_hash;
+hash_less_func frame_metadata_less;
+
 static struct list_elem *lru_next (struct list_elem *e);
 static struct list_elem *lru_prev (struct list_elem *e);
+static struct frame_metadata *get_victim (void);
 
+/* Initialize the frame system by initializing the frame (hash) table with
+   the frame_metadata hashing and comparison functions, as well as initializing
+   'lru_list' and its associated synchronisation primitives. */
 void
 frame_init (void)
 {
-  hash_init (&frame_table, frame_hash, frame_less, NULL);
-  lock_init (&frame_lock);
+  hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
   list_init (&lru_list);
+  lock_init (&lru_lock);
 }
 
+/* TODO: Consider synchronisation more closely (i.e. just for hash
+   table). */
+/* Attempt to allocate a frame for a user process, either by direct
+   allocation of a user page if there is sufficient RAM, or by
+   evicting a currently active page if memory allocated for user
+   processes is full and storing it in swap. If swap is full in
+   the former case, panic the kernel. */
 void *
 frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
 {
-  lock_acquire (&frame_lock);
-  struct frame_entry *frame_metadata;
+  struct frame_metadata *frame_metadata;
   flags |= PAL_USER;
 
+  lock_acquire (&lru_lock);
   void *frame = palloc_get_page (flags);
+
+  /* If a frame couldn't be allocated we must be out of main memory. Thus,
+     obtain a victim page to replace with our page, and swap the victim
+     into disk. */
   if (frame == NULL)
     {
+      /* 1. Obtain victim. */
       if (next_victim == NULL)
         PANIC ("Couldn't allocate a single page to main memory!\n");
 
-      struct frame_entry *victim = get_victim ();
+      struct frame_metadata *victim = get_victim ();
       ASSERT (victim != NULL); /* get_victim () should never return null. */
 
-      /* 2. Swap out victim into disk. */
-      /* Mark page as 'not present' and flag the page directory as having
-         been modified *before* eviction begins to prevent the owner of the
-         victim page from accessing/modifying it mid-eviction. */
-      pagedir_clear_page (victim->owner->pagedir, victim->upage);
-
-      // TODO: Lock PTE of victim page for victim process.
-
-      size_t swap_slot = swap_out (victim->frame);
-      page_set_swap (victim->owner, victim->upage, swap_slot);
+      /* 2. Handle victim page writing based on its type. */
+      struct page_entry *victim_page = page_get (victim->upage);
+      if (victim_page != NULL && victim_page->type == PAGE_MMAP)
+        {
+          /* If it was a memory-mapped file page, we just write it back
+             to the file if it was dirty. */
+          if (pagedir_is_dirty (owner->pagedir, victim->upage))
+            {
+              lock_acquire (&filesys_lock);
+              file_write_at (victim_page->file, victim->upage,
+                             victim_page->read_bytes, victim_page->offset);
+              lock_release (&filesys_lock);
+            }
+        }
+      else
+        /* Otherwise, insert the page into swap. */
+        page_insert_swapped (victim->upage, victim->frame, victim->owner);
 
       /* If zero flag is set, zero out the victim page. */
       if (flags & PAL_ZERO)
@@ -89,7 +130,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
     {
       /* Must own lru_lock here, as otherwise there is a race condition
          with next_victim either being NULL or uninitialized. */
-      frame_metadata = malloc (sizeof (struct frame_entry));
+      frame_metadata = malloc (sizeof (struct frame_metadata));
       frame_metadata->frame = frame;
 
       /* Newly allocated frames are pushed to the back of the circular queue
@@ -111,33 +152,9 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
   frame_metadata->upage = upage;
   frame_metadata->owner = owner;
-  frame_metadata->pinned = false;
 
-  void *frame_addr = frame_metadata->frame;
-  lock_release (&frame_lock);
-  return frame_addr;
-}
-
-void
-frame_pin (void *frame)
-{
-  struct frame_entry *frame_metadata = frame_get (frame);
-  if (frame_metadata == NULL)
-    PANIC ("Attempted to pin a frame at an unallocated kernel address '%p'\n",
-           frame);
-
-  frame_metadata->pinned = true;
-}
-
-void
-frame_unpin (void *frame)
-{
-  struct frame_entry *frame_metadata = frame_get (frame);
-  if (frame_metadata == NULL)
-    PANIC ("Attempted to unpin a frame at an unallocated kernel address '%p'\n",
-           frame);
-
-  frame_metadata->pinned = false;
-}
+  lock_release (&lru_lock);
+  return frame_metadata->frame;
+}
 
 /* Attempt to deallocate a frame for a user process by removing it from the
@@ -146,24 +163,18 @@ frame_unpin (void *frame)
 void
 frame_free (void *frame)
 {
-  lock_acquire (&frame_lock);
-  struct frame_entry key_metadata;
+  struct frame_metadata key_metadata;
   key_metadata.frame = frame;
 
   struct hash_elem *e =
       hash_delete (&frame_table, &key_metadata.hash_elem);
-  if (e == NULL)
-    return;
+  if (e == NULL) PANIC ("Attempted to free a frame at kernel address %p, "
+                        "but this address is not allocated!\n", frame);
 
-  struct frame_entry *frame_metadata =
-      hash_entry (e, struct frame_entry, hash_elem);
-
-  struct page_entry *page = page_get (frame_metadata->upage);
-  if (page != NULL)
-    {
-      page->frame = NULL;
-    }
+  struct frame_metadata *frame_metadata =
+      hash_entry (e, struct frame_metadata, hash_elem);
 
+  lock_acquire (&lru_lock);
   list_remove (&frame_metadata->list_elem);
 
   /* If we're freeing the frame marked as the next victim, update
@@ -176,34 +187,29 @@ frame_free (void *frame)
       else
         next_victim = lru_next (next_victim);
     }
+  lock_release (&lru_lock);
 
   free (frame_metadata);
   palloc_free_page (frame);
-  lock_release (&frame_lock);
 }
 
 /* TODO: Account for page aliases when checking accessed bit. */
 /* A pre-condition for calling this function is that the calling thread
    owns lru_lock and that lru_list is non-empty. */
-static struct frame_entry *
+static struct frame_metadata *
 get_victim (void)
 {
   struct list_elem *e = next_victim;
-  struct frame_entry *frame_metadata;
+  struct frame_metadata *frame_metadata;
   uint32_t *pd;
   void *upage;
 
   for (;;)
     {
-      frame_metadata = list_entry (e, struct frame_entry, list_elem);
+      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
       pd = frame_metadata->owner->pagedir;
       upage = frame_metadata->upage;
       e = lru_next (e);
 
-      /* Skip pinned frames */
-      if (frame_metadata->pinned)
-        continue;
-
       if (!pagedir_is_accessed (pd, upage))
         break;
@@ -217,41 +223,29 @@ get_victim (void)
 /* Hash function for frame metadata, used for storing entries in the
    frame table. */
 unsigned
-frame_hash (const struct hash_elem *e, void *aux UNUSED)
+frame_metadata_hash (const struct hash_elem *e, void *aux UNUSED)
 {
-  struct frame_entry *entry =
-      hash_entry (e, struct frame_entry, hash_elem);
-  return hash_bytes (&entry->frame, sizeof (entry->frame));
+  struct frame_metadata *frame_metadata =
+      hash_entry (e, struct frame_metadata, hash_elem);
+  return hash_bytes (&frame_metadata->frame, sizeof (frame_metadata->frame));
 }
 
 /* 'less_func' comparison function for frame metadata, used for comparing
    the keys of the frame table. Returns true iff the kernel virtual address
    of the first frame is less than that of the second frame. */
 bool
-frame_less (const struct hash_elem *a_, const struct hash_elem *b_,
-            void *aux UNUSED)
+frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
+                     void *aux UNUSED)
 {
-  struct frame_entry *a =
-      hash_entry (a_, struct frame_entry, hash_elem);
-  struct frame_entry *b =
-      hash_entry (b_, struct frame_entry, hash_elem);
+  struct frame_metadata *a =
+      hash_entry (a_, struct frame_metadata, hash_elem);
+  struct frame_metadata *b =
+      hash_entry (b_, struct frame_metadata, hash_elem);
   return a->frame < b->frame;
 }
 
-static struct frame_entry *
-frame_get (void *frame)
-{
-  struct frame_entry fake_frame;
-  fake_frame.frame = frame;
-  struct hash_elem *e = hash_find (&frame_table, &fake_frame.hash_elem);
-  if (e == NULL) return NULL;
-  return hash_entry (e, struct frame_entry, hash_elem);
-}
-
 /* Returns the next recently used element after the one provided, which
    is achieved by iterating through lru_list like a circular queue
    (wrapping around the list at the tail). */
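
The bodies of lru_next () and lru_prev () fall outside this diff; the comment above describes lru_next () as a circular traversal of lru_list. A minimal sketch consistent with that comment, assuming the standard Pintos <list.h> API and a non-empty lru_list (not necessarily the exact code in frame.c):

static struct list_elem *
lru_next (struct list_elem *e)
{
  /* Step forward; wrap past the tail sentinel back to the head so the
     queue behaves circularly. */
  struct list_elem *next = list_next (e);
  if (next == list_end (&lru_list))
    next = list_begin (&lru_list);
  return next;
}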

View File

@@ -6,8 +6,6 @@
 void frame_init (void);
 void *frame_alloc (enum palloc_flags, void *, struct thread *);
-void frame_pin (void *frame);
-void frame_unpin (void *frame);
 void frame_free (void *frame);
 
 #endif /* vm/frame.h */

View File

@@ -1,10 +1,12 @@
 #include "page.h"
+#include <stdint.h>
 #include <string.h>
 #include <stdio.h>
 #include "filesys/file.h"
 #include "threads/pte.h"
 #include "threads/malloc.h"
 #include "threads/palloc.h"
+#include "devices/swap.h"
 #include "userprog/process.h"
 #include "userprog/pagedir.h"
 #include "vm/frame.h"
@@ -33,23 +35,66 @@ page_less (const struct hash_elem *a_, const struct hash_elem *b_,
   return a->upage < b->upage;
 }
 
-/* Allocate and insert a new page entry into the thread's page table. */
+static void page_flag_swap (uint32_t *pte, bool set);
+static void page_set_swap (struct thread *owner, uint32_t *pte,
+                           size_t swap_slot);
+
+// TODO: Deal with NULL malloc returns
+
+/* Swap out 'owner' process's 'upage' stored at 'kpage'. Then, allocate and
+   insert a new page entry into the user process thread's SPT representing
+   this swapped out page. */
 struct page_entry *
-page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes,
-             uint32_t zero_bytes, bool writable, enum page_type type)
+page_insert_swapped (void *upage, void *kpage, struct thread *owner)
+{
+  /* 1. Initialize swapped page entry. */
+  struct page_entry *page = page_get (upage);
+  if (page == NULL)
+    {
+      page = malloc (sizeof (struct page_entry));
+      if (page == NULL)
+        return NULL;
+
+      page->upage = upage;
+      lock_init (&page->lock);
+      hash_insert (&owner->pages, &page->elem);
+    }
+
+  /* Mark page as 'swapped' and flag the page directory as having
+     been modified *before* eviction begins to prevent the owner of the
+     victim page from accessing/modifying it mid-eviction. */
+  /* TODO: We need to stop the process from destroying pagedir mid-eviction,
+     as this could render the page table entry invalid. */
+  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
+  page_flag_swap (pte, true);
+
+  lock_acquire (&page->lock);
+  pagedir_clear_page (owner->pagedir, upage);
+  size_t swap_slot = swap_out (kpage);
+  page_set_swap (owner, pte, swap_slot);
+  lock_release (&page->lock);
+
+  return page;
+}
+
+/* Allocate and insert a new page entry into the user process thread's
+   SPT representing a file page. */
+struct page_entry *
+page_insert_file (struct file *file, off_t ofs, void *upage,
+                  uint32_t read_bytes, uint32_t zero_bytes, bool writable,
+                  enum page_type type)
 {
   struct page_entry *page = malloc (sizeof (struct page_entry));
   if (page == NULL)
     return NULL;
 
-  page->upage = upage;
-  page->frame = NULL;
+  page->type = type;
   page->file = file;
   page->offset = ofs;
+  page->upage = upage;
   page->read_bytes = read_bytes;
   page->zero_bytes = zero_bytes;
   page->writable = writable;
-  page->type = type;
+  lock_init (&page->lock);
 
   hash_insert (&thread_current ()->pages, &page->elem);
   return page;
@@ -72,7 +117,7 @@ page_get (void *upage)
 }
 
 bool
-page_load (struct page_entry *page, bool writable)
+page_load_file (struct page_entry *page, bool writable)
 {
   /* Allocate a frame for the page. If a frame allocation fails, then
      frame_alloc should try to evict a page. If it is still NULL, the OS
@@ -103,8 +148,6 @@ page_load (struct page_entry *page, bool writable)
   /* Zero out the remaining bytes in the frame. */
   memset (frame + page->read_bytes, 0, page->zero_bytes);
 
-  page->frame = frame;
-
   /* Mark the page as loaded successfully. */
   return true;
 }
@@ -114,22 +157,24 @@ page_load (struct page_entry *page, bool writable)
 void
 page_cleanup (struct hash_elem *e, void *aux UNUSED)
 {
-  struct page_entry *page = hash_entry (e, struct page_entry, elem);
-  if (page->frame != NULL)
-    frame_free (page->frame);
-  free (page);
+  free (hash_entry (e, struct page_entry, elem));
 }
 
-/* Updates the 'owner' thread's page table entry for virtual address 'upage'
-   to flag the page as being stored in swap, and stores the specified swap slot
-   value in the entry at the address bits for later retrieval from disk. */
+/* Flags the provided page table entry as representing a swapped out page. */
 void
-page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
+page_flag_swap (uint32_t *pte, bool set)
 {
-  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
+  if (set)
+    *pte |= (1 << SWAP_FLAG_BIT);
+  else
+    *pte &= ~(1 << SWAP_FLAG_BIT);
+}
+
+/* Sets the address bits of the page table entry to the provided swap slot
+   value. To be used for later retrieval of the swap slot when page faulting. */
+static void
+page_set_swap (struct thread *owner, uint32_t *pte, size_t swap_slot)
+{
   /* Store the provided swap slot in the address bits of the page table
      entry, truncating excess bits. */
   *pte |= (1 << SWAP_FLAG_BIT);
@@ -151,7 +196,7 @@ page_in_swap (struct thread *owner, void *upage)
 /* Given that the page with user address 'upage' owned by 'owner' is flagged
    to be in the swap disk via the owner's page table, returns its stored
-   swap slot. Otherwise panics the kernel. */
+   swap slot and marks the PTE as not being in swap. */
 size_t
 page_get_swap (struct thread *owner, void *upage)
 {
@@ -161,5 +206,6 @@ page_get_swap (struct thread *owner, void *upage)
   ASSERT ((*pte & PTE_P) == 0);
 
   /* Masks the address bits and returns truncated value. */
+  page_flag_swap (pte, false);
   return ((*pte & PTE_ADDR) >> ADDR_START_BIT);
 }
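
The swap-slot scheme used by page_flag_swap (), page_set_swap () and page_get_swap () packs the slot number into the address bits of a non-present PTE. A standalone sketch of that round trip, with assumed values for SWAP_FLAG_BIT and ADDR_START_BIT (the project's real constants are defined in its own headers):

#include <stdint.h>
#include <stdio.h>

#define SWAP_FLAG_BIT  9           /* Assumed: a PTE bit ignored by the MMU. */
#define ADDR_START_BIT 12          /* Assumed: where the address bits start. */
#define PTE_ADDR       0xfffff000u /* Top 20 bits of an x86 PTE. */

int
main (void)
{
  uint32_t pte = 0;
  size_t slot = 42;

  /* Encode, as page_insert_swapped () does via its helpers: flag the PTE
     as swapped and store the slot in the (now unused) address bits. */
  pte |= 1u << SWAP_FLAG_BIT;
  pte = (pte & ~PTE_ADDR) | ((uint32_t) slot << ADDR_START_BIT);

  /* Decode, as page_get_swap () does: mask the address bits and shift. */
  size_t recovered = (pte & PTE_ADDR) >> ADDR_START_BIT;
  printf ("stored %zu, recovered %zu\n", slot, recovered);
  return 0;
}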

View File

@@ -2,17 +2,22 @@
 #define VM_PAGE_H
 
 #include "threads/thread.h"
+#include "threads/synch.h"
 #include "filesys/off_t.h"
 
 enum page_type {
   PAGE_FILE,
-  PAGE_EMPTY
+  PAGE_MMAP
 };
 
 struct page_entry {
   enum page_type type; /* Type of Data that should go into the page */
   void *upage;         /* Start Address of the User Page (Key of hash table). */
-  void *frame;         /* Frame Address where the page is loaded. */
+
+  /* Data for swapped pages */
+  struct lock lock;    /* Enforces mutual exclusion in accessing the page
+                          referenced by the entry between its owning process
+                          and any thread performing page eviction. */
 
   /* File Data */
   struct file *file;   /* Pointer to the file for executables. */
@@ -27,15 +32,16 @@ struct page_entry {
 unsigned page_hash (const struct hash_elem *e, void *aux);
 bool page_less (const struct hash_elem *a_, const struct hash_elem *b_,
                 void *aux);
-struct page_entry *page_insert (struct file *file, off_t ofs, void *upage,
+struct page_entry *page_insert_swapped (void *upage, void* kpage,
+                                        struct thread *owner);
+struct page_entry *page_insert_file (struct file *file, off_t ofs, void *upage,
                                 uint32_t read_bytes, uint32_t zero_bytes,
-                                bool writable, enum page_type type);
+                                bool writable, enum page_type);
 struct page_entry *page_get (void *upage);
-bool page_load (struct page_entry *page, bool writable);
+bool page_load_file (struct page_entry *page, bool writable);
 void page_cleanup (struct hash_elem *e, void *aux);
-void page_set_swap (struct thread *, void *, size_t);
 bool page_in_swap (struct thread *, void *);
-size_t page_get_swap (struct thread *, void *);
+size_t page_get_swap (struct thread *owner, void *upage);
 
 #endif /* vm/frame.h */
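
For reference, page_get () declared here looks up the SPT keyed on upage via page_hash/page_less. A sketch of how such a lookup typically works with Pintos' <hash.h> (an assumption for illustration only; the project's actual page_get () in page.c is not shown in this diff):

struct page_entry *
page_get (void *upage)
{
  /* Build a stack-allocated key entry; only 'upage' matters for hashing. */
  struct page_entry key;
  key.upage = upage;

  struct hash_elem *e = hash_find (&thread_current ()->pages, &key.elem);
  return e != NULL ? hash_entry (e, struct page_entry, elem) : NULL;
}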