Compare commits
17 Commits
vm/memory-
...
vm/stack-g
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5265fed288 | ||
|
|
d8edc6d3fe | ||
|
|
5682974f9d | ||
|
|
6f85d7642d | ||
|
|
13de832586 | ||
|
|
5c661c2e24 | ||
|
|
5f40d83e66 | ||
|
|
4f84a83611 | ||
|
|
c74a8c55aa | ||
|
|
c670c29e47 | ||
|
|
af7f2ba873 | ||
|
|
3ef5264b6e | ||
|
59e7a64f8e
|
|||
|
cf4bf90cbb
|
|||
|
9a6abab95e
|
|||
|
44f6a85163
|
|||
|
83e044cf68
|
@@ -63,6 +63,7 @@ userprog_SRC += userprog/tss.c # TSS management.
|
||||
|
||||
# Virtual memory code.
|
||||
vm_SRC += vm/frame.c # Frame table manager.
|
||||
vm_SRC += vm/page.c # Page table manager.
|
||||
vm_SRC += devices/swap.c # Swap block manager.
|
||||
#vm_SRC = vm/file.c # Some other file.
|
||||
|
||||
|
||||
@@ -143,6 +143,8 @@ struct thread
|
||||
struct hash open_files; /* Hash Table of FD -> Struct File. */
|
||||
#endif
|
||||
|
||||
void *curr_esp;
|
||||
|
||||
/* Owned by thread.c. */
|
||||
unsigned magic; /* Detects stack overflow. */
|
||||
};
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# -*- makefile -*-
|
||||
|
||||
kernel.bin: DEFINES = -DUSERPROG -DFILESYS
|
||||
KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys
|
||||
kernel.bin: DEFINES = -DUSERPROG -DFILESYS -DVM
|
||||
KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys vm
|
||||
TEST_SUBDIRS = tests/userprog tests/userprog/no-vm tests/filesys/base
|
||||
GRADING_FILE = $(SRCDIR)/tests/userprog/Grading
|
||||
SIMULATOR = --qemu
|
||||
|
||||
@@ -2,8 +2,15 @@
|
||||
#include <inttypes.h>
|
||||
#include <stdio.h>
|
||||
#include "userprog/gdt.h"
|
||||
#include "userprog/pagedir.h"
|
||||
#include "userprog/process.h"
|
||||
#include "threads/interrupt.h"
|
||||
#include "threads/palloc.h"
|
||||
#include "threads/thread.h"
|
||||
#include "threads/vaddr.h"
|
||||
|
||||
#define MAX_STACK_SIZE (8 * 1024 * 1024) // 8MB
|
||||
#define MAX_STACK_OFFSET 32 // 32 bytes offset below stack pointer (ESP)
|
||||
|
||||
/* Number of page faults processed. */
|
||||
static long long page_fault_cnt;
|
||||
@@ -11,6 +18,9 @@ static long long page_fault_cnt;
|
||||
static void kill (struct intr_frame *);
|
||||
static void page_fault (struct intr_frame *);
|
||||
|
||||
static bool is_valid_stack_access (const void *fault_addr, const void *esp);
|
||||
static bool grow_stack (void *upage);
|
||||
|
||||
/* Registers handlers for interrupts that can be caused by user
|
||||
programs.
|
||||
|
||||
@@ -145,6 +155,27 @@ page_fault (struct intr_frame *f)
|
||||
write = (f->error_code & PF_W) != 0;
|
||||
user = (f->error_code & PF_U) != 0;
|
||||
|
||||
if (!user || !not_present)
|
||||
{
|
||||
f->eip = (void *)f->eax;
|
||||
f->eax = 0xffffffff;
|
||||
return;
|
||||
}
|
||||
|
||||
/* If the fault address is in a user page that is not present, then it might
|
||||
be just that the stack needs to grow. So we attempt to grow the stack. */
|
||||
void *upage = pg_round_down (fault_addr);
|
||||
if (not_present && is_user_vaddr (upage) && upage != NULL)
|
||||
{
|
||||
if (is_valid_stack_access (fault_addr, f->esp))
|
||||
{
|
||||
if (grow_stack (upage))
|
||||
return;
|
||||
}
|
||||
|
||||
/* TODO: Check SPT for the page. */
|
||||
}
|
||||
|
||||
/* To implement virtual memory, delete the rest of the function
|
||||
body, and replace it with code that brings in the page to
|
||||
which fault_addr refers. */
|
||||
@@ -156,3 +187,50 @@ page_fault (struct intr_frame *f)
|
||||
kill (f);
|
||||
}
|
||||
|
||||
/* Validates whether the fault address is a valid stack access. Access is a
|
||||
valid stack access under the following two conditions:
|
||||
1. The fault address must be within MAX_STACK_OFFSET (32) bytes below
|
||||
the current stack pointer. (Accounts for both PUSH and PUSHA instructions)
|
||||
2. Growing this stack to this address does not cause it to exceed the
|
||||
MAX_STACK_SIZE (8MB) limit.
|
||||
|
||||
Returns true if both conditions are met, false otherwise.
|
||||
|
||||
Pre: fault_addr is a valid user virtual address (so also not NULL). */
|
||||
static bool
|
||||
is_valid_stack_access (const void *fault_addr, const void *esp)
|
||||
{
|
||||
uint32_t new_stack_size = PHYS_BASE - pg_round_down (fault_addr);
|
||||
|
||||
uint32_t *lowest_valid_push_addr = (uint32_t *)esp - MAX_STACK_OFFSET;
|
||||
bool is_within_push_range = (uint32_t *)fault_addr >= lowest_valid_push_addr;
|
||||
|
||||
return is_within_push_range && new_stack_size <= MAX_STACK_SIZE;
|
||||
}
|
||||
|
||||
/* Attempts to grow the stack by allocating and mapping a new page.
|
||||
This involves:
|
||||
1. Allocating a zeroed page from the user pool
|
||||
2. Installing it into the page table with write permissions
|
||||
|
||||
Returns true if the stack was successfully grown, false if either
|
||||
allocation or installation fails.
|
||||
|
||||
Pre: upage is a valid page-aligned address (so also not NULL). */
|
||||
static bool
|
||||
grow_stack (void *upage)
|
||||
{
|
||||
/* Allocate new page for stack */
|
||||
void *kpage = palloc_get_page (PAL_USER | PAL_ZERO);
|
||||
if (kpage == NULL)
|
||||
return false;
|
||||
|
||||
/* Install the page into user page table */
|
||||
if (!install_page (upage, kpage, true))
|
||||
{
|
||||
palloc_free_page (kpage);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
@@ -116,9 +116,9 @@ process_execute (const char *cmd)
|
||||
return tid;
|
||||
}
|
||||
|
||||
static void *get_usr_kpage (enum palloc_flags flags);
|
||||
static void *get_usr_kpage (enum palloc_flags flags, void *upage);
|
||||
static void free_usr_kpage (void *kpage);
|
||||
static bool install_page (void *upage, void *kpage, bool writable);
|
||||
bool install_page (void *upage, void *kpage, bool writable);
|
||||
|
||||
static bool process_init_stack (char *cmd_saveptr, void **esp, char *file_name);
|
||||
static void *push_to_stack (void **esp, void *data, size_t data_size);
|
||||
@@ -257,12 +257,13 @@ process_init_stack (char *cmd_saveptr, void **esp, char *file_name)
|
||||
int pages_needed = DIV_CEIL (overflow_bytes, PGSIZE);
|
||||
|
||||
/* Allocate the pages and map them to the user process. */
|
||||
void *upage;
|
||||
uint8_t *kpage;
|
||||
for (int i = 1; i < pages_needed + 1; i++)
|
||||
{
|
||||
uint8_t *kpage = get_usr_kpage (PAL_ZERO);
|
||||
if (!install_page (((uint8_t *) PHYS_BASE) - PGSIZE * (i + 1),
|
||||
kpage, true))
|
||||
return false;
|
||||
upage = ((uint8_t *) PHYS_BASE) - PGSIZE * (i + 1);
|
||||
kpage = get_usr_kpage (PAL_ZERO, upage);
|
||||
if (!install_page (upage, kpage, true)) return false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -710,7 +711,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
|
||||
if (kpage == NULL){
|
||||
|
||||
/* Get a new page of memory. */
|
||||
kpage = get_usr_kpage (0);
|
||||
kpage = get_usr_kpage (0, upage);
|
||||
if (kpage == NULL){
|
||||
return false;
|
||||
}
|
||||
@@ -753,10 +754,12 @@ setup_stack (void **esp)
|
||||
uint8_t *kpage;
|
||||
bool success = false;
|
||||
|
||||
kpage = get_usr_kpage (PAL_ZERO);
|
||||
void *upage = ((uint8_t *) PHYS_BASE) - PGSIZE;
|
||||
|
||||
kpage = get_usr_kpage (PAL_ZERO, upage);
|
||||
if (kpage != NULL)
|
||||
{
|
||||
success = install_page (((uint8_t *) PHYS_BASE) - PGSIZE, kpage, true);
|
||||
success = install_page (upage, kpage, true);
|
||||
if (success)
|
||||
*esp = PHYS_BASE;
|
||||
else
|
||||
@@ -765,14 +768,20 @@ setup_stack (void **esp)
|
||||
return success;
|
||||
}
|
||||
|
||||
/* Claims a page from the user pool and returns its kernel address,
|
||||
updating the frame table if VM is enabled. */
|
||||
/* Claims a page from the user pool for ownership by the current thread
|
||||
and returns its kernel address, updating the frame table if VM
|
||||
is enabled. Requires the intended virtual address for where the page
|
||||
will be installed. */
|
||||
static void *
|
||||
get_usr_kpage (enum palloc_flags flags)
|
||||
get_usr_kpage (enum palloc_flags flags, void *upage)
|
||||
{
|
||||
void *page;
|
||||
#ifdef VM
|
||||
page = frame_alloc (flags);
|
||||
struct thread *t = thread_current ();
|
||||
if (pagedir_get_page (t->pagedir, upage) != NULL)
|
||||
return NULL;
|
||||
else
|
||||
page = frame_alloc (flags, upage, t);
|
||||
#else
|
||||
page = palloc_get_page (flags | PAL_USER);
|
||||
#endif
|
||||
@@ -800,7 +809,7 @@ free_usr_kpage (void *kpage)
|
||||
with palloc_get_page().
|
||||
Returns true on success, false if UPAGE is already mapped or
|
||||
if memory allocation fails. */
|
||||
static bool
|
||||
bool
|
||||
install_page (void *upage, void *kpage, bool writable)
|
||||
{
|
||||
struct thread *t = thread_current ();
|
||||
|
||||
@@ -8,4 +8,6 @@ int process_wait (tid_t);
|
||||
void process_exit (void);
|
||||
void process_activate (void);
|
||||
|
||||
bool install_page (void *upage, void *kpage, bool writable);
|
||||
|
||||
#endif /* userprog/process.h */
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
#include "userprog/process.h"
|
||||
#include "userprog/pagedir.h"
|
||||
#include <stdio.h>
|
||||
#include <stdbool.h>
|
||||
#include <syscall-nr.h>
|
||||
|
||||
#define MAX_SYSCALL_ARGS 3
|
||||
@@ -46,8 +47,11 @@ static unsigned syscall_tell (int fd);
|
||||
static void syscall_close (int fd);
|
||||
|
||||
static struct open_file *fd_get_file (int fd);
|
||||
static void validate_user_pointer (const void *start, size_t size);
|
||||
static void validate_user_string (const char *str);
|
||||
static void validate_user_pointer (const void *ptr, size_t size,
|
||||
bool check_write);
|
||||
static void validate_user_string (const char *str, bool check_write);
|
||||
static int get_user (const uint8_t *);
|
||||
static bool put_user (uint8_t *, uint8_t);
|
||||
|
||||
/* A struct defining a syscall_function pointer along with its arity. */
|
||||
struct syscall_arguments
|
||||
@@ -96,8 +100,9 @@ static void
|
||||
syscall_handler (struct intr_frame *f)
|
||||
{
|
||||
/* First, read the system call number from the stack. */
|
||||
validate_user_pointer (f->esp, sizeof (uintptr_t));
|
||||
validate_user_pointer (f->esp, sizeof (uintptr_t), false);
|
||||
uintptr_t syscall_number = *(int *)f->esp;
|
||||
thread_current ()->curr_esp = f->esp;
|
||||
|
||||
/* Ensures the number corresponds to a system call that can be handled. */
|
||||
if (syscall_number >= LOOKUP_SIZE)
|
||||
@@ -107,8 +112,7 @@ syscall_handler (struct intr_frame *f)
|
||||
|
||||
/* Next, read and copy the arguments from the stack pointer. */
|
||||
validate_user_pointer (f->esp + sizeof (uintptr_t),
|
||||
syscall.arity * sizeof (uintptr_t));
|
||||
|
||||
syscall.arity * sizeof (uintptr_t), false);
|
||||
uintptr_t args[MAX_SYSCALL_ARGS] = { 0 };
|
||||
for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++)
|
||||
args[i] = *(uintptr_t *)(f->esp + sizeof (uintptr_t) * (i + 1));
|
||||
@@ -140,8 +144,7 @@ syscall_exit (int status)
|
||||
static pid_t
|
||||
syscall_exec (const char *cmd_line)
|
||||
{
|
||||
/* Validate the user string before executing the process. */
|
||||
validate_user_string (cmd_line);
|
||||
validate_user_string (cmd_line, false);
|
||||
|
||||
return process_execute (cmd_line); /* Returns the PID of the new process */
|
||||
}
|
||||
@@ -160,8 +163,7 @@ syscall_wait (pid_t pid)
|
||||
static bool
|
||||
syscall_create (const char *file, unsigned initial_size)
|
||||
{
|
||||
/* Validate the user string before creating the file. */
|
||||
validate_user_string (file);
|
||||
validate_user_string (file, false);
|
||||
|
||||
/* Acquire the file system lock to prevent race conditions. */
|
||||
lock_acquire (&filesys_lock);
|
||||
@@ -178,8 +180,7 @@ syscall_create (const char *file, unsigned initial_size)
|
||||
static bool
|
||||
syscall_remove (const char *file)
|
||||
{
|
||||
/* Validate the user string before removing the file. */
|
||||
validate_user_string (file);
|
||||
validate_user_string (file, false);
|
||||
|
||||
/* Acquire the file system lock to prevent race conditions. */
|
||||
lock_acquire (&filesys_lock);
|
||||
@@ -197,8 +198,7 @@ syscall_remove (const char *file)
|
||||
static int
|
||||
syscall_open (const char *file)
|
||||
{
|
||||
/* Validate the user string before opening the file. */
|
||||
validate_user_string (file);
|
||||
validate_user_string (file, false);
|
||||
|
||||
/* Acquire the file system lock to prevent race conditions. */
|
||||
lock_acquire (&filesys_lock);
|
||||
@@ -264,8 +264,7 @@ syscall_read (int fd, void *buffer, unsigned size)
|
||||
if (fd < STDIN_FILENO || fd == STDOUT_FILENO)
|
||||
return EXIT_FAILURE;
|
||||
|
||||
/* Validate the user buffer for the provided size before reading. */
|
||||
validate_user_pointer (buffer, size);
|
||||
validate_user_pointer (buffer, size, true);
|
||||
|
||||
if (fd == STDIN_FILENO)
|
||||
{
|
||||
@@ -308,8 +307,7 @@ syscall_write (int fd, const void *buffer, unsigned size)
|
||||
if (fd <= 0)
|
||||
return 0;
|
||||
|
||||
/* Validate the user buffer for the provided size before writing. */
|
||||
validate_user_pointer (buffer, size);
|
||||
validate_user_pointer (buffer, size, false);
|
||||
|
||||
if (fd == STDOUT_FILENO)
|
||||
{
|
||||
@@ -451,63 +449,91 @@ fd_get_file (int fd)
|
||||
return hash_entry (e, struct open_file, elem);
|
||||
}
|
||||
|
||||
/* Validates if a block of memory starting at START and of size SIZE bytes is
|
||||
fully contained within user virtual memory. Kills the thread (by exiting with
|
||||
failure) if the memory is invalid. Otherwise, returns (nothing) normally.
|
||||
If the size is 0, the function does no checks and returns the given ptr. */
|
||||
/* Validates if a block of memory starting at PTR and of size SIZE bytes is
|
||||
fully contained within valid user virtual memory. thread_exit () if the
|
||||
memory is invalid.
|
||||
If the size is 0, the function does no checks and returns immediately.
|
||||
static void
|
||||
validate_user_pointer (const void *start, size_t size)
|
||||
validate_user_pointer (const void *ptr, size_t size, bool check_write)
|
||||
{
|
||||
/* If the size is 0, we do not need to check anything. */
|
||||
if (size == 0)
|
||||
return;
|
||||
|
||||
const void *end = start + size - 1;
|
||||
|
||||
/* Check if the start and end pointers are valid user virtual addresses. */
|
||||
if (start == NULL || !is_user_vaddr (start) || !is_user_vaddr (end))
|
||||
/* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a
|
||||
valid user virtual memory address. */
|
||||
void *last = ptr + size - 1;
|
||||
if (!is_user_vaddr (last))
|
||||
syscall_exit (EXIT_FAILURE);
|
||||
|
||||
/* We now need to check if the entire memory block is mapped to physical
|
||||
memory by the page table. */
|
||||
for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
|
||||
if (pagedir_get_page (thread_current ()->pagedir, ptr) == NULL)
|
||||
ptr = pg_round_down (ptr);
|
||||
while (ptr <= last)
|
||||
{
|
||||
int result;
|
||||
/* Check read access to pointer. */
|
||||
if ((result = get_user (ptr)) == -1)
|
||||
syscall_exit (EXIT_FAILURE);
|
||||
/* Check write access to pointer (if required). */
|
||||
if (check_write && !put_user (ptr, result))
|
||||
syscall_exit (EXIT_FAILURE);
|
||||
ptr += PGSIZE;
|
||||
}
|
||||
}
|
||||
|
||||
/* Validates if a string is fully contained within user virtual memory. Kills
|
||||
the thread (by exiting with failure) if the memory is invalid. Otherwise,
|
||||
returns (nothing) normally. */
|
||||
/* Validates whether a C-string starting at ptr is fully contained within valid
|
||||
user virtual memory. thread_exit () if the memory is invalid. */
|
||||
static void
|
||||
validate_user_string (const char *str)
|
||||
validate_user_string (const char *ptr, bool check_write)
|
||||
{
|
||||
/* Check if the string pointer is a valid user virtual address. */
|
||||
if (str == NULL || !is_user_vaddr (str))
|
||||
syscall_exit (EXIT_FAILURE);
|
||||
size_t offset = (uintptr_t) ptr % PGSIZE;
|
||||
|
||||
/* Calculate the offset of the string within the (first) page. */
|
||||
size_t offset = (uintptr_t) str % PGSIZE;
|
||||
|
||||
/* We move page by page, checking if the page is mapped to physical memory. */
|
||||
for (;;)
|
||||
{
|
||||
void *page = pg_round_down (str);
|
||||
void *page = pg_round_down (ptr);
|
||||
|
||||
/* If we reach addresses that are not mapped to physical memory before the
|
||||
end of the string, the thread is terminated. */
|
||||
if (!is_user_vaddr(page) ||
|
||||
pagedir_get_page (thread_current ()->pagedir, page) == NULL)
|
||||
if (!is_user_vaddr (page))
|
||||
syscall_exit (EXIT_FAILURE);
|
||||
if (!is_user_vaddr (ptr))
|
||||
syscall_exit (EXIT_FAILURE);
|
||||
int result;
|
||||
if ((result = get_user ((const uint8_t *)ptr)) == -1)
|
||||
syscall_exit (EXIT_FAILURE);
|
||||
if (check_write && !put_user ((uint8_t *)ptr, result))
|
||||
syscall_exit (EXIT_FAILURE);
|
||||
|
||||
while (offset < PGSIZE)
|
||||
{
|
||||
if (*str == '\0')
|
||||
if (*ptr == '\0')
|
||||
return; /* We reached the end of the string without issues. */
|
||||
|
||||
str++;
|
||||
ptr++;
|
||||
offset++;
|
||||
}
|
||||
|
||||
offset = 0; /* Next page will start at the beginning. */
|
||||
offset = 0;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/* PROVIDED BY SPEC.
   Reads a byte at user virtual address UADDR.
   UADDR must be below PHYS_BASE.
   Returns the byte value if successful, -1 if a segfault occurred.

   How it works: "movl $1f, %0" preloads EAX with the address of the
   local label "1:" before the access.  If the "movzbl" faults, the page
   fault handler resumes execution at the address held in EAX (i.e. at
   "1:") and sets EAX to 0xffffffff, so RESULT becomes -1.  On success
   RESULT holds the zero-extended byte that was read. */
static int
get_user (const uint8_t *uaddr)
{
  int result;
  asm ("movl $1f, %0; movzbl %1, %0; 1:" : "=&a"(result) : "m"(*uaddr));
  return result;
}
|
||||
|
||||
/* PROVIDED BY SPEC.
   Writes BYTE to user address UDST.
   UDST must be below PHYS_BASE.
   Returns true if successful, false if a segfault occurred.

   As in get_user(), EAX is preloaded with the address of label "1:" so
   the page fault handler can resume there after setting EAX to
   0xffffffff; ERROR_CODE is therefore -1 exactly when the "movb"
   faulted, and the label address (never -1) otherwise. */
static bool
put_user (uint8_t *udst, uint8_t byte)
{
  int error_code;
  asm ("movl $1f, %0; movb %b2, %1; 1:"
       : "=&a"(error_code), "=m"(*udst)
       : "q"(byte));
  return error_code != -1;
}
|
||||
187
src/vm/frame.c
187
src/vm/frame.c
@@ -1,34 +1,42 @@
|
||||
#include <debug.h>
|
||||
#include <hash.h>
|
||||
#include <list.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "frame.h"
|
||||
#include "page.h"
|
||||
#include "threads/malloc.h"
|
||||
#include "threads/vaddr.h"
|
||||
#include "userprog/pagedir.h"
|
||||
#include "threads/synch.h"
|
||||
#include "devices/swap.h"
|
||||
|
||||
/* Hash table that maps every active frame's kernel virtual address
|
||||
to its corresponding 'frame_metadata'.*/
|
||||
struct hash frame_table;
|
||||
|
||||
/* Linked list of frame_metadata whose pages are predicted to currently
|
||||
be in the working set of a process. They are not considered for
|
||||
eviction, but are considered for demotion to the 'inactive' list. */
|
||||
struct list active_list;
|
||||
/* Linked list used to represent the circular queue in the 'clock'
|
||||
algorithm for page eviction. Iterating from the element that is
|
||||
currently pointed at by 'next_victim' yields an ordering of the entries
|
||||
from oldest to newest (in terms of when they were added or checked
|
||||
for having been referenced by a process). */
|
||||
struct list lru_list;
|
||||
|
||||
/* Linked list of frame_metadata whose pages are predicted to leave the
|
||||
working set of their processes soon, so are considered for eviction.
|
||||
Pages are considered for eviction from the tail end, and are initially
|
||||
demoted to 'inactive' at the head. */
|
||||
struct list inactive_list;
|
||||
/* The next element in lru_list to be considered for eviction (oldest added
|
||||
or referenced page in the circular queue). If this page has an
|
||||
'accessed' bit of 0 when considering eviction, then it will be the next
|
||||
victim. Otherwise, the next element in the queue is similarly considered. */
|
||||
struct list_elem *next_victim = NULL;
|
||||
|
||||
/* Synchronisation variables. */
|
||||
/* Ensures mutual exclusion to accessing the 'head' and first element of
|
||||
'inactive_list', which is accessed every time a frame is allocated. */
|
||||
struct lock inactive_head_lock;
|
||||
/* Protects access to 'lru_list'. */
|
||||
struct lock lru_lock;
|
||||
|
||||
struct frame_metadata
|
||||
{
|
||||
void *frame; /* The kernel virtual address holding the frame. */
|
||||
void *upage; /* The user virtual address pointing to the frame. */
|
||||
struct thread *owner; /* Pointer to the thread that owns the frame. */
|
||||
struct hash_elem hash_elem; /* Tracks the position of the frame metadata
|
||||
within 'frame_table', whose key is the
|
||||
kernel virtual address of the frame. */
|
||||
@@ -40,56 +48,102 @@ struct frame_metadata
|
||||
hash_hash_func frame_metadata_hash;
|
||||
hash_less_func frame_metadata_less;
|
||||
|
||||
static struct list_elem *lru_next (struct list_elem *e);
|
||||
static struct list_elem *lru_prev (struct list_elem *e);
|
||||
static struct frame_metadata *get_victim (void);
|
||||
|
||||
/* Initialize the frame system by initializing the frame (hash) table with
|
||||
the frame_metadata hashing and comparison functions, as well as initializing
|
||||
the active & inactive lists. Also initializes the system's synchronisation
|
||||
primitives. */
|
||||
'lru_list' and its associated synchronisation primitives. */
|
||||
void
|
||||
frame_init (void)
|
||||
{
|
||||
hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
|
||||
list_init (&active_list);
|
||||
list_init (&inactive_list);
|
||||
|
||||
lock_init (&inactive_head_lock);
|
||||
list_init (&lru_list);
|
||||
lock_init (&lru_lock);
|
||||
}
|
||||
|
||||
/* TODO: Consider synchronisation more closely (i.e. just for hash
|
||||
table). */
|
||||
/* Attempt to allocate a frame for a user process, either by direct
|
||||
allocation of a user page if there is sufficient RAM, or by
|
||||
evicting a currently active page if memory allocated for user
|
||||
processes is full and storing it in swap. If swap is full in
|
||||
the former case, panic the kernel. */
|
||||
void *
|
||||
frame_alloc (enum palloc_flags flags)
|
||||
frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
|
||||
{
|
||||
struct frame_metadata *frame_metadata;
|
||||
flags |= PAL_USER;
|
||||
|
||||
lock_acquire (&lru_lock);
|
||||
void *frame = palloc_get_page (flags);
|
||||
|
||||
/* If a frame couldn't be allocated we must be out of main memory. Thus,
|
||||
obtain a victim page to replace with our page, and swap the victim
|
||||
into disk. */
|
||||
if (frame == NULL)
|
||||
{
|
||||
/* TODO: Find victim page to replace, and swap it with this new page. */
|
||||
return NULL;
|
||||
/* 1. Obtain victim. */
|
||||
if (next_victim == NULL)
|
||||
PANIC ("Couldn't allocate a single page to main memory!\n");
|
||||
|
||||
struct frame_metadata *victim = get_victim ();
|
||||
ASSERT (victim != NULL); /* get_victim () should never return null. */
|
||||
|
||||
/* 2. Swap out victim into disk. */
|
||||
size_t swap_slot = swap_out (victim->frame);
|
||||
page_set_swap (victim->owner, victim->upage, swap_slot);
|
||||
|
||||
/* If zero flag is set, zero out the victim page. */
|
||||
if (flags & PAL_ZERO)
|
||||
memset (victim->frame, 0, PGSIZE);
|
||||
|
||||
/* 3. Indicate that the new frame's metadata will be stored
|
||||
inside the same structure that stored the victim's metadata.
|
||||
As both the new frame and the victim frame share the same kernel
|
||||
virtual address, the hash map need not be updated, and neither
|
||||
the list_elem value as both share the same lru_list position. */
|
||||
frame_metadata = victim;
|
||||
}
|
||||
|
||||
struct frame_metadata *frame_metadata =
|
||||
malloc (sizeof (struct frame_metadata));
|
||||
/* If sufficient main memory allows the frame to be directly allocated,
|
||||
we must update the frame table with a new entry, and grow lru_list. */
|
||||
else
|
||||
{
|
||||
/* Must own lru_lock here, as otherwise there is a race condition
|
||||
with next_victim either being NULL or uninitialized. */
|
||||
frame_metadata = malloc (sizeof (struct frame_metadata));
|
||||
frame_metadata->frame = frame;
|
||||
|
||||
/* Newly faulted pages begin at the head of the inactive list. */
|
||||
lock_acquire (&inactive_head_lock);
|
||||
list_push_front (&inactive_list, &frame_metadata->list_elem);
|
||||
lock_release (&inactive_head_lock);
|
||||
/* Newly allocated frames are pushed to the back of the circular queue
|
||||
represented by lru_list. Must explicitly handle the case where the
|
||||
circular queue is empty (when next_victim == NULL). */
|
||||
if (next_victim == NULL)
|
||||
{
|
||||
list_push_back (&lru_list, &frame_metadata->list_elem);
|
||||
next_victim = &frame_metadata->list_elem;
|
||||
}
|
||||
else
|
||||
{
|
||||
struct list_elem *lru_tail = lru_prev (next_victim);
|
||||
list_insert (lru_tail, &frame_metadata->list_elem);
|
||||
}
|
||||
|
||||
/* Finally, insert frame metadata within the frame table, with the key as its
|
||||
allocated kernel address. */
|
||||
hash_replace (&frame_table, &frame_metadata->hash_elem);
|
||||
hash_insert (&frame_table, &frame_metadata->hash_elem);
|
||||
}
|
||||
|
||||
return frame;
|
||||
frame_metadata->upage = upage;
|
||||
frame_metadata->owner = owner;
|
||||
lock_release (&lru_lock);
|
||||
|
||||
return frame_metadata->frame;
|
||||
}
|
||||
|
||||
/* Attempt to deallocate a frame for a user process by removing it from the
|
||||
frame table as well as active/inactive list, and freeing the underlying
|
||||
page memory. Panics if the frame isn't active in memory. */
|
||||
frame table as well as lru_list, and freeing the underlying page
|
||||
memory & metadata struct. Panics if the frame isn't active in memory. */
|
||||
void
|
||||
frame_free (void *frame)
|
||||
{
|
||||
@@ -98,17 +152,58 @@ frame_free (void *frame)
|
||||
|
||||
struct hash_elem *e =
|
||||
hash_delete (&frame_table, &key_metadata.hash_elem);
|
||||
if (e == NULL) PANIC ("Attempted to free a frame without a corresponding "
|
||||
"kernel address!\n");
|
||||
if (e == NULL) PANIC ("Attempted to free a frame at kernel address %p, "
|
||||
"but this address is not allocated!\n", frame);
|
||||
|
||||
struct frame_metadata *frame_metadata =
|
||||
hash_entry (e, struct frame_metadata, hash_elem);
|
||||
|
||||
lock_acquire (&lru_lock);
|
||||
list_remove (&frame_metadata->list_elem);
|
||||
|
||||
/* If we're freeing the frame marked as the next victim, update
|
||||
next_victim to either be the next least recently used page, or NULL
|
||||
if no pages are loaded in main memory. */
|
||||
if (&frame_metadata->list_elem == next_victim)
|
||||
{
|
||||
if (list_empty (&lru_list))
|
||||
next_victim = NULL;
|
||||
else
|
||||
next_victim = lru_next (next_victim);
|
||||
}
|
||||
lock_release (&lru_lock);
|
||||
|
||||
free (frame_metadata);
|
||||
palloc_free_page (frame);
|
||||
}
|
||||
|
||||
/* TODO: Account for page aliases when checking accessed bit. */
/* Selects the next eviction victim using the 'clock' (second-chance)
   algorithm: starting at next_victim, walk lru_list as a circular
   queue; the first frame whose owner's page table shows an 'accessed'
   bit of 0 is chosen.  Frames that were accessed have the bit cleared
   and get a second chance, so the walk terminates within two sweeps.

   A pre-condition for calling this function is that the calling thread
   owns lru_lock and that lru_list is non-empty. */
static struct frame_metadata *
get_victim (void)
{
  struct list_elem *e = next_victim;
  struct frame_metadata *frame_metadata;
  uint32_t *pd;                 /* Candidate owner's page directory. */
  void *upage;                  /* Candidate's user virtual address. */
  for (;;)
    {
      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
      pd = frame_metadata->owner->pagedir;
      upage = frame_metadata->upage;
      /* Advance before testing so that next_victim ends up pointing
         one past the chosen victim. */
      e = lru_next (e);

      if (!pagedir_is_accessed (pd, upage))
        break;

      /* Referenced since the last sweep: clear the bit and give this
         frame a second chance. */
      pagedir_set_accessed (pd, upage, false);
    }

  next_victim = e;
  return frame_metadata;
}
|
||||
|
||||
/* Hash function for frame metadata, used for storing entries in the
|
||||
frame table. */
|
||||
unsigned
|
||||
@@ -135,3 +230,27 @@ frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
|
||||
return a->frame < b->frame;
|
||||
}
|
||||
|
||||
/* Returns the next recently used element after the one provided, which
|
||||
is achieved by iterating through lru_list like a circular queue
|
||||
(wrapping around the list at the tail). */
|
||||
static struct list_elem *
|
||||
lru_next (struct list_elem *e)
|
||||
{
|
||||
if (!list_empty (&lru_list) && e == list_back (&lru_list))
|
||||
return list_front (&lru_list);
|
||||
|
||||
return list_next (e);
|
||||
}
|
||||
|
||||
/* Returns the previous recently used element after the one provided, which
|
||||
is achieved by iterating through lru_list like a circular queue
|
||||
(wrapping around the list at the head). */
|
||||
static struct list_elem *
|
||||
lru_prev (struct list_elem *e)
|
||||
{
|
||||
if (!list_empty (&lru_list) && e == list_front (&lru_list))
|
||||
return list_back (&lru_list);
|
||||
|
||||
return list_prev (e);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
#ifndef VM_FRAME_H
|
||||
#define VM_FRAME_H
|
||||
|
||||
#include "threads/thread.h"
|
||||
#include "threads/palloc.h"
|
||||
|
||||
void frame_init (void);
|
||||
void *frame_alloc (enum palloc_flags);
|
||||
void *frame_alloc (enum palloc_flags, void *, struct thread *);
|
||||
void frame_free (void *frame);
|
||||
|
||||
#endif /* vm/frame.h */
|
||||
|
||||
20
src/vm/page.c
Normal file
20
src/vm/page.c
Normal file
@@ -0,0 +1,20 @@
|
||||
#include "page.h"
|
||||
|
||||
/* Updates the 'owner' thread's page table entry for virtual address 'upage'
   to have a present bit of 0 and stores the specified swap slot value in the
   entry for later retrieval from disk.

   NOTE(review): not yet implemented -- currently a no-op, so the
   eviction path in frame_alloc() that calls this will lose track of
   swapped-out pages until the body is filled in. */
void
page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
{

}
|
||||
|
||||
/* Given that the page with user address 'upage' owned by 'owner' is flagged
   to be in the swap disk via the owner's page table, returns its stored
   swap slot. Otherwise panics the kernel.

   NOT YET IMPLEMENTED.  The original stub fell off the end of a
   non-void function, which is undefined behaviour for any caller that
   uses the result.  Until the page-table swap encoding exists, silence
   the unused-parameter warnings and return a well-defined dummy slot
   of 0.
   TODO: decode the swap slot from the owner's page table entry and
   panic if the page is not actually marked as swapped. */
size_t
page_get_swap (struct thread *owner, void *upage)
{
  (void) owner;
  (void) upage;
  return 0;
}
|
||||
|
||||
9
src/vm/page.h
Normal file
9
src/vm/page.h
Normal file
@@ -0,0 +1,9 @@
|
||||
#ifndef VM_PAGE_H
|
||||
#define VM_PAGE_H
|
||||
|
||||
#include "threads/thread.h"
|
||||
|
||||
void page_set_swap (struct thread *, void *, size_t);
|
||||
size_t page_get_swap (struct thread *, void *);
|
||||
|
||||
#endif /* vm/page.h */
|
||||
Reference in New Issue
Block a user