Compare commits

..

22 Commits

Author SHA1 Message Date
sBubshait
5265fed288 Refactor stack growth to be helper functions in exception for easier merging 2024-12-05 00:27:40 +00:00
Demetriades, Themis
d8edc6d3fe Merge branch 'virtual-memory' into 'ethan-stack-growth'
# Conflicts:
#   src/Makefile.build
2024-11-30 23:21:16 +00:00
Demetriades, Themis
5682974f9d Merge branch 'vm/supplemental-page-table' into 'master'
Implement frame table & page eviction algorithm

See merge request lab2425_autumn/pintos_22!53
2024-11-30 23:01:04 +00:00
Themis Demetriades
6f85d7642d feat: implement clock (second-chance) page eviction algorithm 2024-11-30 22:40:13 +00:00
EDiasAlberto
13de832586 Refactor stack growth code to remove messy conditions 2024-11-29 23:52:05 +00:00
EDiasAlberto
5c661c2e24 Feat: pointer validation checks string across multiple pages and handle kernel page faults 2024-11-29 23:49:49 +00:00
EDiasAlberto
5f40d83e66 Implement MMU-based user memory validation 2024-11-29 23:03:31 +00:00
EDiasAlberto
4f84a83611 Refactor: abstract new page allocation to one general function and make helper functions static 2024-11-27 19:41:22 +00:00
EDiasAlberto
c74a8c55aa Implement stack growth for system calls and add stack pointer tracking to thread 2024-11-27 19:21:43 +00:00
EDiasAlberto
c670c29e47 update stack growth header to fit virtual memory naming format 2024-11-27 18:57:20 +00:00
Themis Demetriades
ea2725f606 feat: implement frame table without thread safety 2024-11-26 15:17:11 +00:00
EDiasAlberto
af7f2ba873 Fix: Magic number in stackgrowth.c 2024-11-26 04:54:00 +00:00
EDiasAlberto
3ef5264b6e feat: allow stack to grow for process up to 8MB in size 2024-11-26 04:43:25 +00:00
Demetriades, Themis
605050e38d Merge branch 'code-review-2-changes' into 'master'
fix: code review 2 changes

See merge request lab2425_autumn/pintos_22!52
2024-11-24 16:52:48 +00:00
6225a2eb8b fix: ignore failing tests for now 2024-11-24 16:22:13 +00:00
aedb72246b fix: do not acquire filesys_lock for tell and seek 2024-11-24 15:41:18 +00:00
e1f0258f8e fix: handle malloc result in init_process_result 2024-11-24 15:09:32 +00:00
59e7a64f8e Only check user pages rather than all bytes in-between, for known-size pointers 2024-11-12 15:48:22 +00:00
cf4bf90cbb Implement user pointer checking for C strings 2024-11-12 15:34:45 +00:00
9a6abab95e Check access to user memory using page fault method (via get_user and put_user). 2024-11-12 15:00:16 +00:00
44f6a85163 Add get_user and put_user provided by spec. 2024-11-12 14:50:53 +00:00
83e044cf68 Let kernel handle its own page faults 2024-11-12 14:50:53 +00:00
14 changed files with 537 additions and 91 deletions

View File

@@ -16,18 +16,13 @@ stages:
script:
- cd src/$DIR
- make check | tee build.log
- grep -q "FAIL tests/$DIR" build.log && exit 1 || exit 0
- grep -vE "^FAIL $IGNORE\$" build.log | grep -q "FAIL tests/$DIR" && exit 1 || exit 0
test_devices:
extends: .pintos_tests
variables:
DIR: devices
test_filesys:
extends: .pintos_tests
variables:
DIR: filesys
test_threads:
extends: .pintos_tests
variables:
@@ -42,3 +37,4 @@ test_vm:
extends: .pintos_tests
variables:
DIR: vm
IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-read|tests/vm/mmap-close|tests/vm/mmap-overlap|tests/vm/mmap-twice|tests/vm/mmap-write|tests/vm/mmap-exit|tests/vm/mmap-shuffle|tests/vm/mmap-clean|tests/vm/mmap-inherit|tests/vm/mmap-misalign|tests/vm/mmap-null|tests/vm/mmap-over-code|tests/vm/mmap-over-data|tests/vm/mmap-over-stk|tests/vm/mmap-remove)

View File

@@ -62,6 +62,8 @@ userprog_SRC += userprog/gdt.c # GDT initialization.
userprog_SRC += userprog/tss.c # TSS management.
# Virtual memory code.
vm_SRC += vm/frame.c # Frame table manager.
vm_SRC += vm/page.c # Page table manager.
vm_SRC += devices/swap.c # Swap block manager.
#vm_SRC = vm/file.c # Some other file.

View File

@@ -32,6 +32,7 @@
#include "tests/threads/tests.h"
#endif
#ifdef VM
#include "vm/frame.h"
#include "devices/swap.h"
#endif
#ifdef FILESYS
@@ -101,6 +102,9 @@ main (void)
palloc_init (user_page_limit);
malloc_init ();
paging_init ();
#ifdef VM
frame_init ();
#endif
/* Segmentation. */
#ifdef USERPROG

View File

@@ -71,7 +71,7 @@ static void kernel_thread (thread_func *, void *aux);
static void idle (void *aux UNUSED);
static struct thread *running_thread (void);
static struct thread *next_thread_to_run (void);
static void init_process_result (struct thread *t);
static bool init_process_result (struct thread *t);
static void init_thread (struct thread *, const char *name, int nice,
int priority, fp32_t recent_cpu);
static bool is_thread (struct thread *) UNUSED;
@@ -252,7 +252,11 @@ thread_create (const char *name, int priority,
struct thread *parent_thread = thread_current ();
init_thread (t, name, parent_thread->nice, priority, parent_thread->recent_cpu);
tid = t->tid = allocate_tid ();
init_process_result (t);
if (!init_process_result (t))
{
palloc_free_page (t);
return TID_ERROR;
}
#ifdef USERPROG
/* Initialize the thread's file descriptor table. */
@@ -668,15 +672,18 @@ is_thread (struct thread *t)
}
/* Allocate and initialise a process result for given thread. */
static void
static bool
init_process_result (struct thread *t)
{
struct process_result *result = malloc (sizeof (struct process_result));
if (result == NULL)
return false;
result->tid = t->tid;
result->exit_status = -1;
lock_init (&result->lock);
sema_init (&result->sema, 0);
t->result = result;
return true;
}
/* Does basic initialization of T as a blocked thread named

View File

@@ -143,6 +143,8 @@ struct thread
struct hash open_files; /* Hash Table of FD -> Struct File. */
#endif
void *curr_esp;
/* Owned by thread.c. */
unsigned magic; /* Detects stack overflow. */
};

View File

@@ -1,7 +1,7 @@
# -*- makefile -*-
kernel.bin: DEFINES = -DUSERPROG -DFILESYS
KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys
kernel.bin: DEFINES = -DUSERPROG -DFILESYS -DVM
KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys vm
TEST_SUBDIRS = tests/userprog tests/userprog/no-vm tests/filesys/base
GRADING_FILE = $(SRCDIR)/tests/userprog/Grading
SIMULATOR = --qemu

View File

@@ -2,8 +2,15 @@
#include <inttypes.h>
#include <stdio.h>
#include "userprog/gdt.h"
#include "userprog/pagedir.h"
#include "userprog/process.h"
#include "threads/interrupt.h"
#include "threads/palloc.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#define MAX_STACK_SIZE (8 * 1024 * 1024) // 8MB
#define MAX_STACK_OFFSET 32 // 32 bytes offset below stack pointer (ESP)
/* Number of page faults processed. */
static long long page_fault_cnt;
@@ -11,6 +18,9 @@ static long long page_fault_cnt;
static void kill (struct intr_frame *);
static void page_fault (struct intr_frame *);
static bool is_valid_stack_access (const void *fault_addr, const void *esp);
static bool grow_stack (void *upage);
/* Registers handlers for interrupts that can be caused by user
programs.
@@ -145,6 +155,27 @@ page_fault (struct intr_frame *f)
write = (f->error_code & PF_W) != 0;
user = (f->error_code & PF_U) != 0;
if (!user || !not_present)
{
f->eip = (void *)f->eax;
f->eax = 0xffffffff;
return;
}
/* If the fault address is in a user page that is not present, then it might
be just that the stack needs to grow. So we attempt to grow the stack. */
void *upage = pg_round_down (fault_addr);
if (not_present && is_user_vaddr (upage) && upage != NULL)
{
if (is_valid_stack_access (fault_addr, f->esp))
{
if (grow_stack (upage))
return;
}
/* TODO: Check SPT for the page. */
}
/* To implement virtual memory, delete the rest of the function
body, and replace it with code that brings in the page to
which fault_addr refers. */
@@ -156,3 +187,50 @@ page_fault (struct intr_frame *f)
kill (f);
}
/* Validates whether the fault address is a valid stack access. Access is a
   valid stack access under the following two conditions:
   1. The fault address must be within MAX_STACK_OFFSET (32) bytes below
      the current stack pointer. (Accounts for both PUSH and PUSHA
      instructions, the latter of which pushes 32 bytes at once.)
   2. Growing the stack to this address does not cause it to exceed the
      MAX_STACK_SIZE (8MB) limit.
   Returns true if both conditions are met, false otherwise.
   Pre: fault_addr is a valid user virtual address (so also not NULL). */
static bool
is_valid_stack_access (const void *fault_addr, const void *esp)
{
  uint32_t new_stack_size = PHYS_BASE - pg_round_down (fault_addr);

  /* Byte-granular pointer arithmetic: using a uint32_t * here would scale
     the offset by 4 and wrongly accept accesses up to 128 bytes below ESP. */
  const uint8_t *lowest_valid_push_addr =
      (const uint8_t *) esp - MAX_STACK_OFFSET;
  bool is_within_push_range =
      (const uint8_t *) fault_addr >= lowest_valid_push_addr;

  return is_within_push_range && new_stack_size <= MAX_STACK_SIZE;
}
/* Grows the user stack by one page: obtains a zeroed page from the user
   pool and maps it at UPAGE with write permission. On installation failure
   the freshly allocated page is returned to the pool so nothing leaks.
   Returns true on success, false if allocation or mapping fails.
   Pre: upage is a valid page-aligned address (so also not NULL). */
static bool
grow_stack (void *upage)
{
  void *kpage = palloc_get_page (PAL_USER | PAL_ZERO);
  if (kpage == NULL)
    return false;

  if (install_page (upage, kpage, true))
    return true;

  /* Mapping failed: release the page we just claimed. */
  palloc_free_page (kpage);
  return false;
}

View File

@@ -24,6 +24,9 @@
#include "threads/vaddr.h"
#include "threads/synch.h"
#include "devices/timer.h"
#ifdef VM
#include "vm/frame.h"
#endif
/* Defines the native number of bytes processed by the processor
(for the purposes of alignment). */
@@ -113,7 +116,10 @@ process_execute (const char *cmd)
return tid;
}
static bool install_page (void *upage, void *kpage, bool writable);
static void *get_usr_kpage (enum palloc_flags flags, void *upage);
static void free_usr_kpage (void *kpage);
bool install_page (void *upage, void *kpage, bool writable);
static bool process_init_stack (char *cmd_saveptr, void **esp, char *file_name);
static void *push_to_stack (void **esp, void *data, size_t data_size);
#define push_var_to_stack(esp, var) (push_to_stack (esp, &var, sizeof (var)))
@@ -251,12 +257,13 @@ process_init_stack (char *cmd_saveptr, void **esp, char *file_name)
int pages_needed = DIV_CEIL (overflow_bytes, PGSIZE);
/* Allocate the pages and map them to the user process. */
void *upage;
uint8_t *kpage;
for (int i = 1; i < pages_needed + 1; i++)
{
uint8_t *kpage = palloc_get_page (PAL_USER | PAL_ZERO);
if (!install_page (((uint8_t *) PHYS_BASE) - PGSIZE * (i + 1),
kpage, true))
return false;
upage = ((uint8_t *) PHYS_BASE) - PGSIZE * (i + 1);
kpage = get_usr_kpage (PAL_ZERO, upage);
if (!install_page (upage, kpage, true)) return false;
}
}
@@ -704,7 +711,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
if (kpage == NULL){
/* Get a new page of memory. */
kpage = palloc_get_page (PAL_USER);
kpage = get_usr_kpage (0, upage);
if (kpage == NULL){
return false;
}
@@ -712,7 +719,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
/* Add the page to the process's address space. */
if (!install_page (upage, kpage, writable))
{
palloc_free_page (kpage);
free_usr_kpage (kpage);
return false;
}
@@ -746,19 +753,53 @@ setup_stack (void **esp)
{
uint8_t *kpage;
bool success = false;
kpage = palloc_get_page (PAL_USER | PAL_ZERO);
void *upage = ((uint8_t *) PHYS_BASE) - PGSIZE;
kpage = get_usr_kpage (PAL_ZERO, upage);
if (kpage != NULL)
{
success = install_page (((uint8_t *) PHYS_BASE) - PGSIZE, kpage, true);
success = install_page (upage, kpage, true);
if (success)
*esp = PHYS_BASE;
else
palloc_free_page (kpage);
free_usr_kpage (kpage);
}
return success;
}
/* Claims a page from the user pool for ownership by the current thread
   and returns its kernel address, updating the frame table if VM
   is enabled. Requires the intended virtual address for where the page
   will be installed.
   Returns NULL if UPAGE is already mapped (VM builds) or if no page is
   available. */
static void *
get_usr_kpage (enum palloc_flags flags, void *upage)
{
  void *page;
#ifdef VM
  struct thread *t = thread_current ();
  /* Refuse to double-map: if UPAGE already has a frame, fail early rather
     than allocating a frame that could never be installed. */
  if (pagedir_get_page (t->pagedir, upage) != NULL)
    return NULL;
  else
    page = frame_alloc (flags, upage, t);  /* Frame table tracks owner. */
#else
  /* Non-VM builds fall back to plain user-pool allocation. */
  page = palloc_get_page (flags | PAL_USER);
#endif
  return page;
}
/* Frees a page belonging to a user process given its kernel address,
   updating the frame table if VM is enabled.
   KPAGE must have been obtained via get_usr_kpage (); passing any other
   address will panic in frame_free () on VM builds. */
static void
free_usr_kpage (void *kpage)
{
#ifdef VM
  /* frame_free also removes the frame-table and eviction-queue entries. */
  frame_free (kpage);
#else
  palloc_free_page (kpage);
#endif
}
/* Adds a mapping from user virtual address UPAGE to kernel
virtual address KPAGE to the page table.
If WRITABLE is true, the user process may modify the page;
@@ -768,7 +809,7 @@ setup_stack (void **esp)
with palloc_get_page().
Returns true on success, false if UPAGE is already mapped or
if memory allocation fails. */
static bool
bool
install_page (void *upage, void *kpage, bool writable)
{
struct thread *t = thread_current ();

View File

@@ -8,4 +8,6 @@ int process_wait (tid_t);
void process_exit (void);
void process_activate (void);
bool install_page (void *upage, void *kpage, bool writable);
#endif /* userprog/process.h */

View File

@@ -11,6 +11,7 @@
#include "userprog/process.h"
#include "userprog/pagedir.h"
#include <stdio.h>
#include <stdbool.h>
#include <syscall-nr.h>
#define MAX_SYSCALL_ARGS 3
@@ -46,8 +47,11 @@ static unsigned syscall_tell (int fd);
static void syscall_close (int fd);
static struct open_file *fd_get_file (int fd);
static void validate_user_pointer (const void *start, size_t size);
static void validate_user_string (const char *str);
static void validate_user_pointer (const void *ptr, size_t size,
bool check_write);
static void validate_user_string (const char *str, bool check_write);
static int get_user (const uint8_t *);
static bool put_user (uint8_t *, uint8_t);
/* A struct defining a syscall_function pointer along with its arity. */
struct syscall_arguments
@@ -96,8 +100,9 @@ static void
syscall_handler (struct intr_frame *f)
{
/* First, read the system call number from the stack. */
validate_user_pointer (f->esp, sizeof (uintptr_t));
uintptr_t syscall_number = *(int *) f->esp;
validate_user_pointer (f->esp, sizeof (uintptr_t), false);
uintptr_t syscall_number = *(int *)f->esp;
thread_current ()->curr_esp = f->esp;
/* Ensures the number corresponds to a system call that can be handled. */
if (syscall_number >= LOOKUP_SIZE)
@@ -107,11 +112,10 @@ syscall_handler (struct intr_frame *f)
/* Next, read and copy the arguments from the stack pointer. */
validate_user_pointer (f->esp + sizeof (uintptr_t),
syscall.arity * sizeof (uintptr_t));
uintptr_t args[MAX_SYSCALL_ARGS] = {0};
syscall.arity * sizeof (uintptr_t), false);
uintptr_t args[MAX_SYSCALL_ARGS] = { 0 };
for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++)
args[i] = *(uintptr_t *) (f->esp + sizeof (uintptr_t) * (i + 1));
args[i] = *(uintptr_t *)(f->esp + sizeof (uintptr_t) * (i + 1));
/* Call the function that handles this system call with the arguments. When
there is a return value it is stored in f->eax. */
@@ -140,8 +144,7 @@ syscall_exit (int status)
static pid_t
syscall_exec (const char *cmd_line)
{
/* Validate the user string before executing the process. */
validate_user_string (cmd_line);
validate_user_string (cmd_line, false);
return process_execute (cmd_line); /* Returns the PID of the new process */
}
@@ -160,8 +163,7 @@ syscall_wait (pid_t pid)
static bool
syscall_create (const char *file, unsigned initial_size)
{
/* Validate the user string before creating the file. */
validate_user_string (file);
validate_user_string (file, false);
/* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
@@ -178,8 +180,7 @@ syscall_create (const char *file, unsigned initial_size)
static bool
syscall_remove (const char *file)
{
/* Validate the user string before removing the file. */
validate_user_string (file);
validate_user_string (file, false);
/* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
@@ -197,8 +198,7 @@ syscall_remove (const char *file)
static int
syscall_open (const char *file)
{
/* Validate the user string before opening the file. */
validate_user_string (file);
validate_user_string (file, false);
/* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
@@ -264,8 +264,7 @@ syscall_read (int fd, void *buffer, unsigned size)
if (fd < STDIN_FILENO || fd == STDOUT_FILENO)
return EXIT_FAILURE;
/* Validate the user buffer for the provided size before reading. */
validate_user_pointer (buffer, size);
validate_user_pointer (buffer, size, true);
if (fd == STDIN_FILENO)
{
@@ -308,8 +307,7 @@ syscall_write (int fd, const void *buffer, unsigned size)
if (fd <= 0)
return 0;
/* Validate the user buffer for the provided size before writing. */
validate_user_pointer (buffer, size);
validate_user_pointer (buffer, size, false);
if (fd == STDOUT_FILENO)
{
@@ -348,12 +346,7 @@ syscall_seek (int fd, unsigned position)
/* Find the file from the FD. If it does not exist, do nothing. */
struct open_file *file_info = fd_get_file (fd);
if (file_info != NULL)
{
/* File exists: Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
file_seek (file_info->file, position);
lock_release (&filesys_lock);
}
}
/* Handles the syscall for returning the next byte in a file referenced by
@@ -367,10 +360,7 @@ syscall_tell (int fd)
if (file_info == NULL)
return 0;
/* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock);
unsigned pos = file_tell (file_info->file);
lock_release (&filesys_lock);
/* Return the current position in the file. */
return pos;
@@ -459,63 +449,91 @@ fd_get_file (int fd)
return hash_entry (e, struct open_file, elem);
}
/* Validates if a block of memory starting at START and of size SIZE bytes is
fully contained within user virtual memory. Kills the thread (by exiting with
failure) if the memory is invalid. Otherwise, returns (nothing) normally.
If the size is 0, the function does no checks and returns the given ptr. */
/* Validates if a block of memory starting at PTR and of size SIZE bytes is
fully contained within valid user virtual memory. thread_exit () if the
memory is invalid.
If the size is 0, the function does no checks and returns PTR. */
static void
validate_user_pointer (const void *start, size_t size)
validate_user_pointer (const void *ptr, size_t size, bool check_write)
{
/* If the size is 0, we do not need to check anything. */
if (size == 0)
return;
const void *end = start + size - 1;
/* Check if the start and end pointers are valid user virtual addresses. */
if (start == NULL || !is_user_vaddr (start) || !is_user_vaddr (end))
/* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a
valid user virtual memory address. */
void *last = ptr + size - 1;
if (!is_user_vaddr (last))
syscall_exit (EXIT_FAILURE);
/* We now need to check if the entire memory block is mapped to physical
memory by the page table. */
for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
if (pagedir_get_page (thread_current ()->pagedir, ptr) == NULL)
syscall_exit (EXIT_FAILURE);
ptr = pg_round_down (ptr);
while (ptr <= last)
{
int result;
/* Check read access to pointer. */
if ((result = get_user (ptr)) == -1)
syscall_exit (EXIT_FAILURE);
/* Check write access to pointer (if required). */
if (check_write && !put_user (ptr, result))
syscall_exit (EXIT_FAILURE);
ptr += PGSIZE;
}
}
/* Validates if a string is fully contained within user virtual memory. Kills
the thread (by exiting with failure) if the memory is invalid. Otherwise,
returns (nothing) normally. */
/* Validates if a C-string starting at ptr is fully contained within valid
user virtual memory. thread_exit () if the memory is invalid. */
static void
validate_user_string (const char *str)
validate_user_string (const char *ptr, bool check_write)
{
/* Check if the string pointer is a valid user virtual address. */
if (str == NULL || !is_user_vaddr (str))
syscall_exit (EXIT_FAILURE);
size_t offset = (uintptr_t) ptr % PGSIZE;
/* Calculate the offset of the string within the (first) page. */
size_t offset = (uintptr_t) str % PGSIZE;
/* We move page by page, checking if the page is mapped to physical memory. */
for (;;)
{
void *page = pg_round_down (str);
{
void *page = pg_round_down (ptr);
/* If we reach addresses that are not mapped to physical memory before the
end of the string, the thread is terminated. */
if (!is_user_vaddr(page) ||
pagedir_get_page (thread_current ()->pagedir, page) == NULL)
syscall_exit (EXIT_FAILURE);
if (!is_user_vaddr (page))
syscall_exit (EXIT_FAILURE);
if (!is_user_vaddr (ptr))
syscall_exit (EXIT_FAILURE);
int result;
if ((result = get_user ((const uint8_t *)ptr)) == -1)
syscall_exit (EXIT_FAILURE);
if (check_write && !put_user ((uint8_t *)ptr, result))
syscall_exit (EXIT_FAILURE);
while (offset < PGSIZE)
while (offset < PGSIZE)
{
if (*str == '\0')
if (*ptr == '\0')
return; /* We reached the end of the string without issues. */
str++;
ptr++;
offset++;
}
offset = 0; /* Next page will start at the beginning. */
}
offset = 0;
}
}
/* PROVIDED BY SPEC.
   Reads a byte at user virtual address UADDR.
   UADDR must be below PHYS_BASE.
   Returns the byte value if successful, -1 if a segfault occurred.
   Relies on the page-fault handler's kernel-fault fixup (it sets EIP to
   the value in EAX and EAX to 0xffffffff), which is why the address of
   label "1:" is loaded into EAX before the dereference. */
static int
get_user (const uint8_t *uaddr)
{
  int result;
  asm ("movl $1f, %0; movzbl %1, %0; 1:" : "=&a"(result) : "m"(*uaddr));
  return result;
}
/* PROVIDED BY SPEC.
   Writes BYTE to user address UDST.
   UDST must be below PHYS_BASE.
   Returns true if successful, false if a segfault occurred.
   Like get_user (), depends on the page-fault handler replacing EIP with
   the recovery label stored in EAX and setting EAX to 0xffffffff on a
   kernel-mode fault. */
static bool
put_user (uint8_t *udst, uint8_t byte)
{
  int error_code;
  asm ("movl $1f, %0; movb %b2, %1; 1:"
       : "=&a"(error_code), "=m"(*udst)
       : "q"(byte));
  return error_code != -1;
}

256
src/vm/frame.c Normal file
View File

@@ -0,0 +1,256 @@
#include <debug.h>
#include <hash.h>
#include <list.h>
#include <string.h>
#include "frame.h"
#include "page.h"
#include "threads/malloc.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "threads/synch.h"
#include "devices/swap.h"
/* Hash table that maps every active frame's kernel virtual address
to its corresponding 'frame_metadata'.*/
struct hash frame_table;
/* Linked list used to represent the circular queue in the 'clock'
algorithm for page eviction. Iterating from the element that is
currently pointed at by 'next_victim' yields an ordering of the entries
from oldest to newest (in terms of when they were added or checked
for having been referenced by a process). */
struct list lru_list;
/* The next element in lru_list to be considered for eviction (oldest added
or referenced page in the circular queue). If this page has an
'accessed' bit of 0 when considering eviction, then it will be the next
victim. Otherwise, the next element in the queue is similarly considered. */
struct list_elem *next_victim = NULL;
/* Synchronisation variables. */
/* Protects access to 'lru_list'. */
struct lock lru_lock;
struct frame_metadata
{
void *frame; /* The kernel virtual address holding the frame. */
void *upage; /* The user virtual address pointing to the frame. */
struct thread *owner; /* Pointer to the thread that owns the frame. */
struct hash_elem hash_elem; /* Tracks the position of the frame metadata
within 'frame_table', whose key is the
kernel virtual address of the frame. */
struct list_elem list_elem; /* Tracks the position of the frame metadata
in either the 'active' or 'inactive' list,
so a victim can be chosen for eviction. */
};
hash_hash_func frame_metadata_hash;
hash_less_func frame_metadata_less;
static struct list_elem *lru_next (struct list_elem *e);
static struct list_elem *lru_prev (struct list_elem *e);
static struct frame_metadata *get_victim (void);
/* Sets up the frame subsystem: the frame (hash) table keyed on kernel
   virtual address via the frame_metadata hash/comparison callbacks, the
   'lru_list' eviction queue, and the lock guarding it. */
void
frame_init (void)
{
  lock_init (&lru_lock);
  list_init (&lru_list);
  hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
}
/* TODO: Consider synchronisation more closely (i.e. just for hash
   table). */
/* Attempt to allocate a frame for a user process, either by direct
   allocation of a user page if there is sufficient RAM, or by
   evicting a currently active page if memory allocated for user
   processes is full and storing it in swap. If swap is full in
   the former case, panic the kernel. Returns the kernel virtual
   address of the frame, or NULL if frame metadata could not be
   allocated. */
void *
frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
{
  struct frame_metadata *frame_metadata;
  flags |= PAL_USER;

  lock_acquire (&lru_lock);
  void *frame = palloc_get_page (flags);

  /* If a frame couldn't be allocated we must be out of main memory. Thus,
     obtain a victim page to replace with our page, and swap the victim
     into disk. */
  if (frame == NULL)
    {
      /* 1. Obtain victim. */
      if (next_victim == NULL)
        PANIC ("Couldn't allocate a single page to main memory!\n");

      struct frame_metadata *victim = get_victim ();
      ASSERT (victim != NULL); /* get_victim () should never return null. */

      /* 2. Swap out victim into disk. */
      size_t swap_slot = swap_out (victim->frame);
      page_set_swap (victim->owner, victim->upage, swap_slot);

      /* If zero flag is set, zero out the victim page. */
      if (flags & PAL_ZERO)
        memset (victim->frame, 0, PGSIZE);

      /* 3. Indicate that the new frame's metadata will be stored
         inside the same structure that stored the victim's metadata.
         As both the new frame and the victim frame share the same kernel
         virtual address, the hash map need not be updated, and neither
         the list_elem value as both share the same lru_list position. */
      frame_metadata = victim;
    }
  /* If sufficient main memory allows the frame to be directly allocated,
     we must update the frame table with a new entry, and grow lru_list. */
  else
    {
      /* Must own lru_lock here, as otherwise there is a race condition
         with next_victim either being NULL or uninitialized. */
      frame_metadata = malloc (sizeof *frame_metadata);
      if (frame_metadata == NULL)
        {
          /* Metadata allocation failed: undo the page allocation so the
             page isn't leaked, and report failure to the caller. */
          palloc_free_page (frame);
          lock_release (&lru_lock);
          return NULL;
        }
      frame_metadata->frame = frame;

      /* Newly allocated frames are pushed to the back of the circular queue
         represented by lru_list. Must explicitly handle the case where the
         circular queue is empty (when next_victim == NULL). */
      if (next_victim == NULL)
        {
          list_push_back (&lru_list, &frame_metadata->list_elem);
          next_victim = &frame_metadata->list_elem;
        }
      else
        {
          struct list_elem *lru_tail = lru_prev (next_victim);
          list_insert (lru_tail, &frame_metadata->list_elem);
        }

      hash_insert (&frame_table, &frame_metadata->hash_elem);
    }

  frame_metadata->upage = upage;
  frame_metadata->owner = owner;
  lock_release (&lru_lock);

  return frame_metadata->frame;
}
/* Attempt to deallocate a frame for a user process by removing it from the
   frame table as well as lru_list, and freeing the underlying page
   memory & metadata struct. Panics if the frame isn't active in memory. */
void
frame_free (void *frame)
{
  struct frame_metadata key_metadata;
  key_metadata.frame = frame;

  /* frame_alloc mutates frame_table while holding lru_lock, so the
     hash_delete must happen under the same lock to avoid a race. */
  lock_acquire (&lru_lock);
  struct hash_elem *e = hash_delete (&frame_table, &key_metadata.hash_elem);
  if (e == NULL)
    {
      lock_release (&lru_lock);
      PANIC ("Attempted to free a frame at kernel address %p, "
             "but this address is not allocated!\n", frame);
    }

  struct frame_metadata *frame_metadata =
      hash_entry (e, struct frame_metadata, hash_elem);

  /* If we're freeing the frame marked as the next victim, advance
     next_victim BEFORE unlinking the element: after list_remove the
     element's neighbour pointers are stale, so lru_next on it would
     walk a dangling link. A successor equal to the element itself
     means it was the only entry, so the queue becomes empty. */
  if (&frame_metadata->list_elem == next_victim)
    {
      struct list_elem *successor = lru_next (next_victim);
      next_victim = (successor == next_victim) ? NULL : successor;
    }
  list_remove (&frame_metadata->list_elem);
  lock_release (&lru_lock);

  free (frame_metadata);
  palloc_free_page (frame);
}
/* TODO: Account for page aliases when checking accessed bit. */
/* Selects the next eviction victim using the clock (second-chance)
   algorithm: starting from next_victim, each frame whose page-table
   'accessed' bit is set gets that bit cleared and is skipped; the first
   frame found with the bit clear is chosen. next_victim is advanced to
   the element after the victim so the hand keeps rotating.
   A pre-condition for calling this function is that the calling thread
   owns lru_lock and that lru_list is non-empty. Guaranteed to terminate:
   each pass clears accessed bits, so a second lap must find a victim. */
static struct frame_metadata *
get_victim (void)
{
  struct list_elem *e = next_victim;
  struct frame_metadata *frame_metadata;
  uint32_t *pd;
  void *upage;
  for (;;)
    {
      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
      pd = frame_metadata->owner->pagedir;
      upage = frame_metadata->upage;
      /* Advance the clock hand before testing, so next_victim can be set
         to the element following the chosen victim. */
      e = lru_next (e);
      if (!pagedir_is_accessed (pd, upage))
        break;
      /* Referenced since last pass: give a second chance and move on. */
      pagedir_set_accessed (pd, upage, false);
    }
  next_victim = e;
  return frame_metadata;
}
/* Hashes a frame-table entry by its key: the frame's kernel virtual
   address stored in the metadata struct. */
unsigned
frame_metadata_hash (const struct hash_elem *e, void *aux UNUSED)
{
  const struct frame_metadata *meta =
      hash_entry (e, struct frame_metadata, hash_elem);
  return hash_bytes (&meta->frame, sizeof meta->frame);
}
/* Orders frame-table entries by their kernel virtual address key.
   Returns true iff entry A's frame address precedes entry B's. */
bool
frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
                     void *aux UNUSED)
{
  const struct frame_metadata *lhs =
      hash_entry (a_, struct frame_metadata, hash_elem);
  const struct frame_metadata *rhs =
      hash_entry (b_, struct frame_metadata, hash_elem);
  return lhs->frame < rhs->frame;
}
/* Advances one step through lru_list treated as a circular queue:
   stepping past the tail wraps around to the head. */
static struct list_elem *
lru_next (struct list_elem *e)
{
  bool wraps = !list_empty (&lru_list) && e == list_back (&lru_list);
  return wraps ? list_front (&lru_list) : list_next (e);
}
/* Steps one element backwards through lru_list treated as a circular
   queue: stepping back from the head wraps around to the tail. */
static struct list_elem *
lru_prev (struct list_elem *e)
{
  bool wraps = !list_empty (&lru_list) && e == list_front (&lru_list);
  return wraps ? list_back (&lru_list) : list_prev (e);
}

11
src/vm/frame.h Normal file
View File

@@ -0,0 +1,11 @@
#ifndef VM_FRAME_H
#define VM_FRAME_H
#include "threads/thread.h"
#include "threads/palloc.h"
void frame_init (void);
void *frame_alloc (enum palloc_flags, void *, struct thread *);
void frame_free (void *frame);
#endif /* vm/frame.h */

20
src/vm/page.c Normal file
View File

@@ -0,0 +1,20 @@
#include "page.h"
/* Updates the 'owner' thread's page table entry for virtual address 'upage'
   to have a present bit of 0 and stores the specified swap slot value in the
   entry for later retrieval from disk. */
void
page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
{
  /* TODO: unimplemented stub — swap slots recorded by frame_alloc's
     eviction path are currently discarded, so swapped-out pages cannot
     yet be located on disk. Implement with the supplemental page table. */
}
/* Given that the page with user address 'upage' owned by 'owner' is flagged
   to be in the swap disk via the owner's page table, returns its stored
   swap slot. Otherwise panics the kernel. */
size_t
page_get_swap (struct thread *owner, void *upage)
{
  /* NOTE(review): unimplemented stub — control falls off the end of a
     non-void function, which is undefined behavior if the caller uses the
     return value. Implement (or at least PANIC here) before swap-in is
     wired up. */
}

9
src/vm/page.h Normal file
View File

@@ -0,0 +1,9 @@
#ifndef VM_PAGE_H
#define VM_PAGE_H
#include "threads/thread.h"
void page_set_swap (struct thread *, void *, size_t);
size_t page_get_swap (struct thread *, void *);
#endif /* vm/page.h */