From 83e044cf68d2d11ee96671d5c5492e4f804b8598 Mon Sep 17 00:00:00 2001 From: Gleb Koval Date: Fri, 8 Nov 2024 01:18:17 +0000 Subject: [PATCH 01/34] Let kernel handle its own page faults --- src/userprog/exception.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 0a20b53..8a800b7 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -145,6 +145,14 @@ page_fault (struct intr_frame *f) write = (f->error_code & PF_W) != 0; user = (f->error_code & PF_U) != 0; + /* Kernel page fault is further handled by the kernel itself. */ + if (!user) + { + f->eip = (void *)f->eax; + f->eax = 0xffffffff; + return; + } + /* To implement virtual memory, delete the rest of the function body, and replace it with code that brings in the page to which fault_addr refers. */ From 44f6a85163ee33cf09ec9180fea3a4f9a06b0e52 Mon Sep 17 00:00:00 2001 From: Gleb Koval Date: Fri, 8 Nov 2024 01:21:20 +0000 Subject: [PATCH 02/34] Add get_user and put_user provided by spec. --- src/userprog/syscall.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c index ccb02f3..b162253 100644 --- a/src/userprog/syscall.c +++ b/src/userprog/syscall.c @@ -11,6 +11,7 @@ #include "userprog/process.h" #include "userprog/pagedir.h" #include +#include #include static struct lock filesys_lock; @@ -47,6 +48,8 @@ static void syscall_close (int fd); static struct open_file *fd_get_file (int fd); static void *validate_user_pointer (const void *ptr, size_t size); +static int get_user (const uint8_t *); +static bool put_user (uint8_t *, uint8_t); /* A struct defining a syscall_function pointer along with its arity. */ typedef struct @@ -415,3 +418,29 @@ validate_user_pointer (const void *ptr, size_t size) return (void *) ptr; } + +/* PROVIDED BY SPEC. + Reads a byte at user virtual address UADDR. + UADDR must be below PHYS_BASE. + Returns the byte value if successful, -1 if a segfault occurred. */ +static int +get_user (const uint8_t *uaddr) +{ + int result; + asm ("movl $1f, %0; movzbl %1, %0; 1:" : "=&a"(result) : "m"(*uaddr)); + return result; +} + +/* PROVIDED BY SPEC. + Writes BYTE to user address UDST. + UDST must be below PHYS_BASE. + Returns true if successful, false if a segfault occurred. */ +static bool +put_user (uint8_t *udst, uint8_t byte) +{ + int error_code; + asm ("movl $1f, %0; movb %b2, %1; 1:" + : "=&a"(error_code), "=m"(*udst) + : "q"(byte)); + return error_code != -1; +} \ No newline at end of file From 9a6abab95ed569c98cb7e0cf87b0428ee023de71 Mon Sep 17 00:00:00 2001 From: Gleb Koval Date: Fri, 8 Nov 2024 01:23:45 +0000 Subject: [PATCH 03/34] Check access to user memory using page fault method (via get_user and put_user). 
--- src/userprog/syscall.c | 52 ++++++++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c index b162253..ca6f116 100644 --- a/src/userprog/syscall.c +++ b/src/userprog/syscall.c @@ -47,7 +47,8 @@ static unsigned syscall_tell (int fd); static void syscall_close (int fd); static struct open_file *fd_get_file (int fd); -static void *validate_user_pointer (const void *ptr, size_t size); +static void *validate_user_pointer (const void *ptr, size_t size, + bool check_write); static int get_user (const uint8_t *); static bool put_user (uint8_t *, uint8_t); @@ -99,8 +100,8 @@ static void syscall_handler (struct intr_frame *f) { /* First, read the system call number from the stack. */ - validate_user_pointer (f->esp, 1); - unsigned syscall_number = *(int *) f->esp; + validate_user_pointer (f->esp, 1, false); + unsigned syscall_number = *(int *)f->esp; /* Ensures the number corresponds to a system call that can be handled. */ if (syscall_number >= LOOKUP_SIZE) @@ -110,10 +111,10 @@ syscall_handler (struct intr_frame *f) /* Next, read and copy the arguments from the stack pointer. */ validate_user_pointer (f->esp + sizeof (uintptr_t), - syscall.arity * sizeof (uintptr_t)); - uintptr_t args[3] = {0}; - for (int i=0; i < syscall.arity; i++) - args[i] = *(uintptr_t *) (f->esp + sizeof (uintptr_t) * (i + 1)); + syscall.arity * sizeof (uintptr_t), false); + uintptr_t args[3] = { 0 }; + for (int i = 0; i < syscall.arity; i++) + args[i] = *(uintptr_t *)(f->esp + sizeof (uintptr_t) * (i + 1)); /* Call the function that handles this system call with the arguments. When there is a return value it is stored in f->eax. */ @@ -144,7 +145,7 @@ syscall_exit (int status) static pid_t syscall_exec (const char *cmd_line) { - validate_user_pointer (cmd_line, 1); + validate_user_pointer (cmd_line, 1, false); lock_acquire (&filesys_lock); pid_t pid = process_execute(cmd_line); @@ -167,7 +168,7 @@ syscall_wait (pid_t pid) static bool syscall_create (const char *file UNUSED, unsigned initial_size UNUSED) { - validate_user_pointer (file, 1); + validate_user_pointer (file, 1, false); lock_acquire (&filesys_lock); bool status = filesys_create (file, initial_size); @@ -182,7 +183,7 @@ syscall_create (const char *file UNUSED, unsigned initial_size UNUSED) static bool syscall_remove (const char *file) { - validate_user_pointer (file, 1); + validate_user_pointer (file, 1, false); lock_acquire (&filesys_lock); bool status = filesys_remove (file); @@ -198,7 +199,7 @@ syscall_remove (const char *file) static int syscall_open (const char *file) { - validate_user_pointer (file, 1); + validate_user_pointer (file, 1, false); lock_acquire (&filesys_lock); struct file *ptr = filesys_open (file); @@ -253,7 +254,7 @@ syscall_read (int fd, void *buffer, unsigned size) if (fd < 0 || fd == STDOUT_FILENO) return -1; - validate_user_pointer (buffer, size); + validate_user_pointer (buffer, size, true); if (fd == STDIN_FILENO) { @@ -290,7 +291,7 @@ syscall_write (int fd, const void *buffer, unsigned size) if (fd <= 0) return 0; - validate_user_pointer (buffer, size); + validate_user_pointer (buffer, size, false); if (fd == STDOUT_FILENO) { @@ -404,19 +405,26 @@ fd_get_file (int fd) } /* Validates if a block of memory starting at PTR and of size SIZE bytes is - fully contained within user virtual memory. Kills the thread (by calling - thread_exit) if the memory is invalid. Otherwise, returns the PTR given. + fully contained within user virtual memory. 
Returns NULL if the memory + is invalid. Otherwise, returns the PTR given. If the size is 0, the function does no checks and returns PTR.*/ static void * -validate_user_pointer (const void *ptr, size_t size) +validate_user_pointer (const void *ptr, size_t size, bool check_write) { - if (size > 0 && (ptr == NULL || - !is_user_vaddr (ptr) || - !is_user_vaddr (ptr + size - 1) || - pagedir_get_page (thread_current()->pagedir, ptr) == NULL)) + if (size == 0) + return ptr; + /* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a + valid user virtual memory address. */ + if (!is_user_vaddr (ptr + size - 1)) thread_exit (); - - return (void *) ptr; + /* Check read access to pointer. */ + int result; + if ((result = get_user (ptr)) == -1) + thread_exit (); + /* Check write access to pointer (if required). */ + if (check_write && !put_user (ptr, result)) + thread_exit (); + return ptr; } /* PROVIDED BY SPEC. From cf4bf90cbb06eaa90961bdeab41f83a99666a90a Mon Sep 17 00:00:00 2001 From: Gleb Koval Date: Tue, 12 Nov 2024 15:34:45 +0000 Subject: [PATCH 04/34] Implement user pointer checking for C strings --- src/userprog/syscall.c | 74 ++++++++++++++++++++++++++++-------------- 1 file changed, 50 insertions(+), 24 deletions(-) diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c index ca6f116..2b0a551 100644 --- a/src/userprog/syscall.c +++ b/src/userprog/syscall.c @@ -14,6 +14,8 @@ #include #include +#define MAX_SYSCALL_ARGS 3 + static struct lock filesys_lock; static unsigned fd_counter = MIN_USER_FD; @@ -47,8 +49,9 @@ static unsigned syscall_tell (int fd); static void syscall_close (int fd); static struct open_file *fd_get_file (int fd); -static void *validate_user_pointer (const void *ptr, size_t size, - bool check_write); +static void validate_user_pointer (const void *ptr, size_t size, + bool check_write); +static void validate_user_string (const char *str, bool check_write); static int get_user (const uint8_t *); static bool put_user (uint8_t *, uint8_t); @@ -100,8 +103,8 @@ static void syscall_handler (struct intr_frame *f) { /* First, read the system call number from the stack. */ - validate_user_pointer (f->esp, 1, false); - unsigned syscall_number = *(int *)f->esp; + validate_user_pointer (f->esp, sizeof (uintptr_t), false); + uintptr_t syscall_number = *(int *)f->esp; /* Ensures the number corresponds to a system call that can be handled. */ if (syscall_number >= LOOKUP_SIZE) @@ -112,8 +115,8 @@ syscall_handler (struct intr_frame *f) /* Next, read and copy the arguments from the stack pointer. */ validate_user_pointer (f->esp + sizeof (uintptr_t), syscall.arity * sizeof (uintptr_t), false); - uintptr_t args[3] = { 0 }; - for (int i = 0; i < syscall.arity; i++) + uintptr_t args[MAX_SYSCALL_ARGS] = { 0 }; + for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++) args[i] = *(uintptr_t *)(f->esp + sizeof (uintptr_t) * (i + 1)); /* Call the function that handles this system call with the arguments. 
When @@ -145,7 +148,7 @@ syscall_exit (int status) static pid_t syscall_exec (const char *cmd_line) { - validate_user_pointer (cmd_line, 1, false); + validate_user_string (cmd_line, false); lock_acquire (&filesys_lock); pid_t pid = process_execute(cmd_line); @@ -168,7 +171,7 @@ syscall_wait (pid_t pid) static bool syscall_create (const char *file UNUSED, unsigned initial_size UNUSED) { - validate_user_pointer (file, 1, false); + validate_user_string (file, false); lock_acquire (&filesys_lock); bool status = filesys_create (file, initial_size); @@ -183,7 +186,7 @@ syscall_create (const char *file UNUSED, unsigned initial_size UNUSED) static bool syscall_remove (const char *file) { - validate_user_pointer (file, 1, false); + validate_user_string (file, false); lock_acquire (&filesys_lock); bool status = filesys_remove (file); @@ -199,7 +202,7 @@ syscall_remove (const char *file) static int syscall_open (const char *file) { - validate_user_pointer (file, 1, false); + validate_user_string (file, false); lock_acquire (&filesys_lock); struct file *ptr = filesys_open (file); @@ -405,26 +408,49 @@ fd_get_file (int fd) } /* Validates if a block of memory starting at PTR and of size SIZE bytes is - fully contained within user virtual memory. Returns NULL if the memory - is invalid. Otherwise, returns the PTR given. - If the size is 0, the function does no checks and returns PTR.*/ -static void * + fully contained within valid user virtual memory. thread_exit () if the + memory is invalid. + If the size is 0, the function does no checks and returns PTR. */ +static void validate_user_pointer (const void *ptr, size_t size, bool check_write) { if (size == 0) - return ptr; + return; /* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a valid user virtual memory address. */ - if (!is_user_vaddr (ptr + size - 1)) + void *last = ptr + size - 1; + if (!is_user_vaddr (last)) thread_exit (); - /* Check read access to pointer. */ - int result; - if ((result = get_user (ptr)) == -1) - thread_exit (); - /* Check write access to pointer (if required). */ - if (check_write && !put_user (ptr, result)) - thread_exit (); - return ptr; + for (; ptr <= last; ptr++) + { + int result; + /* Check read access to pointer. */ + if ((result = get_user (ptr)) == -1) + thread_exit (); + /* Check write access to pointer (if required). */ + if (check_write && !put_user (ptr, result)) + thread_exit (); + } +} + +/* Validates of a C-string starting at ptr is fully contained within valid + user virtual memory. thread_exit () if the memory is invalid. */ +static void +validate_user_string (const char *ptr, bool check_write) +{ + while (true) + { + if (!is_user_vaddr (ptr)) + thread_exit (); + int result; + if ((result = get_user ((const uint8_t *)ptr)) == -1) + thread_exit (); + if (check_write && !put_user ((uint8_t *)ptr, result)) + thread_exit (); + if (*ptr == '\0') + return; + ptr++; + } } /* PROVIDED BY SPEC. 
From 59e7a64f8e1e538c010cf6c31974c4bf9d5dcbe1 Mon Sep 17 00:00:00 2001 From: Gleb Koval Date: Tue, 12 Nov 2024 15:48:22 +0000 Subject: [PATCH 05/34] Only check user pages rather than all bytes in-between, for known-size pointers --- src/userprog/syscall.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c index 2b0a551..1be6c77 100644 --- a/src/userprog/syscall.c +++ b/src/userprog/syscall.c @@ -421,7 +421,8 @@ validate_user_pointer (const void *ptr, size_t size, bool check_write) void *last = ptr + size - 1; if (!is_user_vaddr (last)) thread_exit (); - for (; ptr <= last; ptr++) + ptr = pg_round_down (ptr); + while (ptr <= last) { int result; /* Check read access to pointer. */ @@ -430,6 +431,7 @@ validate_user_pointer (const void *ptr, size_t size, bool check_write) /* Check write access to pointer (if required). */ if (check_write && !put_user (ptr, result)) thread_exit (); + ptr += PGSIZE; } } From 3ef5264b6e3858b6e89311795a7f5e05ec02cb6a Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Tue, 26 Nov 2024 04:43:25 +0000 Subject: [PATCH 06/34] feat: allow stack to grow for process up to 8MB in size --- src/Makefile.build | 1 + src/userprog/exception.c | 7 +++++++ src/vm/stackgrowth.c | 38 ++++++++++++++++++++++++++++++++++++++ src/vm/stackgrowth.h | 11 +++++++++++ 4 files changed, 57 insertions(+) create mode 100644 src/vm/stackgrowth.c create mode 100644 src/vm/stackgrowth.h diff --git a/src/Makefile.build b/src/Makefile.build index c0d535e..4167f37 100644 --- a/src/Makefile.build +++ b/src/Makefile.build @@ -63,6 +63,7 @@ userprog_SRC += userprog/tss.c # TSS management. # Virtual memory code. vm_SRC += devices/swap.c # Swap block manager. +vm_SRC += vm/stackgrowth.c #vm_SRC = vm/file.c # Some other file. # Filesystem code. diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 0a20b53..500cb89 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -4,6 +4,7 @@ #include "userprog/gdt.h" #include "threads/interrupt.h" #include "threads/thread.h" +#include "vm/stackgrowth.h" /* Number of page faults processed. */ static long long page_fault_cnt; @@ -145,6 +146,12 @@ page_fault (struct intr_frame *f) write = (f->error_code & PF_W) != 0; user = (f->error_code & PF_U) != 0; + if (user && needs_new_page (fault_addr, f->esp)) + { + if (grow_stack (fault_addr)) + return; + } + /* To implement virtual memory, delete the rest of the function body, and replace it with code that brings in the page to which fault_addr refers. 
*/ diff --git a/src/vm/stackgrowth.c b/src/vm/stackgrowth.c new file mode 100644 index 0000000..164eb9d --- /dev/null +++ b/src/vm/stackgrowth.c @@ -0,0 +1,38 @@ +#include +#include "stackgrowth.h" +#include "threads/palloc.h" +#include "threads/thread.h" +#include "threads/vaddr.h" +#include "userprog/pagedir.h" + +/* Validates a given address for being <=32 bytes away from the stack pointer or + above the stack */ +bool needs_new_page (void *addr, void *esp) +{ + return (is_user_vaddr (addr) && + (uint32_t*)addr >= ((uint32_t*)esp - 32) && + ((PHYS_BASE - pg_round_down (addr)) + <= MAX_STACK_SIZE)); +} + +/* Extends the stack by the necessary number of pages */ +bool grow_stack (void *addr) +{ + struct thread *t = thread_current (); + void *last_page = pg_round_down (addr); + + uint8_t *new_page = palloc_get_page (PAL_USER | PAL_ZERO); + if ( new_page == NULL) + return false; + + bool added_page = pagedir_get_page (t->pagedir, last_page) == NULL + && pagedir_set_page (t->pagedir, last_page, new_page, true); + + if (!added_page) { + palloc_free_page (new_page); + return false; + } + return true; + + +} \ No newline at end of file diff --git a/src/vm/stackgrowth.h b/src/vm/stackgrowth.h new file mode 100644 index 0000000..0502210 --- /dev/null +++ b/src/vm/stackgrowth.h @@ -0,0 +1,11 @@ +#ifndef GROWSTACK_H +#define GROWSTACK_H + +#include + +#define MAX_STACK_SIZE 8388608 // (8MB) + +bool needs_new_page (void *addr, void *esp); +bool grow_stack (void *addr); + +#endif //GROWSTACK_H From af7f2ba873e482ea210ca737e5b79da204f1d4ab Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Tue, 26 Nov 2024 04:54:00 +0000 Subject: [PATCH 07/34] Fix: Magic number in stackgrowth.c --- src/vm/stackgrowth.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/vm/stackgrowth.c b/src/vm/stackgrowth.c index 164eb9d..7d5470d 100644 --- a/src/vm/stackgrowth.c +++ b/src/vm/stackgrowth.c @@ -5,14 +5,16 @@ #include "threads/vaddr.h" #include "userprog/pagedir.h" +#define MAX_STACK_ACCESS_DIST 32 + /* Validates a given address for being <=32 bytes away from the stack pointer or above the stack */ bool needs_new_page (void *addr, void *esp) { return (is_user_vaddr (addr) && - (uint32_t*)addr >= ((uint32_t*)esp - 32) && - ((PHYS_BASE - pg_round_down (addr)) - <= MAX_STACK_SIZE)); + (uint32_t*)addr >= ((uint32_t*)esp - MAX_STACK_ACCESS_DIST) && + ((PHYS_BASE - pg_round_down (addr)) + <= MAX_STACK_SIZE)); } /* Extends the stack by the necessary number of pages */ From c670c29e47c009c4ccb0c9bb02bba06bb300f878 Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Wed, 27 Nov 2024 18:57:20 +0000 Subject: [PATCH 08/34] update stack growth header to fit virtual memory naming format --- src/vm/stackgrowth.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/vm/stackgrowth.h b/src/vm/stackgrowth.h index 0502210..a23e481 100644 --- a/src/vm/stackgrowth.h +++ b/src/vm/stackgrowth.h @@ -1,5 +1,5 @@ -#ifndef GROWSTACK_H -#define GROWSTACK_H +#ifndef VM_GROWSTACK_H +#define VM_GROWSTACK_H #include @@ -8,4 +8,4 @@ bool needs_new_page (void *addr, void *esp); bool grow_stack (void *addr); -#endif //GROWSTACK_H +#endif /* vm/frame.h */ From c74a8c55aae4d4607670b9d0f192a3cc891acd95 Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Wed, 27 Nov 2024 19:21:43 +0000 Subject: [PATCH 09/34] Implement stack growth for system calls and add stack pointer tracking to thread --- src/threads/thread.h | 4 ++++ src/userprog/syscall.c | 30 +++++++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 
3 deletions(-) diff --git a/src/threads/thread.h b/src/threads/thread.h index 4a88577..60f91ce 100644 --- a/src/threads/thread.h +++ b/src/threads/thread.h @@ -143,6 +143,10 @@ struct thread struct hash open_files; /* Hash Table of FD -> Struct File. */ #endif +#ifdef VM + void *curr_esp; +#endif + /* Owned by thread.c. */ unsigned magic; /* Detects stack overflow. */ }; diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c index 3efe7b5..7fbc939 100644 --- a/src/userprog/syscall.c +++ b/src/userprog/syscall.c @@ -10,6 +10,9 @@ #include "threads/synch.h" #include "userprog/process.h" #include "userprog/pagedir.h" +#ifdef VM +#include "vm/stackgrowth.h" +#endif #include #include @@ -98,6 +101,7 @@ syscall_handler (struct intr_frame *f) /* First, read the system call number from the stack. */ validate_user_pointer (f->esp, sizeof (uintptr_t)); uintptr_t syscall_number = *(int *) f->esp; + thread_current ()->curr_esp = f->esp; /* Ensures the number corresponds to a system call that can be handled. */ if (syscall_number >= LOOKUP_SIZE) @@ -451,6 +455,20 @@ fd_get_file (int fd) return hash_entry (e, struct open_file, elem); } +static bool +try_alloc_new_page (const void *ptr) +{ + if (needs_new_page (ptr, thread_current()->curr_esp)) + { + if (!grow_stack (ptr)) + return 0; + else + return 1; + } + else + return 0; +} + /* Validates if a block of memory starting at START and of size SIZE bytes is fully contained within user virtual memory. Kills the thread (by exiting with failure) if the memory is invalid. Otherwise, returns (nothing) normally. @@ -472,7 +490,10 @@ validate_user_pointer (const void *start, size_t size) memory by the page table. */ for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE) if (pagedir_get_page (thread_current ()->pagedir, ptr) == NULL) - syscall_exit (EXIT_FAILURE); + { + if (!try_alloc_new_page (ptr)) + syscall_exit (EXIT_FAILURE); + } } /* Validates if a string is fully contained within user virtual memory. Kills @@ -495,10 +516,13 @@ validate_user_string (const char *str) /* If we reach addresses that are not mapped to physical memory before the end of the string, the thread is terminated. 
*/ - if (!is_user_vaddr(page) || - pagedir_get_page (thread_current ()->pagedir, page) == NULL) + if (!is_user_vaddr(page)) syscall_exit (EXIT_FAILURE); + if (pagedir_get_page (thread_current ()->pagedir, page) == NULL) + if (!try_alloc_new_page (str)) + syscall_exit (EXIT_FAILURE); + while (offset < PGSIZE) { if (*str == '\0') From 4f84a83611aad4bde85641d83c5a7d00f2ec051e Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Wed, 27 Nov 2024 19:41:22 +0000 Subject: [PATCH 10/34] Refactor: abstract new page allocation to one general function and make helper functions static --- src/userprog/exception.c | 4 ++-- src/userprog/syscall.c | 25 +++++++------------------ src/vm/stackgrowth.c | 30 ++++++++++++++++++++++++------ src/vm/stackgrowth.h | 3 +-- 4 files changed, 34 insertions(+), 28 deletions(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 500cb89..272325e 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -146,9 +146,9 @@ page_fault (struct intr_frame *f) write = (f->error_code & PF_W) != 0; user = (f->error_code & PF_U) != 0; - if (user && needs_new_page (fault_addr, f->esp)) + if (user) { - if (grow_stack (fault_addr)) + if (try_alloc_new_page (fault_addr, f->esp)) return; } diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c index 7fbc939..26a37e7 100644 --- a/src/userprog/syscall.c +++ b/src/userprog/syscall.c @@ -455,20 +455,6 @@ fd_get_file (int fd) return hash_entry (e, struct open_file, elem); } -static bool -try_alloc_new_page (const void *ptr) -{ - if (needs_new_page (ptr, thread_current()->curr_esp)) - { - if (!grow_stack (ptr)) - return 0; - else - return 1; - } - else - return 0; -} - /* Validates if a block of memory starting at START and of size SIZE bytes is fully contained within user virtual memory. Kills the thread (by exiting with failure) if the memory is invalid. Otherwise, returns (nothing) normally. @@ -480,6 +466,8 @@ validate_user_pointer (const void *start, size_t size) if (size == 0) return; + struct thread *t = thread_current (); + const void *end = start + size - 1; /* Check if the start and end pointers are valid user virtual addresses. */ @@ -489,9 +477,9 @@ validate_user_pointer (const void *start, size_t size) /* We now need to check if the entire memory block is mapped to physical memory by the page table. */ for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE) - if (pagedir_get_page (thread_current ()->pagedir, ptr) == NULL) + if (pagedir_get_page (t->pagedir, ptr) == NULL) { - if (!try_alloc_new_page (ptr)) + if (!try_alloc_new_page (ptr, t->curr_esp)) syscall_exit (EXIT_FAILURE); } } @@ -508,6 +496,7 @@ validate_user_string (const char *str) /* Calculate the offset of the string within the (first) page. */ size_t offset = (uintptr_t) str % PGSIZE; + struct thread *t = thread_current (); /* We move page by page, checking if the page is mapped to physical memory. 
*/ for (;;) @@ -519,8 +508,8 @@ validate_user_string (const char *str) if (!is_user_vaddr(page)) syscall_exit (EXIT_FAILURE); - if (pagedir_get_page (thread_current ()->pagedir, page) == NULL) - if (!try_alloc_new_page (str)) + if (pagedir_get_page (t->pagedir, page) == NULL) + if (!try_alloc_new_page (str, t->curr_esp)) syscall_exit (EXIT_FAILURE); while (offset < PGSIZE) diff --git a/src/vm/stackgrowth.c b/src/vm/stackgrowth.c index 7d5470d..50bdc39 100644 --- a/src/vm/stackgrowth.c +++ b/src/vm/stackgrowth.c @@ -7,9 +7,28 @@ #define MAX_STACK_ACCESS_DIST 32 -/* Validates a given address for being <=32 bytes away from the stack pointer or - above the stack */ -bool needs_new_page (void *addr, void *esp) +static bool needs_new_page (const void *addr, const void *esp); +static bool grow_stack (const void *addr); + +bool +try_alloc_new_page (const void *ptr, const void *esp) +{ + if (needs_new_page (ptr, esp)) + { + if (!grow_stack (ptr)) + return 0; + else + return 1; + } + else + return 0; +} + +/* Validates a given address for being a stack query and not a generic erroneous + address + */ +static bool +needs_new_page (const void *addr, const void *esp) { return (is_user_vaddr (addr) && (uint32_t*)addr >= ((uint32_t*)esp - MAX_STACK_ACCESS_DIST) && @@ -18,7 +37,8 @@ bool needs_new_page (void *addr, void *esp) } /* Extends the stack by the necessary number of pages */ -bool grow_stack (void *addr) +static bool +grow_stack (const void *addr) { struct thread *t = thread_current (); void *last_page = pg_round_down (addr); @@ -35,6 +55,4 @@ bool grow_stack (void *addr) return false; } return true; - - } \ No newline at end of file diff --git a/src/vm/stackgrowth.h b/src/vm/stackgrowth.h index a23e481..acd123e 100644 --- a/src/vm/stackgrowth.h +++ b/src/vm/stackgrowth.h @@ -5,7 +5,6 @@ #define MAX_STACK_SIZE 8388608 // (8MB) -bool needs_new_page (void *addr, void *esp); -bool grow_stack (void *addr); +bool try_alloc_new_page (const void *ptr, const void *esp); #endif /* vm/frame.h */ From 149bb42889705ee598b07f3012147d3f81f5abd5 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Fri, 29 Nov 2024 19:30:47 +0000 Subject: [PATCH 11/34] feat: implement clock (second-chance) page eviction algorithm --- src/Makefile.build | 2 +- src/userprog/process.c | 35 +++++--- src/vm/frame.c | 185 +++++++++++++++++++++++++++++++++-------- src/vm/frame.h | 3 +- src/vm/page.c | 20 +++++ src/vm/page.h | 9 ++ 6 files changed, 203 insertions(+), 51 deletions(-) create mode 100644 src/vm/page.c create mode 100644 src/vm/page.h diff --git a/src/Makefile.build b/src/Makefile.build index 4e57a13..7778f57 100644 --- a/src/Makefile.build +++ b/src/Makefile.build @@ -63,8 +63,8 @@ userprog_SRC += userprog/tss.c # TSS management. # Virtual memory code. vm_SRC += vm/frame.c # Frame table manager. +vm_SRC += vm/page.c # Page table manager. vm_SRC += devices/swap.c # Swap block manager. -#vm_SRC = vm/file.c # Some other file. # Filesystem code. filesys_SRC = filesys/filesys.c # Filesystem core. 
diff --git a/src/userprog/process.c b/src/userprog/process.c index a8f1d10..aa5091c 100644 --- a/src/userprog/process.c +++ b/src/userprog/process.c @@ -116,7 +116,7 @@ process_execute (const char *cmd) return tid; } -static void *get_usr_kpage (enum palloc_flags flags); +static void *get_usr_kpage (enum palloc_flags flags, void *upage); static void free_usr_kpage (void *kpage); static bool install_page (void *upage, void *kpage, bool writable); @@ -257,12 +257,13 @@ process_init_stack (char *cmd_saveptr, void **esp, char *file_name) int pages_needed = DIV_CEIL (overflow_bytes, PGSIZE); /* Allocate the pages and map them to the user process. */ + void *upage; + uint8_t *kpage; for (int i = 1; i < pages_needed + 1; i++) { - uint8_t *kpage = get_usr_kpage (PAL_ZERO); - if (!install_page (((uint8_t *) PHYS_BASE) - PGSIZE * (i + 1), - kpage, true)) - return false; + upage = ((uint8_t *) PHYS_BASE) - PGSIZE * (i + 1); + kpage = get_usr_kpage (PAL_ZERO, upage); + if (!install_page (upage, kpage, true)) return false; } } @@ -710,7 +711,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage, if (kpage == NULL){ /* Get a new page of memory. */ - kpage = get_usr_kpage (0); + kpage = get_usr_kpage (0, upage); if (kpage == NULL){ return false; } @@ -752,11 +753,13 @@ setup_stack (void **esp) { uint8_t *kpage; bool success = false; - - kpage = get_usr_kpage (PAL_ZERO); + + void *upage = ((uint8_t *) PHYS_BASE) - PGSIZE; + + kpage = get_usr_kpage (PAL_ZERO, upage); if (kpage != NULL) { - success = install_page (((uint8_t *) PHYS_BASE) - PGSIZE, kpage, true); + success = install_page (upage, kpage, true); if (success) *esp = PHYS_BASE; else @@ -765,14 +768,20 @@ setup_stack (void **esp) return success; } -/* Claims a page from the user pool and returns its kernel address, - updating the frame table if VM is enabled. */ +/* Claims a page from the user pool for ownership by the current thread + and returns its kernel address, updating the frame table if VM + is enabled. Requires the intended virtual address for where the page + will be installed. */ static void * -get_usr_kpage (enum palloc_flags flags) +get_usr_kpage (enum palloc_flags flags, void *upage) { void *page; #ifdef VM - page = frame_alloc (flags); + struct thread *t = thread_current (); + if (pagedir_get_page (t->pagedir, upage) != NULL) + return NULL; + else + page = frame_alloc (flags, upage, t); #else page = palloc_get_page (flags | PAL_USER); #endif diff --git a/src/vm/frame.c b/src/vm/frame.c index b030c59..8ed8f97 100644 --- a/src/vm/frame.c +++ b/src/vm/frame.c @@ -1,34 +1,42 @@ #include #include #include +#include #include "frame.h" +#include "page.h" #include "threads/malloc.h" +#include "threads/vaddr.h" +#include "userprog/pagedir.h" #include "threads/synch.h" +#include "devices/swap.h" /* Hash table that maps every active frame's kernel virtual address to its corresponding 'frame_metadata'.*/ struct hash frame_table; -/* Linked list of frame_metadata whose pages are predicted to currently - be in the working set of a process. They are not considered for - eviction, but are considered for demotion to the 'inactive' list. */ -struct list active_list; +/* Linked list used to represent the circular queue in the 'clock' + algorithm for page eviction. Iterating from the element that is + currently pointed at by 'next_victim' yields an ordering of the entries + from oldest to newest (in terms of when they were added or checked + for having been referenced by a process). 
*/
+struct list lru_list;
 
-/* Linked list of frame_metadata whose pages are predicted to leave the
-   working set of their processes soon, so are considered for eviction.
-   Pages are considered for eviction from the tail end, and are initially
-   demoted to 'inactive' at the head. */
-struct list inactive_list;
+/* The next element in lru_list to be considered for eviction (oldest added
+   or referenced page in the circular queue). If this page has an
+   'accessed' bit of 0 when considering eviction, then it will be the next
+   victim. Otherwise, the next element in the queue is similarly considered. */
+struct list_elem *next_victim = NULL;
 
 /* Synchronisation variables. */
-/* Ensures mutual exclusion to accessing the 'head' and first element of
-   'inactive_list', which is accessed every time a frame is allocated. */
-struct lock inactive_head_lock;
+/* Protects access to 'lru_list'. */
+struct lock lru_lock;
 
 struct frame_metadata
   {
     void *frame;          /* The kernel virtual address holding the frame. */
+    void *upage;          /* The user virtual address pointing to the frame. */
+    struct thread *owner; /* Pointer to the thread that owns the frame. */
     struct hash_elem hash_elem;  /* Tracks the position of the frame metadata
                                     within 'frame_table', whose key is the
                                     kernel virtual address of the frame. */
@@ -40,56 +48,96 @@ struct frame_metadata
 hash_hash_func frame_metadata_hash;
 hash_less_func frame_metadata_less;
 
+static struct list_elem *lru_next (struct list_elem *e);
+static struct list_elem *lru_prev (struct list_elem *e);
+static struct frame_metadata *get_victim (void);
+
 /* Initialize the frame system by initializing the frame (hash) table with the
    frame_metadata hashing and comparison functions, as well as initializing
-   the active & inactive lists. Also initializes the system's synchronisation
-   primitives. */
+   'lru_list' and its associated synchronisation primitives. */
 void
 frame_init (void)
 {
   hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
-  list_init (&active_list);
-  list_init (&inactive_list);
-  lock_init (&inactive_head_lock);
+  list_init (&lru_list);
+  lock_init (&lru_lock);
 }
 
+/* TODO: Consider synchronisation more closely (i.e. just for hash
+   table). */
 /* Attempt to allocate a frame for a user process, either by direct allocation
    of a user page if there is sufficient RAM, or by evicting a currently
    active page if memory allocated for user processes is full and storing
    it in swap. If swap is full in the latter case, panic the kernel. */
 void *
-frame_alloc (enum palloc_flags flags)
+frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
 {
+  struct frame_metadata *frame_metadata;
   flags |= PAL_USER;
-  
+
+  lock_acquire (&lru_lock);
   void *frame = palloc_get_page (flags);
+
+  /* If a frame couldn't be allocated we must be out of main memory. Thus,
+     obtain a victim page to replace with our page, and swap the victim
+     into disk. */
   if (frame == NULL)
     {
-      /* TODO: Find victim page to replace, and swap it with this new page. */
-      return NULL;
+      /* 1. Obtain victim. */
+      if (next_victim == NULL)
+        PANIC ("Couldn't allocate a single page to main memory!\n");
+
+      struct frame_metadata *victim = get_victim ();
+      ASSERT (victim != NULL); /* get_victim () should never return null. */
+
+      /* 2. Swap out victim into disk. */
+      size_t swap_slot = swap_out (victim->frame);
+      page_set_swap (victim->owner, victim->upage, swap_slot);
+
+      /* If zero flag is set, zero out the victim page. */
+      if (flags & PAL_ZERO)
+        memset (victim->frame, 0, PGSIZE);
+
+      /* 3.
Indicate that the new frame's metadata will be stored + inside the same structure that stored the victim's metadata. + As both the new frame and the victim frame share the same kernel + virtual address, the hash map need not be updated, and neither + the list_elem value as both share the same lru_list position. */ + frame_metadata = victim; } - struct frame_metadata *frame_metadata = - malloc (sizeof (struct frame_metadata)); - frame_metadata->frame = frame; + /* If sufficient main memory allows the frame to be directly allocated, + we must update the frame table with a new entry, and grow lru_list. */ + else + { + /* Must own lru_lock here, as otherwise there is a race condition + with next_victim either being NULL or uninitialized. */ + frame_metadata = malloc (sizeof (struct frame_metadata)); + frame_metadata->frame = frame; - /* Newly faulted pages begin at the head of the inactive list. */ - lock_acquire (&inactive_head_lock); - list_push_front (&inactive_list, &frame_metadata->list_elem); - lock_release (&inactive_head_lock); + /* Newly allocated frames are pushed to the back of the circular queue + represented by lru_list. */ + struct list_elem *lru_tail = lru_prev (next_victim); + list_insert (lru_tail, &frame_metadata->list_elem); - /* Finally, insert frame metadata within the frame table, with the key as its - allocated kernel address. */ - hash_replace (&frame_table, &frame_metadata->hash_elem); + hash_insert (&frame_table, &frame_metadata->hash_elem); - return frame; + if (next_victim == NULL) + next_victim = &frame_metadata->list_elem; + } + + frame_metadata->upage = upage; + frame_metadata->owner = owner; + lock_release (&lru_lock); + + return frame_metadata->frame; } /* Attempt to deallocate a frame for a user process by removing it from the - frame table as well as active/inactive list, and freeing the underlying - page memory. Panics if the frame isn't active in memory. */ + frame table as well as lru_list, and freeing the underlying page + memory & metadata struct. Panics if the frame isn't active in memory. */ void frame_free (void *frame) { @@ -98,17 +146,58 @@ frame_free (void *frame) struct hash_elem *e = hash_delete (&frame_table, &key_metadata.hash_elem); - if (e == NULL) PANIC ("Attempted to free a frame without a corresponding " - "kernel address!\n"); + if (e == NULL) PANIC ("Attempted to free a frame at kernel address %p, " + "but this address is not allocated!\n", frame); struct frame_metadata *frame_metadata = hash_entry (e, struct frame_metadata, hash_elem); + lock_acquire (&lru_lock); list_remove (&frame_metadata->list_elem); + + /* If we're freeing the frame marked as the next victim, update + next_victim to either be the next least recently used page, or NULL + if no pages are loaded in main memory. */ + if (&frame_metadata->list_elem == next_victim) + { + if (list_empty (&lru_list)) + next_victim = NULL; + else + next_victim = lru_next (next_victim); + } + lock_release (&lru_lock); + free (frame_metadata); palloc_free_page (frame); } +/* TODO: Account for page aliases when checking accessed bit. */ +/* A pre-condition for calling this function is that the calling thread + owns lru_lock and that lru_list is non-empty. 
*/ +static struct frame_metadata * +get_victim (void) + { + struct list_elem *e = next_victim; + struct frame_metadata *frame_metadata; + uint32_t *pd; + void *upage; + for (;;) + { + frame_metadata = list_entry (e, struct frame_metadata, list_elem); + pd = frame_metadata->owner->pagedir; + upage = frame_metadata->upage; + e = lru_next (e); + + if (!pagedir_is_accessed (pd, upage)) + break; + + pagedir_set_accessed (pd, upage, false); + } + + next_victim = e; + return frame_metadata; + } + /* Hash function for frame metadata, used for storing entries in the frame table. */ unsigned @@ -135,3 +224,27 @@ frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_, return a->frame < b->frame; } +/* Returns the next recently used element after the one provided, which + is achieved by iterating through lru_list like a circular queue + (wrapping around the list at the tail). */ +static struct list_elem * +lru_next (struct list_elem *e) +{ + if (!list_empty (&lru_list) && e == list_back (&lru_list)) + return list_front (&lru_list); + + return list_next (e); +} + +/* Returns the previous recently used element after the one provided, which + is achieved by iterating through lru_list like a circular queue + (wrapping around the list at the head). */ +static struct list_elem * +lru_prev (struct list_elem *e) +{ + if (!list_empty (&lru_list) && e == list_front (&lru_list)) + return list_back (&lru_list); + + return list_prev (e); +} + diff --git a/src/vm/frame.h b/src/vm/frame.h index 8e52ec2..93081d3 100644 --- a/src/vm/frame.h +++ b/src/vm/frame.h @@ -1,10 +1,11 @@ #ifndef VM_FRAME_H #define VM_FRAME_H +#include "threads/thread.h" #include "threads/palloc.h" void frame_init (void); -void *frame_alloc (enum palloc_flags); +void *frame_alloc (enum palloc_flags, void *, struct thread *); void frame_free (void *frame); #endif /* vm/frame.h */ diff --git a/src/vm/page.c b/src/vm/page.c new file mode 100644 index 0000000..8ebc71c --- /dev/null +++ b/src/vm/page.c @@ -0,0 +1,20 @@ +#include "page.h" + +/* Updates the 'owner' thread's page table entry for virtual address 'upage' + to have a present bit of 0 and stores the specified swap slot value in the + entry for later retrieval from disk. */ +void +page_set_swap (struct thread *owner, void *upage, size_t swap_slot) +{ + +} + +/* Given that the page with user address 'upage' owned by 'owner' is flagged + to be in the swap disk via the owner's page table, returns its stored + swap slot. Otherwise panics the kernel. 
*/ +size_t +page_get_swap (struct thread *owner, void *upage) +{ + +} + diff --git a/src/vm/page.h b/src/vm/page.h new file mode 100644 index 0000000..7259ca9 --- /dev/null +++ b/src/vm/page.h @@ -0,0 +1,9 @@ +#ifndef VM_PAGE_H +#define VM_PAGE_H + +#include "threads/thread.h" + +void page_set_swap (struct thread *, void *, size_t); +size_t page_get_swap (struct thread *, void *); + +#endif /* vm/frame.h */ From 5c661c2e24bc83c1229516ea193b3c87237c2aa5 Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Fri, 29 Nov 2024 23:49:49 +0000 Subject: [PATCH 12/34] Feat: pointer validation checks string across multiple pages and handle kernel page faults --- src/threads/thread.h | 2 -- src/userprog/Make.vars | 4 ++-- src/userprog/exception.c | 20 ++++++++++---------- src/userprog/syscall.c | 38 +++++++++++++++++++++++++------------- 4 files changed, 37 insertions(+), 27 deletions(-) diff --git a/src/threads/thread.h b/src/threads/thread.h index 60f91ce..f031981 100644 --- a/src/threads/thread.h +++ b/src/threads/thread.h @@ -143,9 +143,7 @@ struct thread struct hash open_files; /* Hash Table of FD -> Struct File. */ #endif -#ifdef VM void *curr_esp; -#endif /* Owned by thread.c. */ unsigned magic; /* Detects stack overflow. */ diff --git a/src/userprog/Make.vars b/src/userprog/Make.vars index e4dbb08..23bae3d 100644 --- a/src/userprog/Make.vars +++ b/src/userprog/Make.vars @@ -1,7 +1,7 @@ # -*- makefile -*- -kernel.bin: DEFINES = -DUSERPROG -DFILESYS -KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys +kernel.bin: DEFINES = -DUSERPROG -DFILESYS -DVM +KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys vm TEST_SUBDIRS = tests/userprog tests/userprog/no-vm tests/filesys/base GRADING_FILE = $(SRCDIR)/tests/userprog/Grading SIMULATOR = --qemu diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 3e3b133..1fcbe61 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -146,19 +146,19 @@ page_fault (struct intr_frame *f) write = (f->error_code & PF_W) != 0; user = (f->error_code & PF_U) != 0; - /* Kernel page fault is further handled by the kernel itself. */ - if (kernel) + if (user && not_present) { - f->eip = (void *)f->eax; + if (try_alloc_new_page (fault_addr, f->esp)) + return; + } + else + { + if (try_alloc_new_page (fault_addr, thread_current ()->curr_esp)) + return; + f->eip = (void *)f->eax; f->eax = 0xffffffff; return; - } - - if (user) - { - if (try_alloc_new_page (fault_addr, f->esp)) - return; - } + } /* To implement virtual memory, delete the rest of the function body, and replace it with code that brings in the page to diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c index da6d77d..4bc34ca 100644 --- a/src/userprog/syscall.c +++ b/src/userprog/syscall.c @@ -10,9 +10,6 @@ #include "threads/synch.h" #include "userprog/process.h" #include "userprog/pagedir.h" -#ifdef VM -#include "vm/stackgrowth.h" -#endif #include #include #include @@ -465,17 +462,17 @@ validate_user_pointer (const void *ptr, size_t size, bool check_write) valid user virtual memory address. */ void *last = ptr + size - 1; if (!is_user_vaddr (last)) - thread_exit (); + syscall_exit (EXIT_FAILURE); ptr = pg_round_down (ptr); while (ptr <= last) { int result; /* Check read access to pointer. */ if ((result = get_user (ptr)) == -1) - thread_exit (); + syscall_exit (EXIT_FAILURE); /* Check write access to pointer (if required). 
*/ if (check_write && !put_user (ptr, result)) - thread_exit (); + syscall_exit (EXIT_FAILURE); ptr += PGSIZE; } } @@ -485,18 +482,33 @@ validate_user_pointer (const void *ptr, size_t size, bool check_write) static void validate_user_string (const char *ptr, bool check_write) { - while (true) + size_t offset = (uintptr_t) ptr % PGSIZE; + + for (;;) { + void *page = pg_round_down (ptr); + + if (!is_user_vaddr (page)) + syscall_exit (EXIT_FAILURE); if (!is_user_vaddr (ptr)) - thread_exit (); + syscall_exit (EXIT_FAILURE); int result; if ((result = get_user ((const uint8_t *)ptr)) == -1) - thread_exit (); + syscall_exit (EXIT_FAILURE); if (check_write && !put_user ((uint8_t *)ptr, result)) - thread_exit (); - if (*ptr == '\0') - return; - ptr++; + syscall_exit (EXIT_FAILURE); + + while (offset < PGSIZE) + { + if (*ptr == '\0') + return; /* We reached the end of the string without issues. */ + + ptr++; + offset++; + } + + offset = 0; + } } From 13de832586e55a4899e0aa1b09abb97ddd98a571 Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Fri, 29 Nov 2024 23:52:05 +0000 Subject: [PATCH 13/34] Refactor stack growth code to remove messy conditions --- src/vm/stackgrowth.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/vm/stackgrowth.c b/src/vm/stackgrowth.c index 50bdc39..bc9717c 100644 --- a/src/vm/stackgrowth.c +++ b/src/vm/stackgrowth.c @@ -13,15 +13,7 @@ static bool grow_stack (const void *addr); bool try_alloc_new_page (const void *ptr, const void *esp) { - if (needs_new_page (ptr, esp)) - { - if (!grow_stack (ptr)) - return 0; - else - return 1; - } - else - return 0; + return needs_new_page (ptr, esp) && grow_stack (ptr); } /* Validates a given address for being a stack query and not a generic erroneous From 94adc11f03a386e1e69f00fedecfe682c7317085 Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Sat, 30 Nov 2024 03:21:34 +0000 Subject: [PATCH 14/34] Feat: implement page_get_swap and page_set_swap functions --- src/userprog/pagedir.c | 2 +- src/userprog/pagedir.h | 1 + src/vm/page.c | 23 +++++++++++++++++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/userprog/pagedir.c b/src/userprog/pagedir.c index ef5bbff..f222c18 100644 --- a/src/userprog/pagedir.c +++ b/src/userprog/pagedir.c @@ -53,7 +53,7 @@ pagedir_destroy (uint32_t *pd) on CREATE. If CREATE is true, then a new page table is created and a pointer into it is returned. Otherwise, a null pointer is returned. 
*/ -static uint32_t * +uint32_t * lookup_page (uint32_t *pd, const void *vaddr, bool create) { uint32_t *pt, *pde; diff --git a/src/userprog/pagedir.h b/src/userprog/pagedir.h index 06e45d2..b9ec549 100644 --- a/src/userprog/pagedir.h +++ b/src/userprog/pagedir.h @@ -6,6 +6,7 @@ uint32_t *pagedir_create (void); void pagedir_destroy (uint32_t *pd); +uint32_t *lookup_page (uint32_t *pd, const void *vaddr, bool create); bool pagedir_set_page (uint32_t *pd, void *upage, void *kpage, bool rw); void *pagedir_get_page (uint32_t *pd, const void *upage); void pagedir_clear_page (uint32_t *pd, void *upage); diff --git a/src/vm/page.c b/src/vm/page.c index 8ebc71c..01eeda7 100644 --- a/src/vm/page.c +++ b/src/vm/page.c @@ -1,4 +1,9 @@ #include "page.h" +#include "userprog/pagedir.h" +#include "threads/pte.h" + +#define SWAP_FLAG_BIT 9 +#define ADDR_START_BIT 12 /* Updates the 'owner' thread's page table entry for virtual address 'upage' to have a present bit of 0 and stores the specified swap slot value in the @@ -6,6 +11,17 @@ void page_set_swap (struct thread *owner, void *upage, size_t swap_slot) { + uint32_t *pte = lookup_page (owner->pagedir, upage, false); + *pte &= ~PTE_P; //clears the first bit (present bit) to be 0 + + //ASSERT (swap_slot < (1 << 20)); //not sure if this is needed + + //shifts the swap slot addr to take up bits 31 to 12 + //uses bitwise & to make sure it does not affect flags + //then applies it to pte + *pte |= (1 << SWAP_FLAG_BIT); // sets the 9th bit + uint32_t swap_slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR; + *pte = (*pte & PTE_FLAGS) | swap_slot_bits; } @@ -15,6 +31,13 @@ page_set_swap (struct thread *owner, void *upage, size_t swap_slot) size_t page_get_swap (struct thread *owner, void *upage) { + uint32_t *pte = lookup_page (owner->pagedir, upage, false); + //these should always be checked and true before using this func + ASSERT ((*pte & PTE_P) == 0); + ASSERT ((*pte & (1 << 9)) == 1); + + //masks address bits and returns truncated value + return ((*pte & PTE_ADDR) >> 12); } From 6f85d7642d8d023de8b03dc4efed32c5df7d4340 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Sat, 30 Nov 2024 22:40:13 +0000 Subject: [PATCH 15/34] feat: implement clock (second-chance) page eviction algorithm --- src/Makefile.build | 2 +- src/userprog/process.c | 35 +++++--- src/vm/frame.c | 191 +++++++++++++++++++++++++++++++++-------- src/vm/frame.h | 3 +- src/vm/page.c | 20 +++++ src/vm/page.h | 9 ++ 6 files changed, 209 insertions(+), 51 deletions(-) create mode 100644 src/vm/page.c create mode 100644 src/vm/page.h diff --git a/src/Makefile.build b/src/Makefile.build index 4e57a13..7778f57 100644 --- a/src/Makefile.build +++ b/src/Makefile.build @@ -63,8 +63,8 @@ userprog_SRC += userprog/tss.c # TSS management. # Virtual memory code. vm_SRC += vm/frame.c # Frame table manager. +vm_SRC += vm/page.c # Page table manager. vm_SRC += devices/swap.c # Swap block manager. -#vm_SRC = vm/file.c # Some other file. # Filesystem code. filesys_SRC = filesys/filesys.c # Filesystem core. 
diff --git a/src/userprog/process.c b/src/userprog/process.c index a8f1d10..aa5091c 100644 --- a/src/userprog/process.c +++ b/src/userprog/process.c @@ -116,7 +116,7 @@ process_execute (const char *cmd) return tid; } -static void *get_usr_kpage (enum palloc_flags flags); +static void *get_usr_kpage (enum palloc_flags flags, void *upage); static void free_usr_kpage (void *kpage); static bool install_page (void *upage, void *kpage, bool writable); @@ -257,12 +257,13 @@ process_init_stack (char *cmd_saveptr, void **esp, char *file_name) int pages_needed = DIV_CEIL (overflow_bytes, PGSIZE); /* Allocate the pages and map them to the user process. */ + void *upage; + uint8_t *kpage; for (int i = 1; i < pages_needed + 1; i++) { - uint8_t *kpage = get_usr_kpage (PAL_ZERO); - if (!install_page (((uint8_t *) PHYS_BASE) - PGSIZE * (i + 1), - kpage, true)) - return false; + upage = ((uint8_t *) PHYS_BASE) - PGSIZE * (i + 1); + kpage = get_usr_kpage (PAL_ZERO, upage); + if (!install_page (upage, kpage, true)) return false; } } @@ -710,7 +711,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage, if (kpage == NULL){ /* Get a new page of memory. */ - kpage = get_usr_kpage (0); + kpage = get_usr_kpage (0, upage); if (kpage == NULL){ return false; } @@ -752,11 +753,13 @@ setup_stack (void **esp) { uint8_t *kpage; bool success = false; - - kpage = get_usr_kpage (PAL_ZERO); + + void *upage = ((uint8_t *) PHYS_BASE) - PGSIZE; + + kpage = get_usr_kpage (PAL_ZERO, upage); if (kpage != NULL) { - success = install_page (((uint8_t *) PHYS_BASE) - PGSIZE, kpage, true); + success = install_page (upage, kpage, true); if (success) *esp = PHYS_BASE; else @@ -765,14 +768,20 @@ setup_stack (void **esp) return success; } -/* Claims a page from the user pool and returns its kernel address, - updating the frame table if VM is enabled. */ +/* Claims a page from the user pool for ownership by the current thread + and returns its kernel address, updating the frame table if VM + is enabled. Requires the intended virtual address for where the page + will be installed. */ static void * -get_usr_kpage (enum palloc_flags flags) +get_usr_kpage (enum palloc_flags flags, void *upage) { void *page; #ifdef VM - page = frame_alloc (flags); + struct thread *t = thread_current (); + if (pagedir_get_page (t->pagedir, upage) != NULL) + return NULL; + else + page = frame_alloc (flags, upage, t); #else page = palloc_get_page (flags | PAL_USER); #endif diff --git a/src/vm/frame.c b/src/vm/frame.c index b030c59..cdb141c 100644 --- a/src/vm/frame.c +++ b/src/vm/frame.c @@ -1,34 +1,42 @@ #include #include #include +#include #include "frame.h" +#include "page.h" #include "threads/malloc.h" +#include "threads/vaddr.h" +#include "userprog/pagedir.h" #include "threads/synch.h" +#include "devices/swap.h" /* Hash table that maps every active frame's kernel virtual address to its corresponding 'frame_metadata'.*/ struct hash frame_table; -/* Linked list of frame_metadata whose pages are predicted to currently - be in the working set of a process. They are not considered for - eviction, but are considered for demotion to the 'inactive' list. */ -struct list active_list; +/* Linked list used to represent the circular queue in the 'clock' + algorithm for page eviction. Iterating from the element that is + currently pointed at by 'next_victim' yields an ordering of the entries + from oldest to newest (in terms of when they were added or checked + for having been referenced by a process). 
*/
+struct list lru_list;
 
-/* Linked list of frame_metadata whose pages are predicted to leave the
-   working set of their processes soon, so are considered for eviction.
-   Pages are considered for eviction from the tail end, and are initially
-   demoted to 'inactive' at the head. */
-struct list inactive_list;
+/* The next element in lru_list to be considered for eviction (oldest added
+   or referenced page in the circular queue). If this page has an
+   'accessed' bit of 0 when considering eviction, then it will be the next
+   victim. Otherwise, the next element in the queue is similarly considered. */
+struct list_elem *next_victim = NULL;
 
 /* Synchronisation variables. */
-/* Ensures mutual exclusion to accessing the 'head' and first element of
-   'inactive_list', which is accessed every time a frame is allocated. */
-struct lock inactive_head_lock;
+/* Protects access to 'lru_list'. */
+struct lock lru_lock;
 
 struct frame_metadata
   {
     void *frame;          /* The kernel virtual address holding the frame. */
+    void *upage;          /* The user virtual address pointing to the frame. */
+    struct thread *owner; /* Pointer to the thread that owns the frame. */
     struct hash_elem hash_elem;  /* Tracks the position of the frame metadata
                                     within 'frame_table', whose key is the
                                     kernel virtual address of the frame. */
@@ -40,56 +48,102 @@ struct frame_metadata
 hash_hash_func frame_metadata_hash;
 hash_less_func frame_metadata_less;
 
+static struct list_elem *lru_next (struct list_elem *e);
+static struct list_elem *lru_prev (struct list_elem *e);
+static struct frame_metadata *get_victim (void);
+
 /* Initialize the frame system by initializing the frame (hash) table with the
    frame_metadata hashing and comparison functions, as well as initializing
-   the active & inactive lists. Also initializes the system's synchronisation
-   primitives. */
+   'lru_list' and its associated synchronisation primitives. */
 void
 frame_init (void)
 {
   hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
-  list_init (&active_list);
-  list_init (&inactive_list);
-  lock_init (&inactive_head_lock);
+  list_init (&lru_list);
+  lock_init (&lru_lock);
 }
 
+/* TODO: Consider synchronisation more closely (i.e. just for hash
+   table). */
 /* Attempt to allocate a frame for a user process, either by direct allocation
    of a user page if there is sufficient RAM, or by evicting a currently
    active page if memory allocated for user processes is full and storing
    it in swap. If swap is full in the latter case, panic the kernel. */
 void *
-frame_alloc (enum palloc_flags flags)
+frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
 {
+  struct frame_metadata *frame_metadata;
   flags |= PAL_USER;
-  
+
+  lock_acquire (&lru_lock);
   void *frame = palloc_get_page (flags);
+
+  /* If a frame couldn't be allocated we must be out of main memory. Thus,
+     obtain a victim page to replace with our page, and swap the victim
+     into disk. */
  if (frame == NULL)
    {
-      /* TODO: Find victim page to replace, and swap it with this new page. */
-      return NULL;
+      /* 1. Obtain victim. */
+      if (next_victim == NULL)
+        PANIC ("Couldn't allocate a single page to main memory!\n");
+
+      struct frame_metadata *victim = get_victim ();
+      ASSERT (victim != NULL); /* get_victim () should never return null. */
+
+      /* 2. Swap out victim into disk. */
+      size_t swap_slot = swap_out (victim->frame);
+      page_set_swap (victim->owner, victim->upage, swap_slot);
+
+      /* If zero flag is set, zero out the victim page. */
+      if (flags & PAL_ZERO)
+        memset (victim->frame, 0, PGSIZE);
+
+      /* 3.
Indicate that the new frame's metadata will be stored + inside the same structure that stored the victim's metadata. + As both the new frame and the victim frame share the same kernel + virtual address, the hash map need not be updated, and neither + the list_elem value as both share the same lru_list position. */ + frame_metadata = victim; } - struct frame_metadata *frame_metadata = - malloc (sizeof (struct frame_metadata)); - frame_metadata->frame = frame; + /* If sufficient main memory allows the frame to be directly allocated, + we must update the frame table with a new entry, and grow lru_list. */ + else + { + /* Must own lru_lock here, as otherwise there is a race condition + with next_victim either being NULL or uninitialized. */ + frame_metadata = malloc (sizeof (struct frame_metadata)); + frame_metadata->frame = frame; - /* Newly faulted pages begin at the head of the inactive list. */ - lock_acquire (&inactive_head_lock); - list_push_front (&inactive_list, &frame_metadata->list_elem); - lock_release (&inactive_head_lock); + /* Newly allocated frames are pushed to the back of the circular queue + represented by lru_list. Must explicitly handle the case where the + circular queue is empty (when next_victim == NULL). */ + if (next_victim == NULL) + { + list_push_back (&lru_list, &frame_metadata->list_elem); + next_victim = &frame_metadata->list_elem; + } + else + { + struct list_elem *lru_tail = lru_prev (next_victim); + list_insert (lru_tail, &frame_metadata->list_elem); + } - /* Finally, insert frame metadata within the frame table, with the key as its - allocated kernel address. */ - hash_replace (&frame_table, &frame_metadata->hash_elem); + hash_insert (&frame_table, &frame_metadata->hash_elem); + } - return frame; + frame_metadata->upage = upage; + frame_metadata->owner = owner; + lock_release (&lru_lock); + + return frame_metadata->frame; } /* Attempt to deallocate a frame for a user process by removing it from the - frame table as well as active/inactive list, and freeing the underlying - page memory. Panics if the frame isn't active in memory. */ + frame table as well as lru_list, and freeing the underlying page + memory & metadata struct. Panics if the frame isn't active in memory. */ void frame_free (void *frame) { @@ -98,17 +152,58 @@ frame_free (void *frame) struct hash_elem *e = hash_delete (&frame_table, &key_metadata.hash_elem); - if (e == NULL) PANIC ("Attempted to free a frame without a corresponding " - "kernel address!\n"); + if (e == NULL) PANIC ("Attempted to free a frame at kernel address %p, " + "but this address is not allocated!\n", frame); struct frame_metadata *frame_metadata = hash_entry (e, struct frame_metadata, hash_elem); + lock_acquire (&lru_lock); list_remove (&frame_metadata->list_elem); + + /* If we're freeing the frame marked as the next victim, update + next_victim to either be the next least recently used page, or NULL + if no pages are loaded in main memory. */ + if (&frame_metadata->list_elem == next_victim) + { + if (list_empty (&lru_list)) + next_victim = NULL; + else + next_victim = lru_next (next_victim); + } + lock_release (&lru_lock); + free (frame_metadata); palloc_free_page (frame); } +/* TODO: Account for page aliases when checking accessed bit. */ +/* A pre-condition for calling this function is that the calling thread + owns lru_lock and that lru_list is non-empty. 
*/ +static struct frame_metadata * +get_victim (void) + { + struct list_elem *e = next_victim; + struct frame_metadata *frame_metadata; + uint32_t *pd; + void *upage; + for (;;) + { + frame_metadata = list_entry (e, struct frame_metadata, list_elem); + pd = frame_metadata->owner->pagedir; + upage = frame_metadata->upage; + e = lru_next (e); + + if (!pagedir_is_accessed (pd, upage)) + break; + + pagedir_set_accessed (pd, upage, false); + } + + next_victim = e; + return frame_metadata; + } + /* Hash function for frame metadata, used for storing entries in the frame table. */ unsigned @@ -135,3 +230,27 @@ frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_, return a->frame < b->frame; } +/* Returns the next recently used element after the one provided, which + is achieved by iterating through lru_list like a circular queue + (wrapping around the list at the tail). */ +static struct list_elem * +lru_next (struct list_elem *e) +{ + if (!list_empty (&lru_list) && e == list_back (&lru_list)) + return list_front (&lru_list); + + return list_next (e); +} + +/* Returns the previous recently used element after the one provided, which + is achieved by iterating through lru_list like a circular queue + (wrapping around the list at the head). */ +static struct list_elem * +lru_prev (struct list_elem *e) +{ + if (!list_empty (&lru_list) && e == list_front (&lru_list)) + return list_back (&lru_list); + + return list_prev (e); +} + diff --git a/src/vm/frame.h b/src/vm/frame.h index 8e52ec2..93081d3 100644 --- a/src/vm/frame.h +++ b/src/vm/frame.h @@ -1,10 +1,11 @@ #ifndef VM_FRAME_H #define VM_FRAME_H +#include "threads/thread.h" #include "threads/palloc.h" void frame_init (void); -void *frame_alloc (enum palloc_flags); +void *frame_alloc (enum palloc_flags, void *, struct thread *); void frame_free (void *frame); #endif /* vm/frame.h */ diff --git a/src/vm/page.c b/src/vm/page.c new file mode 100644 index 0000000..8ebc71c --- /dev/null +++ b/src/vm/page.c @@ -0,0 +1,20 @@ +#include "page.h" + +/* Updates the 'owner' thread's page table entry for virtual address 'upage' + to have a present bit of 0 and stores the specified swap slot value in the + entry for later retrieval from disk. */ +void +page_set_swap (struct thread *owner, void *upage, size_t swap_slot) +{ + +} + +/* Given that the page with user address 'upage' owned by 'owner' is flagged + to be in the swap disk via the owner's page table, returns its stored + swap slot. Otherwise panics the kernel. 
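+   As a rough, illustrative sketch of the swap-in path these helpers are
+   meant to serve (not part of this patch; 'writable' would come from
+   pagedir_is_writable):
+
+     size_t slot = page_get_swap (owner, upage);
+     void *kpage = frame_alloc (0, upage, owner);
+     swap_in (kpage, slot);
+     pagedir_set_page (owner->pagedir, upage, kpage, writable);
+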
*/ +size_t +page_get_swap (struct thread *owner, void *upage) +{ + +} + diff --git a/src/vm/page.h b/src/vm/page.h new file mode 100644 index 0000000..7259ca9 --- /dev/null +++ b/src/vm/page.h @@ -0,0 +1,9 @@ +#ifndef VM_PAGE_H +#define VM_PAGE_H + +#include "threads/thread.h" + +void page_set_swap (struct thread *, void *, size_t); +size_t page_get_swap (struct thread *, void *); + +#endif /* vm/frame.h */ From 7ce512305e0fd78efd423cc2d3623dd0c603ba94 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Sun, 1 Dec 2024 00:41:09 +0000 Subject: [PATCH 16/34] fix: remove DVM flag when compiling outside of vm directory --- src/userprog/Make.vars | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/userprog/Make.vars b/src/userprog/Make.vars index 23bae3d..3541b02 100644 --- a/src/userprog/Make.vars +++ b/src/userprog/Make.vars @@ -1,6 +1,6 @@ # -*- makefile -*- -kernel.bin: DEFINES = -DUSERPROG -DFILESYS -DVM +kernel.bin: DEFINES = -DUSERPROG -DFILESYS KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys vm TEST_SUBDIRS = tests/userprog tests/userprog/no-vm tests/filesys/base GRADING_FILE = $(SRCDIR)/tests/userprog/Grading From bb16abdc0d21ca064ba01ead9bae40e3979c8fc3 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Sun, 1 Dec 2024 23:30:50 +0000 Subject: [PATCH 17/34] refactor: supplemental page table helper functions follow code style --- src/vm/page.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/vm/page.c b/src/vm/page.c index 01eeda7..6a3bb1a 100644 --- a/src/vm/page.c +++ b/src/vm/page.c @@ -12,17 +12,16 @@ void page_set_swap (struct thread *owner, void *upage, size_t swap_slot) { uint32_t *pte = lookup_page (owner->pagedir, upage, false); - *pte &= ~PTE_P; //clears the first bit (present bit) to be 0 - //ASSERT (swap_slot < (1 << 20)); //not sure if this is needed - - //shifts the swap slot addr to take up bits 31 to 12 - //uses bitwise & to make sure it does not affect flags - //then applies it to pte - *pte |= (1 << SWAP_FLAG_BIT); // sets the 9th bit + /* Store the provided swap slot in the address bits of the page table + entry, truncating excess bits. */ + *pte |= (1 << SWAP_FLAG_BIT); uint32_t swap_slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR; *pte = (*pte & PTE_FLAGS) | swap_slot_bits; + /* Mark page as 'not present' and flag the page directory as having + been modified. */ + pagedir_clear_page (owner->pagedir, upage); } /* Given that the page with user address 'upage' owned by 'owner' is flagged @@ -33,11 +32,10 @@ page_get_swap (struct thread *owner, void *upage) { uint32_t *pte = lookup_page (owner->pagedir, upage, false); - //these should always be checked and true before using this func ASSERT ((*pte & PTE_P) == 0); - ASSERT ((*pte & (1 << 9)) == 1); + ASSERT ((*pte & (1 << SWAP_FLAG_BIT)) != 0); - //masks address bits and returns truncated value - return ((*pte & PTE_ADDR) >> 12); + /* Masks the address bits and returns truncated value. 
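+     While the page is not present, the slot index occupies the PTE's
+     address bits (31..12), so shifting right by ADDR_START_BIT recovers
+     the value stored by page_set_swap.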
*/ + return ((*pte & PTE_ADDR) >> ADDR_START_BIT); } From 05a48cf9c6710908b6bff7fcc78ba475e88739f6 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Sun, 1 Dec 2024 23:36:55 +0000 Subject: [PATCH 18/34] refactor: page fault exception handler follows code style --- src/userprog/exception.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 1fcbe61..2e812ec 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -148,17 +148,18 @@ page_fault (struct intr_frame *f) if (user && not_present) { - if (try_alloc_new_page (fault_addr, f->esp)) + if (try_alloc_new_page (fault_addr, f->esp)) return; } else { if (try_alloc_new_page (fault_addr, thread_current ()->curr_esp)) return; - f->eip = (void *)f->eax; + + f->eip = (void *)f->eax; f->eax = 0xffffffff; return; - } + } /* To implement virtual memory, delete the rest of the function body, and replace it with code that brings in the page to From 6adf2e743b0e6045b721dd3e4d3d0185f2e4f460 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Mon, 2 Dec 2024 19:50:40 +0000 Subject: [PATCH 19/34] refactor: dynamic stack growth functions to follow code style --- src/userprog/exception.c | 4 ++-- src/vm/stackgrowth.c | 50 ++++++++++++++++++++++++---------------- src/vm/stackgrowth.h | 2 +- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 2e812ec..8988c0a 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -148,12 +148,12 @@ page_fault (struct intr_frame *f) if (user && not_present) { - if (try_alloc_new_page (fault_addr, f->esp)) + if (handle_stack_fault (fault_addr, f->esp)) return; } else { - if (try_alloc_new_page (fault_addr, thread_current ()->curr_esp)) + if (handle_stack_fault (fault_addr, thread_current ()->curr_esp)) return; f->eip = (void *)f->eax; diff --git a/src/vm/stackgrowth.c b/src/vm/stackgrowth.c index bc9717c..8dae21a 100644 --- a/src/vm/stackgrowth.c +++ b/src/vm/stackgrowth.c @@ -1,5 +1,6 @@ #include #include "stackgrowth.h" +#include "frame.h" #include "threads/palloc.h" #include "threads/thread.h" #include "threads/vaddr.h" @@ -7,44 +8,53 @@ #define MAX_STACK_ACCESS_DIST 32 -static bool needs_new_page (const void *addr, const void *esp); +static bool is_stack_fault (const void *addr, const void *esp); static bool grow_stack (const void *addr); +/* Determine whether a particular page fault occured due to a stack + access below the stack pointer that should induce stack growth, and + if so grow the stack by a single page (capped at MAX_STACK_SIZE). */ bool -try_alloc_new_page (const void *ptr, const void *esp) +handle_stack_fault (const void *ptr, const void *esp) { - return needs_new_page (ptr, esp) && grow_stack (ptr); + return is_stack_fault (ptr, esp) && grow_stack (ptr); } -/* Validates a given address for being a stack query and not a generic erroneous - address - */ +/* Determines whether a particular page fault appears to be caused by + a stack access that should induce dynamic stack growth. Stack size + is capped at MAX_STACK_SIZE. 
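+   An address at or above the stack pointer, or at most
+   MAX_STACK_ACCESS_DIST (32) bytes below it, is treated as a stack access;
+   the 32-byte window covers PUSH and PUSHA, which may fault 4 and 32 bytes
+   below %esp respectively.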
*/ static bool -needs_new_page (const void *addr, const void *esp) +is_stack_fault (const void *addr, const void *esp) { - return (is_user_vaddr (addr) && - (uint32_t*)addr >= ((uint32_t*)esp - MAX_STACK_ACCESS_DIST) && - ((PHYS_BASE - pg_round_down (addr)) - <= MAX_STACK_SIZE)); + return (is_user_vaddr (addr) && + (uint32_t*)addr >= ((uint32_t*)esp - MAX_STACK_ACCESS_DIST) && + ((PHYS_BASE - pg_round_down (addr)) <= MAX_STACK_SIZE)); } -/* Extends the stack by the necessary number of pages */ +/* Grows the stack of the process running inside the current thread by a single + page given a user virtual address inside of the page wherein the new section + of the stack should be allocated. */ static bool grow_stack (const void *addr) { struct thread *t = thread_current (); void *last_page = pg_round_down (addr); - uint8_t *new_page = palloc_get_page (PAL_USER | PAL_ZERO); - if ( new_page == NULL) + /* This function should only be called when dealing with a faulting stack + access that induces stack growth, so the provided address shouldn't be + present in a page within the current thread's page directory. */ + ASSERT (pagedir_get_page (t->pagedir, last_page) == NULL); + + uint8_t *new_page = frame_alloc (PAL_ZERO, last_page, t); + if (new_page == NULL) return false; - bool added_page = pagedir_get_page (t->pagedir, last_page) == NULL - && pagedir_set_page (t->pagedir, last_page, new_page, true); - - if (!added_page) { - palloc_free_page (new_page); + if (!pagedir_set_page (t->pagedir, last_page, new_page, true)) + { + frame_free (new_page); return false; } + return true; -} \ No newline at end of file +} + diff --git a/src/vm/stackgrowth.h b/src/vm/stackgrowth.h index acd123e..a19366e 100644 --- a/src/vm/stackgrowth.h +++ b/src/vm/stackgrowth.h @@ -5,6 +5,6 @@ #define MAX_STACK_SIZE 8388608 // (8MB) -bool try_alloc_new_page (const void *ptr, const void *esp); +bool handle_stack_fault (const void *ptr, const void *esp); #endif /* vm/frame.h */ From 6190d1bee631f89a4fe33686374f806b85182569 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Mon, 2 Dec 2024 20:44:54 +0000 Subject: [PATCH 20/34] fix: disable dynamic stack growth when VM flag is disabled --- src/userprog/Make.vars | 2 +- src/userprog/exception.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/userprog/Make.vars b/src/userprog/Make.vars index 3541b02..e4dbb08 100644 --- a/src/userprog/Make.vars +++ b/src/userprog/Make.vars @@ -1,7 +1,7 @@ # -*- makefile -*- kernel.bin: DEFINES = -DUSERPROG -DFILESYS -KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys vm +KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys TEST_SUBDIRS = tests/userprog tests/userprog/no-vm tests/filesys/base GRADING_FILE = $(SRCDIR)/tests/userprog/Grading SIMULATOR = --qemu diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 8988c0a..cecd143 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -146,6 +146,7 @@ page_fault (struct intr_frame *f) write = (f->error_code & PF_W) != 0; user = (f->error_code & PF_U) != 0; +#ifdef VM if (user && not_present) { if (handle_stack_fault (fault_addr, f->esp)) @@ -160,6 +161,7 @@ page_fault (struct intr_frame *f) f->eax = 0xffffffff; return; } +#endif /* To implement virtual memory, delete the rest of the function body, and replace it with code that brings in the page to From fbcd3c9f1952d3b6c675af6e20a4a9d239518217 Mon Sep 17 00:00:00 2001 From: "Demetriades, Themis" Date: Mon, 2 Dec 2024 20:57:05 +0000 Subject: [PATCH 21/34] ci: include 
dynamic stack growth tests in VM test pipeline --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 768269b..a82d826 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -37,4 +37,4 @@ test_vm: extends: .pintos_tests variables: DIR: vm - IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-read|tests/vm/mmap-close|tests/vm/mmap-overlap|tests/vm/mmap-twice|tests/vm/mmap-write|tests/vm/mmap-exit|tests/vm/mmap-shuffle|tests/vm/mmap-clean|tests/vm/mmap-inherit|tests/vm/mmap-misalign|tests/vm/mmap-null|tests/vm/mmap-over-code|tests/vm/mmap-over-data|tests/vm/mmap-over-stk|tests/vm/mmap-remove) + IGNORE: (tests/vm/pt-overflowstk|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-read|tests/vm/mmap-close|tests/vm/mmap-overlap|tests/vm/mmap-twice|tests/vm/mmap-write|tests/vm/mmap-exit|tests/vm/mmap-shuffle|tests/vm/mmap-clean|tests/vm/mmap-inherit|tests/vm/mmap-misalign|tests/vm/mmap-null|tests/vm/mmap-over-code|tests/vm/mmap-over-data|tests/vm/mmap-over-stk|tests/vm/mmap-remove) From df7d847978c9caaf10500406b4c03c753231d740 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Mon, 2 Dec 2024 21:07:17 +0000 Subject: [PATCH 22/34] fix: remove stack fault checks for page faults outside user non-present addresses --- src/userprog/exception.c | 6 +----- src/vm/stackgrowth.c | 3 +-- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index cecd143..2fe8648 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -149,14 +149,10 @@ page_fault (struct intr_frame *f) #ifdef VM if (user && not_present) { - if (handle_stack_fault (fault_addr, f->esp)) - return; + if (handle_stack_fault (fault_addr, f->esp)) return; } else { - if (handle_stack_fault (fault_addr, thread_current ()->curr_esp)) - return; - f->eip = (void *)f->eax; f->eax = 0xffffffff; return; diff --git a/src/vm/stackgrowth.c b/src/vm/stackgrowth.c index 8dae21a..cf44ed5 100644 --- a/src/vm/stackgrowth.c +++ b/src/vm/stackgrowth.c @@ -26,8 +26,7 @@ handle_stack_fault (const void *ptr, const void *esp) static bool is_stack_fault (const void *addr, const void *esp) { - return (is_user_vaddr (addr) && - (uint32_t*)addr >= ((uint32_t*)esp - MAX_STACK_ACCESS_DIST) && + return ((uint32_t*)addr >= ((uint32_t*)esp - MAX_STACK_ACCESS_DIST) && ((PHYS_BASE - pg_round_down (addr)) <= MAX_STACK_SIZE)); } From 08eafcf7ef771ec764e2abcf83764aa679cf229d Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Tue, 3 Dec 2024 16:53:47 +0000 Subject: [PATCH 23/34] feat: implement page swapping --- src/userprog/exception.c | 29 +++++++++++++++++++++++++++-- src/vm/page.c | 12 +++++++++++- src/vm/page.h | 1 + 3 files changed, 39 insertions(+), 3 deletions(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 2fe8648..7b8d3ee 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -1,10 +1,18 @@ #include "userprog/exception.h" #include #include +#include "stdbool.h" #include "userprog/gdt.h" #include "threads/interrupt.h" #include "threads/thread.h" +#include "userprog/pagedir.h" +#ifdef VM #include "vm/stackgrowth.h" +#include 
"vm/frame.h" +#include "vm/page.h" +#include "devices/swap.h" +#include "threads/vaddr.h" +#endif /* Number of page faults processed. */ static long long page_fault_cnt; @@ -147,9 +155,26 @@ page_fault (struct intr_frame *f) user = (f->error_code & PF_U) != 0; #ifdef VM - if (user && not_present) + if (user) { - if (handle_stack_fault (fault_addr, f->esp)) return; + if (not_present) + { + struct thread *t = thread_current (); + if (page_in_swap (t, fault_addr)) + { + size_t swap_slot = page_get_swap (t, fault_addr); + void *upage = pg_round_down (fault_addr); + void *kpage = frame_alloc (0, upage, t); + swap_in (kpage, swap_slot); + + bool writeable = pagedir_is_writable (t->pagedir, upage); + if (pagedir_set_page (t->pagedir, upage, kpage, writeable)) return; + } + + /* Handle page faults that need to be resolved by dynamic stack growth + by checking if this is such a fault and resolving it appropriately. */ + if (handle_stack_fault (fault_addr, f->esp)) return; + } } else { diff --git a/src/vm/page.c b/src/vm/page.c index 6a3bb1a..3b45b14 100644 --- a/src/vm/page.c +++ b/src/vm/page.c @@ -24,6 +24,16 @@ page_set_swap (struct thread *owner, void *upage, size_t swap_slot) pagedir_clear_page (owner->pagedir, upage); } +/* Returns true iff the page with user address 'upage' owned by 'owner' + is flagged to be in the swap disk via the owner's page table. */ +bool +page_in_swap (struct thread *owner, void *upage) +{ + uint32_t *pte = lookup_page (owner->pagedir, upage, false); + return pte != NULL && + (*pte & (1 << SWAP_FLAG_BIT)) != 0; +} + /* Given that the page with user address 'upage' owned by 'owner' is flagged to be in the swap disk via the owner's page table, returns its stored swap slot. Otherwise panics the kernel. */ @@ -32,8 +42,8 @@ page_get_swap (struct thread *owner, void *upage) { uint32_t *pte = lookup_page (owner->pagedir, upage, false); + ASSERT (pte != NULL); ASSERT ((*pte & PTE_P) == 0); - ASSERT ((*pte & (1 << SWAP_FLAG_BIT)) != 0); /* Masks the address bits and returns truncated value. 
*/ return ((*pte & PTE_ADDR) >> ADDR_START_BIT); diff --git a/src/vm/page.h b/src/vm/page.h index 7259ca9..2b3ca6d 100644 --- a/src/vm/page.h +++ b/src/vm/page.h @@ -4,6 +4,7 @@ #include "threads/thread.h" void page_set_swap (struct thread *, void *, size_t); +bool page_in_swap (struct thread *, void *); size_t page_get_swap (struct thread *, void *); #endif /* vm/frame.h */ From 9a3c8a1c385726b2ab73abb3beaf7b5bb9a80337 Mon Sep 17 00:00:00 2001 From: EDiasAlberto Date: Tue, 3 Dec 2024 20:56:10 +0000 Subject: [PATCH 24/34] fix: grow stack upon page fault in kernel context to support syscall stack growth --- src/userprog/exception.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 7b8d3ee..6c707b1 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -177,7 +177,10 @@ page_fault (struct intr_frame *f) } } else - { + { + /* Allows for stack growth in kernel context, due to syscall failure */ + if (handle_stack_fault (fault_addr, thread_current ()->curr_esp)) return; + f->eip = (void *)f->eax; f->eax = 0xffffffff; return; From 47a7dfae049f5049df7b89838ac0d7868c3cea68 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Tue, 3 Dec 2024 21:47:59 +0000 Subject: [PATCH 25/34] refactor: add comments describing each type of page fault dealt by the page fault handler --- src/userprog/exception.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 6c707b1..7c4f2ad 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -155,11 +155,15 @@ page_fault (struct intr_frame *f) user = (f->error_code & PF_U) != 0; #ifdef VM + struct thread *t = thread_current (); if (user) { if (not_present) { - struct thread *t = thread_current (); + /* Check if the non-present user page is in the swap partition. + If so, swap it back into main memory, updating the PTE for + the faulted virtual address to point to the newly allocated + frame. */ if (page_in_swap (t, fault_addr)) { size_t swap_slot = page_get_swap (t, fault_addr); @@ -171,15 +175,18 @@ page_fault (struct intr_frame *f) if (pagedir_set_page (t->pagedir, upage, kpage, writeable)) return; } - /* Handle page faults that need to be resolved by dynamic stack growth - by checking if this is such a fault and resolving it appropriately. */ + /* Handle user page faults that need to be resolved by dynamic + stack growth by checking if this is such a fault and responding + accordingly. */ if (handle_stack_fault (fault_addr, f->esp)) return; } } else { - /* Allows for stack growth in kernel context, due to syscall failure */ - if (handle_stack_fault (fault_addr, thread_current ()->curr_esp)) return; + /* Handle kernel page faults that need to be resolved by dynamic stack + growth by checking if this is such a fault and responding + accordingly. 
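+         handle_stack_fault is passed t->curr_esp rather than f->esp here,
+         because f->esp does not hold the user stack pointer for faults
+         taken in kernel context.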
*/ + if (not_present && handle_stack_fault (fault_addr, t->curr_esp)) return; f->eip = (void *)f->eax; f->eax = 0xffffffff; From 1b73e415d7cf0d65f58a63631703bf2ddb133e67 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Wed, 4 Dec 2024 15:02:49 +0000 Subject: [PATCH 26/34] fix: invalidate PTEs of evicted pages before eviction occurs to prevent modificationof pages mid-eviction --- src/userprog/pagedir.c | 3 +-- src/userprog/pagedir.h | 1 + src/vm/frame.c | 7 +++++++ src/vm/page.c | 8 +++----- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/userprog/pagedir.c b/src/userprog/pagedir.c index f222c18..886bb25 100644 --- a/src/userprog/pagedir.c +++ b/src/userprog/pagedir.c @@ -7,7 +7,6 @@ #include "threads/palloc.h" static uint32_t *active_pd (void); -static void invalidate_pagedir (uint32_t *); /* Creates a new page directory that has mappings for kernel virtual addresses, but none for user virtual addresses. @@ -278,7 +277,7 @@ active_pd (void) This function invalidates the TLB if PD is the active page directory. (If PD is not active then its entries are not in the TLB, so there is no need to invalidate anything.) */ -static void +void invalidate_pagedir (uint32_t *pd) { if (active_pd () == pd) diff --git a/src/userprog/pagedir.h b/src/userprog/pagedir.h index b9ec549..6b8fd26 100644 --- a/src/userprog/pagedir.h +++ b/src/userprog/pagedir.h @@ -17,5 +17,6 @@ void pagedir_set_accessed (uint32_t *pd, const void *upage, bool accessed); bool pagedir_is_writable (uint32_t *pd, const void *upage); void pagedir_set_writable (uint32_t *pd, const void *upage, bool writable); void pagedir_activate (uint32_t *pd); +void invalidate_pagedir (uint32_t *pd); #endif /* userprog/pagedir.h */ diff --git a/src/vm/frame.c b/src/vm/frame.c index cdb141c..0f515ac 100644 --- a/src/vm/frame.c +++ b/src/vm/frame.c @@ -93,6 +93,13 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner) ASSERT (victim != NULL); /* get_victim () should never return null. */ /* 2. Swap out victim into disk. */ + /* Mark page as 'not present' and flag the page directory as having + been modified *before* eviction begins to prevent the owner of the + victim page from accessing/modifying it mid-eviction. */ + pagedir_clear_page (owner->pagedir, upage); + + // TODO: Lock PTE of victim page for victim process. + size_t swap_slot = swap_out (victim->frame); page_set_swap (victim->owner, victim->upage, swap_slot); diff --git a/src/vm/page.c b/src/vm/page.c index 3b45b14..1f4ef42 100644 --- a/src/vm/page.c +++ b/src/vm/page.c @@ -6,8 +6,8 @@ #define ADDR_START_BIT 12 /* Updates the 'owner' thread's page table entry for virtual address 'upage' - to have a present bit of 0 and stores the specified swap slot value in the - entry for later retrieval from disk. */ + to flag the page as being stored in swap, and stores the specified swap slot + value in the entry at the address bits for later retrieval from disk. */ void page_set_swap (struct thread *owner, void *upage, size_t swap_slot) { @@ -19,9 +19,7 @@ page_set_swap (struct thread *owner, void *upage, size_t swap_slot) uint32_t swap_slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR; *pte = (*pte & PTE_FLAGS) | swap_slot_bits; - /* Mark page as 'not present' and flag the page directory as having - been modified. 
*/ - pagedir_clear_page (owner->pagedir, upage); + invalidate_pagedir (owner->pagedir); } /* Returns true iff the page with user address 'upage' owned by 'owner' From fb73d694bf4d640cb58029e398377678d6fa4013 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Wed, 4 Dec 2024 16:41:13 +0000 Subject: [PATCH 27/34] fix: frame allocation now invalidates the victim process page directory, not the caller's --- src/vm/frame.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vm/frame.c b/src/vm/frame.c index 0f515ac..6d401b0 100644 --- a/src/vm/frame.c +++ b/src/vm/frame.c @@ -96,7 +96,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner) /* Mark page as 'not present' and flag the page directory as having been modified *before* eviction begins to prevent the owner of the victim page from accessing/modifying it mid-eviction. */ - pagedir_clear_page (owner->pagedir, upage); + pagedir_clear_page (victim->owner->pagedir, victim->upage); // TODO: Lock PTE of victim page for victim process. From 4bf6914cfa59724ef987fe9fdd007280cbc6e0be Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Wed, 4 Dec 2024 16:45:31 +0000 Subject: [PATCH 28/34] feat: incorporate lazy-loading data & helpers into supplemental page table --- src/threads/thread.h | 4 ++ src/userprog/process.c | 3 +- src/userprog/process.h | 2 + src/vm/page.c | 124 ++++++++++++++++++++++++++++++++++++++--- src/vm/page.h | 29 ++++++++++ 5 files changed, 152 insertions(+), 10 deletions(-) diff --git a/src/threads/thread.h b/src/threads/thread.h index f031981..eb4e9a6 100644 --- a/src/threads/thread.h +++ b/src/threads/thread.h @@ -143,6 +143,10 @@ struct thread struct hash open_files; /* Hash Table of FD -> Struct File. */ #endif +#ifdef VM + struct hash pages; /* Table of open user pages. */ +#endif + void *curr_esp; /* Owned by thread.c. */ diff --git a/src/userprog/process.c b/src/userprog/process.c index aa5091c..85ebee5 100644 --- a/src/userprog/process.c +++ b/src/userprog/process.c @@ -118,7 +118,6 @@ process_execute (const char *cmd) static void *get_usr_kpage (enum palloc_flags flags, void *upage); static void free_usr_kpage (void *kpage); -static bool install_page (void *upage, void *kpage, bool writable); static bool process_init_stack (char *cmd_saveptr, void **esp, char *file_name); static void *push_to_stack (void **esp, void *data, size_t data_size); @@ -809,7 +808,7 @@ free_usr_kpage (void *kpage) with palloc_get_page(). Returns true on success, false if UPAGE is already mapped or if memory allocation fails. */ -static bool +bool install_page (void *upage, void *kpage, bool writable) { struct thread *t = thread_current (); diff --git a/src/userprog/process.h b/src/userprog/process.h index 688cd2a..7cf3df4 100644 --- a/src/userprog/process.h +++ b/src/userprog/process.h @@ -8,4 +8,6 @@ int process_wait (tid_t); void process_exit (void); void process_activate (void); +bool install_page (void *upage, void *kpage, bool writable); + #endif /* userprog/process.h */ diff --git a/src/vm/page.c b/src/vm/page.c index 1f4ef42..7133fca 100644 --- a/src/vm/page.c +++ b/src/vm/page.c @@ -1,25 +1,133 @@ #include "page.h" -#include "userprog/pagedir.h" +#include +#include +#include "filesys/file.h" #include "threads/pte.h" +#include "threads/malloc.h" +#include "threads/palloc.h" +#include "userprog/process.h" +#include "userprog/pagedir.h" +#include "vm/frame.h" #define SWAP_FLAG_BIT 9 #define ADDR_START_BIT 12 +/* Hashing function needed for the SPT table. 
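+   The table maps a user page's start address (upage) to the page_entry
+   metadata needed to load that page on demand.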
Returns a hash for an entry, + based on its upage. */ +unsigned +page_hash (const struct hash_elem *e, UNUSED void *aux) +{ + struct page_entry *page = hash_entry (e, struct page_entry, elem); + return hash_ptr (page->upage); +} + +/* Comparator function for the SPT table. Compares two entries based on their + upages. */ +bool +page_less (const struct hash_elem *a_, const struct hash_elem *b_, + void *aux UNUSED) +{ + const struct page_entry *a = hash_entry (a_, struct page_entry, elem); + const struct page_entry *b = hash_entry (b_, struct page_entry, elem); + + return a->upage < b->upage; +} + +/* Allocate and insert a new page entry into the thread's page table. */ +struct page_entry * +page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes, + uint32_t zero_bytes, bool writable, enum page_type type) +{ + struct page_entry *page = malloc(sizeof (struct page_entry)); + if (page == NULL) + return NULL; + + page->file = file; + page->offset = ofs; + page->upage = upage; + page->read_bytes = read_bytes; + page->zero_bytes = zero_bytes; + page->writable = writable; + page->type = type; + + hash_insert (&thread_current ()->pages, &page->elem); + return page; +} + +/* Gets a page_entry from the starting address of the page. Returns NULL if no + such page_entry exists in the hash map.*/ +struct page_entry * +page_get (void *upage) +{ + struct page_entry fake_page_entry; + fake_page_entry.upage = upage; + + struct hash_elem *e + = hash_find (&thread_current ()->pages, &fake_page_entry.elem); + + if (e == NULL) + return NULL; + + return hash_entry (e, struct page_entry, elem); +} + +bool +page_load (struct page_entry *page, bool writable) +{ + /* Allocate a frame for the page. If a frame allocation fails, then + frame_alloc should try to evict a page. If it is still NULL, the OS + panics as this should not happen if eviction is working correctly. */ + void *frame = frame_alloc (0, page->upage, thread_current ()); + if (frame == NULL) + PANIC ("Could not allocate a frame to load page into memory."); + + /* Map the page to the frame. */ + if (!install_page (page->upage, frame, writable)) + { + frame_free (frame); + return false; + } + + /* Move the file pointer to the correct location in the file. Then, read the + data from the file into the frame. Checks that we were able to read the + expected number of bytes. */ + file_seek (page->file, page->offset); + if (file_read (page->file, frame, page->read_bytes) != (int) page->read_bytes) + { + frame_free (frame); + return false; + } + + /* Zero out the remaining bytes in the frame. */ + memset (frame + page->read_bytes, 0, page->zero_bytes); + + /* Mark the page as loaded successfully. */ + return true; +} + +/* Function to clean up a page_entry. Given the elem of that page_entry, frees + the page_entry itself. */ +void +page_cleanup (struct hash_elem *e, void *aux UNUSED) +{ + free (hash_entry (e, struct page_entry, elem)); +} + /* Updates the 'owner' thread's page table entry for virtual address 'upage' to flag the page as being stored in swap, and stores the specified swap slot value in the entry at the address bits for later retrieval from disk. */ void page_set_swap (struct thread *owner, void *upage, size_t swap_slot) { - uint32_t *pte = lookup_page (owner->pagedir, upage, false); + uint32_t *pte = lookup_page (owner->pagedir, upage, false); - /* Store the provided swap slot in the address bits of the page table - entry, truncating excess bits. 
*/ - *pte |= (1 << SWAP_FLAG_BIT); - uint32_t swap_slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR; - *pte = (*pte & PTE_FLAGS) | swap_slot_bits; + /* Store the provided swap slot in the address bits of the page table + entry, truncating excess bits. */ + *pte |= (1 << SWAP_FLAG_BIT); + uint32_t swap_slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR; + *pte = (*pte & PTE_FLAGS) | swap_slot_bits; - invalidate_pagedir (owner->pagedir); + invalidate_pagedir (owner->pagedir); } /* Returns true iff the page with user address 'upage' owned by 'owner' diff --git a/src/vm/page.h b/src/vm/page.h index 2b3ca6d..a7b39fe 100644 --- a/src/vm/page.h +++ b/src/vm/page.h @@ -2,7 +2,36 @@ #define VM_PAGE_H #include "threads/thread.h" +#include "filesys/off_t.h" +enum page_type { + PAGE_EXECUTABLE, + PAGE_EMPTY +}; + +struct page_entry { + enum page_type type; /* Type of Data that should go into the page */ + void *upage; /* Start Address of the User Page (Key of hash table). */ + + /* File Data */ + struct file *file; /* Pointer to the file for executables. */ + off_t offset; /* Offset of the page content within the file. */ + uint32_t read_bytes; /* Number of bytes to read within the page. */ + uint32_t zero_bytes; /* Number of bytes to zero within the page. */ + bool writable; /* Flag for whether this page is writable or not. */ + + struct hash_elem elem; /* An elem for the hash table. */ +}; + +unsigned page_hash (const struct hash_elem *e, void *aux); +bool page_less (const struct hash_elem *a_, const struct hash_elem *b_, + void *aux); +struct page_entry *page_insert (struct file *file, off_t ofs, void *upage, + uint32_t read_bytes, uint32_t zero_bytes, + bool writable, enum page_type type); +struct page_entry *page_get (void *upage); +bool page_load (struct page_entry *page, bool writable); +void page_cleanup (struct hash_elem *e, void *aux); void page_set_swap (struct thread *, void *, size_t); bool page_in_swap (struct thread *, void *); size_t page_get_swap (struct thread *, void *); From 1e236a5c47c44040ddfc720a5307286ebe327328 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Wed, 4 Dec 2024 19:11:37 +0000 Subject: [PATCH 29/34] Merge branch 'vm/lazy-loading' into vm/page-swap-synch --- src/threads/thread.c | 4 +++- src/userprog/exception.c | 43 ++++++++++++++++++++++++++++++++++-- src/userprog/exception.h | 4 ++++ src/userprog/process.c | 47 +++++++++------------------------------- src/userprog/syscall.c | 2 +- src/vm/frame.c | 1 - src/vm/page.c | 1 - 7 files changed, 59 insertions(+), 43 deletions(-) diff --git a/src/threads/thread.c b/src/threads/thread.c index 91a12b5..c2944cc 100644 --- a/src/threads/thread.c +++ b/src/threads/thread.c @@ -15,6 +15,7 @@ #include "threads/switch.h" #include "threads/synch.h" #include "threads/vaddr.h" +#include "vm/page.h" #ifdef USERPROG #include "userprog/process.h" #include "userprog/syscall.h" @@ -264,7 +265,8 @@ thread_create (const char *name, int priority, if (!hash_init (&t->open_files, fd_hash, fd_less, NULL) || !hash_init (&t->child_results, process_result_hash, - process_result_less, t)) + process_result_less, t) + || !hash_init (&t->pages, page_hash, page_less, NULL)) { palloc_free_page (t); free (t->result); diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 7c4f2ad..ed6a6ba 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -5,13 +5,13 @@ #include "userprog/gdt.h" #include "threads/interrupt.h" #include "threads/thread.h" -#include "userprog/pagedir.h" #ifdef VM #include "vm/stackgrowth.h" 
#include "vm/frame.h" #include "vm/page.h" #include "devices/swap.h" #include "threads/vaddr.h" +#include "userprog/pagedir.h" #endif /* Number of page faults processed. */ @@ -19,6 +19,7 @@ static long long page_fault_cnt; static void kill (struct intr_frame *); static void page_fault (struct intr_frame *); +bool try_fetch_page (void *upage, bool write); /* Registers handlers for interrupts that can be caused by user programs. @@ -156,6 +157,7 @@ page_fault (struct intr_frame *f) #ifdef VM struct thread *t = thread_current (); + void *upage = pg_round_down (fault_addr); if (user) { if (not_present) @@ -167,7 +169,6 @@ page_fault (struct intr_frame *f) if (page_in_swap (t, fault_addr)) { size_t swap_slot = page_get_swap (t, fault_addr); - void *upage = pg_round_down (fault_addr); void *kpage = frame_alloc (0, upage, t); swap_in (kpage, swap_slot); @@ -192,6 +193,15 @@ page_fault (struct intr_frame *f) f->eax = 0xffffffff; return; } + + /* If the fault address is in a user page that is not present, then it might + just need to be lazily loaded. So, we check our SPT to see if the page + is expected to have data loaded in memory. */ + if (not_present && is_user_vaddr (upage) && upage != NULL) + { + if (try_fetch_page (upage, write)) + return; + } #endif /* To implement virtual memory, delete the rest of the function @@ -205,3 +215,32 @@ page_fault (struct intr_frame *f) kill (f); } +bool +try_fetch_page (void *upage, bool write) +{ + /* Check if the page is in the supplemental page table. That is, it is a page + that is expected to be in memory. */ + struct page_entry *page = page_get (upage); + if (page == NULL) + return false; + + /* An attempt to write to a non-writeable should fail. */ + if (write && !page->writable) + return false; + + /* Load the page into memory based on the type of data it is expecting. */ + bool success = false; + switch (page->type) { + case PAGE_EXECUTABLE: + success = page_load (page, page->writable); + break; + default: + return false; + } + + if (success && page->writable && + !pagedir_is_writable(thread_current()->pagedir, upage)) + pagedir_set_writable(thread_current()->pagedir, upage, true); + + return success; +} diff --git a/src/userprog/exception.h b/src/userprog/exception.h index f83e615..663db4b 100644 --- a/src/userprog/exception.h +++ b/src/userprog/exception.h @@ -1,6 +1,8 @@ #ifndef USERPROG_EXCEPTION_H #define USERPROG_EXCEPTION_H +#include + /* Page fault error code bits that describe the cause of the exception. */ #define PF_P 0x1 /* 0: not-present page. 1: access rights violation. */ #define PF_W 0x2 /* 0: read, 1: write. 
*/ @@ -8,5 +10,7 @@ void exception_init (void); void exception_print_stats (void); +bool +try_fetch_page (void *upage, bool write); #endif /* userprog/exception.h */ diff --git a/src/userprog/process.c b/src/userprog/process.c index 85ebee5..f4a7439 100644 --- a/src/userprog/process.c +++ b/src/userprog/process.c @@ -24,6 +24,7 @@ #include "threads/vaddr.h" #include "threads/synch.h" #include "devices/timer.h" +#include "vm/page.h" #ifdef VM #include "vm/frame.h" #endif @@ -118,6 +119,7 @@ process_execute (const char *cmd) static void *get_usr_kpage (enum palloc_flags flags, void *upage); static void free_usr_kpage (void *kpage); +bool install_page (void *upage, void *kpage, bool writable); static bool process_init_stack (char *cmd_saveptr, void **esp, char *file_name); static void *push_to_stack (void **esp, void *data, size_t data_size); @@ -363,6 +365,7 @@ process_exit (void) /* Clean up all open files */ hash_destroy (&cur->open_files, fd_cleanup); + hash_destroy (&cur->pages, page_cleanup); /* Close the executable file, implicitly allowing it to be written to. */ if (cur->exec_file != NULL) @@ -620,7 +623,6 @@ load (const char *file_name, void (**eip) (void), void **esp) done: /* We arrive here whether the load is successful or not. */ - file_close (file); lock_release (&filesys_lock); return success; } @@ -688,58 +690,29 @@ validate_segment (const struct Elf32_Phdr *phdr, struct file *file) or disk read error occurs. */ static bool load_segment (struct file *file, off_t ofs, uint8_t *upage, - uint32_t read_bytes, uint32_t zero_bytes, bool writable) + uint32_t read_bytes, uint32_t zero_bytes, bool writable) { ASSERT ((read_bytes + zero_bytes) % PGSIZE == 0); ASSERT (pg_ofs (upage) == 0); ASSERT (ofs % PGSIZE == 0); - file_seek (file, ofs); - while (read_bytes > 0 || zero_bytes > 0) + while (read_bytes > 0 || zero_bytes > 0) { /* Calculate how to fill this page. We will read PAGE_READ_BYTES bytes from FILE and zero the final PAGE_ZERO_BYTES bytes. */ size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE; size_t page_zero_bytes = PGSIZE - page_read_bytes; - - /* Check if virtual page already allocated */ - struct thread *t = thread_current (); - uint8_t *kpage = pagedir_get_page (t->pagedir, upage); - - if (kpage == NULL){ - - /* Get a new page of memory. */ - kpage = get_usr_kpage (0, upage); - if (kpage == NULL){ - return false; - } - - /* Add the page to the process's address space. */ - if (!install_page (upage, kpage, writable)) - { - free_usr_kpage (kpage); - return false; - } - - } else { - - /* Check if writable flag for the page should be updated */ - if(writable && !pagedir_is_writable(t->pagedir, upage)){ - pagedir_set_writable(t->pagedir, upage, writable); - } - - } - /* Load data into the page. */ - if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes){ - return false; - } - memset (kpage + page_read_bytes, 0, page_zero_bytes); + /* Add the page metadata to the SPT to be lazy loaded later on */ + if (page_insert (file, ofs, upage, page_read_bytes, page_zero_bytes, + writable, PAGE_EXECUTABLE) == NULL) + return false; /* Advance. 
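         The file offset must now be advanced explicitly as well, since each
         SPT entry records the offset at which its page's data begins in the
         file.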
*/ read_bytes -= page_read_bytes; zero_bytes -= page_zero_bytes; + ofs += PGSIZE; upage += PGSIZE; } return true; diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c index 4bc34ca..2ccf3a2 100644 --- a/src/userprog/syscall.c +++ b/src/userprog/syscall.c @@ -536,4 +536,4 @@ put_user (uint8_t *udst, uint8_t byte) : "=&a"(error_code), "=m"(*udst) : "q"(byte)); return error_code != -1; -} \ No newline at end of file +} diff --git a/src/vm/frame.c b/src/vm/frame.c index 6d401b0..98339f8 100644 --- a/src/vm/frame.c +++ b/src/vm/frame.c @@ -260,4 +260,3 @@ lru_prev (struct list_elem *e) return list_prev (e); } - diff --git a/src/vm/page.c b/src/vm/page.c index 7133fca..79c176f 100644 --- a/src/vm/page.c +++ b/src/vm/page.c @@ -154,4 +154,3 @@ page_get_swap (struct thread *owner, void *upage) /* Masks the address bits and returns truncated value. */ return ((*pte & PTE_ADDR) >> ADDR_START_BIT); } - From 723055f4855ed8f2237cb9d4629518a5b76cbd0f Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Wed, 4 Dec 2024 21:33:21 +0000 Subject: [PATCH 30/34] fix: only use lazy loading if VM flag is enabled --- src/threads/thread.c | 13 ++++++--- src/userprog/exception.c | 3 +++ src/userprog/process.c | 57 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 4 deletions(-) diff --git a/src/threads/thread.c b/src/threads/thread.c index c2944cc..b512aee 100644 --- a/src/threads/thread.c +++ b/src/threads/thread.c @@ -259,14 +259,19 @@ thread_create (const char *name, int priority, return TID_ERROR; } +#define USERPROG #ifdef USERPROG /* Initialize the thread's file descriptor table. */ t->fd_counter = MINIMUM_USER_FD; - if (!hash_init (&t->open_files, fd_hash, fd_less, NULL) - || !hash_init (&t->child_results, process_result_hash, - process_result_less, t) - || !hash_init (&t->pages, page_hash, page_less, NULL)) + bool success = hash_init (&t->open_files, fd_hash, fd_less, NULL); + success = success && hash_init (&t->child_results, process_result_hash, + process_result_less, t); +#ifdef VM + success = success && hash_init (&t->pages, page_hash, page_less, NULL); +#endif + + if (!success) { palloc_free_page (t); free (t->result); diff --git a/src/userprog/exception.c b/src/userprog/exception.c index ed6a6ba..4e104d5 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -215,6 +215,7 @@ page_fault (struct intr_frame *f) kill (f); } +#ifdef VM bool try_fetch_page (void *upage, bool write) { @@ -244,3 +245,5 @@ try_fetch_page (void *upage, bool write) return success; } +#endif + diff --git a/src/userprog/process.c b/src/userprog/process.c index f4a7439..9ef5282 100644 --- a/src/userprog/process.c +++ b/src/userprog/process.c @@ -365,7 +365,9 @@ process_exit (void) /* Clean up all open files */ hash_destroy (&cur->open_files, fd_cleanup); +#ifdef VM hash_destroy (&cur->pages, page_cleanup); +#endif /* Close the executable file, implicitly allowing it to be written to. */ if (cur->exec_file != NULL) @@ -623,6 +625,9 @@ load (const char *file_name, void (**eip) (void), void **esp) done: /* We arrive here whether the load is successful or not. */ +#ifndef VM + file_close (file); +#endif lock_release (&filesys_lock); return success; } @@ -696,6 +701,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage, ASSERT (pg_ofs (upage) == 0); ASSERT (ofs % PGSIZE == 0); +#ifdef VM while (read_bytes > 0 || zero_bytes > 0) { /* Calculate how to fill this page. 
@@ -716,6 +722,57 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage, upage += PGSIZE; } return true; +#else + file_seek (file, ofs); + while (read_bytes > 0 || zero_bytes > 0) + { + /* Calculate how to fill this page. + We will read PAGE_READ_BYTES bytes from FILE + and zero the final PAGE_ZERO_BYTES bytes. */ + size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE; + size_t page_zero_bytes = PGSIZE - page_read_bytes; + + /* Check if virtual page already allocated */ + struct thread *t = thread_current (); + uint8_t *kpage = pagedir_get_page (t->pagedir, upage); + + if (kpage == NULL){ + + /* Get a new page of memory. */ + kpage = get_usr_kpage (0, upage); + if (kpage == NULL){ + return false; + } + + /* Add the page to the process's address space. */ + if (!install_page (upage, kpage, writable)) + { + free_usr_kpage (kpage); + return false; + } + + } else { + + /* Check if writable flag for the page should be updated */ + if(writable && !pagedir_is_writable(t->pagedir, upage)){ + pagedir_set_writable(t->pagedir, upage, writable); + } + + } + + /* Load data into the page. */ + if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes){ + return false; + } + memset (kpage + page_read_bytes, 0, page_zero_bytes); + + /* Advance. */ + read_bytes -= page_read_bytes; + zero_bytes -= page_zero_bytes; + upage += PGSIZE; + } + return true; +#endif } /* Create a minimal stack by mapping a zeroed page at the top of From 60faf995ea664d9d76bb8c919564f5383642271d Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Wed, 4 Dec 2024 22:21:31 +0000 Subject: [PATCH 31/34] fix: lazy load executable files of user processes even when accessed in a kernel context --- src/userprog/exception.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 4e104d5..39a8173 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -158,6 +158,16 @@ page_fault (struct intr_frame *f) #ifdef VM struct thread *t = thread_current (); void *upage = pg_round_down (fault_addr); + + /* If the fault address is in a user page that is not present, then it might + be an executable file page that needs to be lazily loaded. So, we check the + SPT to determine if this is the case, and if so load the page from disk. */ + if (not_present && is_user_vaddr (upage)) + { + if (try_fetch_page (upage, write)) + return; + } + if (user) { if (not_present) @@ -193,15 +203,6 @@ page_fault (struct intr_frame *f) f->eax = 0xffffffff; return; } - - /* If the fault address is in a user page that is not present, then it might - just need to be lazily loaded. So, we check our SPT to see if the page - is expected to have data loaded in memory. 
*/ - if (not_present && is_user_vaddr (upage) && upage != NULL) - { - if (try_fetch_page (upage, write)) - return; - } #endif /* To implement virtual memory, delete the rest of the function From 0288e132069cfa138d43fe953b293e91f5afd41d Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Wed, 4 Dec 2024 23:46:31 +0000 Subject: [PATCH 32/34] fix: don't discriminate between user and kernel page fault contexts for stack growth, lazy loading, and swapping --- src/userprog/exception.c | 65 ++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 36 deletions(-) diff --git a/src/userprog/exception.c b/src/userprog/exception.c index 39a8173..0edf38e 100644 --- a/src/userprog/exception.c +++ b/src/userprog/exception.c @@ -156,49 +156,42 @@ page_fault (struct intr_frame *f) user = (f->error_code & PF_U) != 0; #ifdef VM - struct thread *t = thread_current (); void *upage = pg_round_down (fault_addr); - - /* If the fault address is in a user page that is not present, then it might - be an executable file page that needs to be lazily loaded. So, we check the - SPT to determine if this is the case, and if so load the page from disk. */ - if (not_present && is_user_vaddr (upage)) + if (not_present && is_user_vaddr(upage)) { + struct thread *t = thread_current (); + void *esp = user ? f->esp : t->curr_esp; + + /* Check if the non-present user page is in the swap partition. + If so, swap it back into main memory, updating the PTE for + the faulted virtual address to point to the newly allocated + frame. */ + if (page_in_swap (t, fault_addr)) + { + size_t swap_slot = page_get_swap (t, fault_addr); + void *kpage = frame_alloc (0, upage, t); + swap_in (kpage, swap_slot); + + bool writeable = pagedir_is_writable (t->pagedir, upage); + if (pagedir_set_page (t->pagedir, upage, kpage, writeable)) return; + } + + /* Handle user page faults that need to be resolved by dynamic + stack growth by checking if this is such a fault and responding + accordingly. */ + if (handle_stack_fault (fault_addr, esp)) return; + + /* Handle user page faults that need to be resolved by lazy loading + of executable files by checking if they contain entries in the + SPT hash map and responding accordingly. */ if (try_fetch_page (upage, write)) return; } - if (user) + /* Allows for page faults within a kernel context to communicate with + user pages for sending error codes. */ + if (!user) { - if (not_present) - { - /* Check if the non-present user page is in the swap partition. - If so, swap it back into main memory, updating the PTE for - the faulted virtual address to point to the newly allocated - frame. */ - if (page_in_swap (t, fault_addr)) - { - size_t swap_slot = page_get_swap (t, fault_addr); - void *kpage = frame_alloc (0, upage, t); - swap_in (kpage, swap_slot); - - bool writeable = pagedir_is_writable (t->pagedir, upage); - if (pagedir_set_page (t->pagedir, upage, kpage, writeable)) return; - } - - /* Handle user page faults that need to be resolved by dynamic - stack growth by checking if this is such a fault and responding - accordingly. */ - if (handle_stack_fault (fault_addr, f->esp)) return; - } - } - else - { - /* Handle kernel page faults that need to be resolved by dynamic stack - growth by checking if this is such a fault and responding - accordingly. 
*/ - if (not_present && handle_stack_fault (fault_addr, t->curr_esp)) return; - f->eip = (void *)f->eax; f->eax = 0xffffffff; return; From 19d5b02341e8d93ab985e95ff7aa6c6c7c5c66a3 Mon Sep 17 00:00:00 2001 From: Themis Demetriades Date: Wed, 4 Dec 2024 23:48:51 +0000 Subject: [PATCH 33/34] fix: remove use of USERPROG compiler flag specific code when the flag is disabled --- src/threads/thread.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/threads/thread.c b/src/threads/thread.c index b512aee..5a292b5 100644 --- a/src/threads/thread.c +++ b/src/threads/thread.c @@ -259,7 +259,6 @@ thread_create (const char *name, int priority, return TID_ERROR; } -#define USERPROG #ifdef USERPROG /* Initialize the thread's file descriptor table. */ t->fd_counter = MINIMUM_USER_FD; From f06c91cf0d7c8f545d82a9d3adf4c2a8e6e14aee Mon Sep 17 00:00:00 2001 From: "Demetriades, Themis" Date: Thu, 5 Dec 2024 00:29:49 +0000 Subject: [PATCH 34/34] ci: include linear page tests in VM test pipeline --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a82d826..b4b143c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -37,4 +37,4 @@ test_vm: extends: .pintos_tests variables: DIR: vm - IGNORE: (tests/vm/pt-overflowstk|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-read|tests/vm/mmap-close|tests/vm/mmap-overlap|tests/vm/mmap-twice|tests/vm/mmap-write|tests/vm/mmap-exit|tests/vm/mmap-shuffle|tests/vm/mmap-clean|tests/vm/mmap-inherit|tests/vm/mmap-misalign|tests/vm/mmap-null|tests/vm/mmap-over-code|tests/vm/mmap-over-data|tests/vm/mmap-over-stk|tests/vm/mmap-remove) + IGNORE: (tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-read|tests/vm/mmap-close|tests/vm/mmap-overlap|tests/vm/mmap-twice|tests/vm/mmap-write|tests/vm/mmap-exit|tests/vm/mmap-shuffle|tests/vm/mmap-clean|tests/vm/mmap-inherit|tests/vm/mmap-misalign|tests/vm/mmap-null|tests/vm/mmap-over-code|tests/vm/mmap-over-data|tests/vm/mmap-over-stk|tests/vm/mmap-remove)