diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 768269b..b4b143c 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -37,4 +37,4 @@ test_vm:
   extends: .pintos_tests
   variables:
     DIR: vm
-    IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-read|tests/vm/mmap-close|tests/vm/mmap-overlap|tests/vm/mmap-twice|tests/vm/mmap-write|tests/vm/mmap-exit|tests/vm/mmap-shuffle|tests/vm/mmap-clean|tests/vm/mmap-inherit|tests/vm/mmap-misalign|tests/vm/mmap-null|tests/vm/mmap-over-code|tests/vm/mmap-over-data|tests/vm/mmap-over-stk|tests/vm/mmap-remove)
+    IGNORE: (tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-read|tests/vm/mmap-close|tests/vm/mmap-overlap|tests/vm/mmap-twice|tests/vm/mmap-write|tests/vm/mmap-exit|tests/vm/mmap-shuffle|tests/vm/mmap-clean|tests/vm/mmap-inherit|tests/vm/mmap-misalign|tests/vm/mmap-null|tests/vm/mmap-over-code|tests/vm/mmap-over-data|tests/vm/mmap-over-stk|tests/vm/mmap-remove)
diff --git a/src/Makefile.build b/src/Makefile.build
index 7778f57..53ac6d3 100644
--- a/src/Makefile.build
+++ b/src/Makefile.build
@@ -65,6 +65,8 @@ userprog_SRC += userprog/tss.c	# TSS management.
 vm_SRC += vm/frame.c		# Frame table manager.
 vm_SRC += vm/page.c		# Page table manager.
 vm_SRC += devices/swap.c	# Swap block manager.
+vm_SRC += vm/stackgrowth.c	# Stack growth functions.
+#vm_SRC = vm/file.c		# Some other file.
 
 # Filesystem code.
 filesys_SRC  = filesys/filesys.c	# Filesystem core.
diff --git a/src/threads/thread.c b/src/threads/thread.c
index ed37d2f..b891cbc 100644
--- a/src/threads/thread.c
+++ b/src/threads/thread.c
@@ -265,10 +265,14 @@ thread_create (const char *name, int priority,
 
   /* Initialize the thread's file descriptor table. */
   t->fd_counter = MINIMUM_USER_FD;
-  if (!hash_init (&t->open_files, fd_hash, fd_less, NULL)
-      || !hash_init (&t->child_results, process_result_hash,
-                     process_result_less, t)
-      || !init_pages (t))
+  bool success = hash_init (&t->open_files, fd_hash, fd_less, NULL);
+  success = success && hash_init (&t->child_results, process_result_hash,
+                                  process_result_less, t);
+#ifdef VM
+  success = success && init_pages (t);
+#endif
+
+  if (!success)
     {
       palloc_free_page (t);
       free (t->result);
diff --git a/src/threads/thread.h b/src/threads/thread.h
index bdfad35..eb4e9a6 100644
--- a/src/threads/thread.h
+++ b/src/threads/thread.h
@@ -135,8 +135,6 @@ struct thread
     /* Shared between thread.c and synch.c. */
     struct list_elem elem;              /* List element. */
 
-    struct hash pages;                  /* Table of open user pages. */
-
 #ifdef USERPROG
     /* Owned by userprog/process.c. */
     uint32_t *pagedir;                  /* Page directory. */
@@ -145,6 +143,12 @@ struct thread
     struct hash open_files;             /* Hash Table of FD -> Struct File. */
 #endif
 
+#ifdef VM
+    struct hash pages;                  /* Table of open user pages. */
+#endif
+
+    void *curr_esp;                     /* User ESP saved on syscall entry. */
+
     /* Owned by thread.c. */
     unsigned magic;                     /* Detects stack overflow. */
   };
diff --git a/src/userprog/exception.c b/src/userprog/exception.c
index 8be1c71..54f6e20 100644
--- a/src/userprog/exception.c
+++ b/src/userprog/exception.c
@@ -1,19 +1,26 @@
 #include "userprog/exception.h"
 #include <inttypes.h>
 #include <stdio.h>
+#include <stdbool.h>
 #include "userprog/gdt.h"
 #include "userprog/pagedir.h"
 #include "threads/interrupt.h"
 #include "threads/thread.h"
-#include "threads/vaddr.h"
+#ifdef VM
+#include "vm/stackgrowth.h"
+#include "vm/frame.h"
 #include "vm/page.h"
+#include "devices/swap.h"
+#include "threads/vaddr.h"
+#include "userprog/pagedir.h"
+#endif
 
 /* Number of page faults processed. */
 static long long page_fault_cnt;
 
 static void kill (struct intr_frame *);
 static void page_fault (struct intr_frame *);
-bool try_fetch_page (void *upage, bool write);
+static bool try_fetch_page (void *upage, bool write);
 
 /* Registers handlers for interrupts that can be caused by user
    programs.
@@ -149,16 +156,49 @@ page_fault (struct intr_frame *f)
   write = (f->error_code & PF_W) != 0;
   user = (f->error_code & PF_U) != 0;
 
-  /* If the fault address is in a user page that is not present, then it might
-     just need to be lazily loaded. So, we check our SPT to see if the page
-     is expected to have data loaded in memory. */
+#ifdef VM
   void *upage = pg_round_down (fault_addr);
-  if (not_present && is_user_vaddr (upage) && upage != NULL)
+  if (not_present && is_user_vaddr (upage))
     {
+      struct thread *t = thread_current ();
+      void *esp = user ? f->esp : t->curr_esp;
+
+      /* Check whether the non-present user page is in the swap partition.
+         If so, swap it back into main memory, updating the PTE for the
+         faulting virtual address to point to the newly allocated frame. */
+      if (page_in_swap (t, fault_addr))
+        {
+          size_t swap_slot = page_get_swap (t, fault_addr);
+          void *kpage = frame_alloc (0, upage, t);
+          swap_in (kpage, swap_slot);
+
+          bool writeable = pagedir_is_writable (t->pagedir, upage);
+          if (pagedir_set_page (t->pagedir, upage, kpage, writeable))
+            return;
+        }
+
+      /* Handle faults that should be resolved by dynamic stack growth:
+         check whether this looks like a stack access and, if so, grow
+         the stack by one page. */
+      if (handle_stack_fault (fault_addr, esp))
+        return;
+
+      /* Handle faults that should be resolved by lazily loading an
+         executable page: check whether the address has an entry in the
+         SPT hash map and load it if so. */
       if (try_fetch_page (upage, write))
         return;
     }
 
+  /* Allow a page fault taken in kernel context (e.g. while dereferencing a
+     user pointer inside a system call) to report failure to the faulting
+     code instead of killing the kernel: get_user ()/put_user () leave a
+     recovery address in EAX, so jump there and signal the error. */
+  if (!user)
+    {
+      f->eip = (void *) f->eax;
+      f->eax = 0xffffffff;
+      return;
+    }
+#endif
+
   /* To implement virtual memory, delete the rest of the function
      body, and replace it with code that brings in the page to
      which fault_addr refers. */
@@ -170,7 +210,8 @@ page_fault (struct intr_frame *f)
   kill (f);
 }
 
-bool
+#ifdef VM
+static bool
 try_fetch_page (void *upage, bool write)
 {
   /* Check if the page is in the supplemental page table. That is, it is a page
@@ -195,3 +236,4 @@ try_fetch_page (void *upage, bool write)
 
   return success;
 }
+#endif
diff --git a/src/userprog/exception.h b/src/userprog/exception.h
index 663db4b..211cc3c 100644
--- a/src/userprog/exception.h
+++ b/src/userprog/exception.h
@@ -10,7 +10,5 @@
 void exception_init (void);
 void exception_print_stats (void);
 
-bool
-try_fetch_page (void *upage, bool write);
 
 #endif /* userprog/exception.h */
diff --git a/src/userprog/pagedir.c b/src/userprog/pagedir.c
index ef5bbff..886bb25 100644
--- a/src/userprog/pagedir.c
+++ b/src/userprog/pagedir.c
@@ -7,7 +7,6 @@
 #include "threads/palloc.h"
 
 static uint32_t *active_pd (void);
-static void invalidate_pagedir (uint32_t *);
 
 /* Creates a new page directory that has mappings for kernel
    virtual addresses, but none for user virtual addresses.
@@ -53,7 +52,7 @@ pagedir_destroy (uint32_t *pd)
    on CREATE.
    If CREATE is true, then a new page table is created and a
    pointer into it is returned.  Otherwise, a null pointer is
    returned. */
-static uint32_t *
+uint32_t *
 lookup_page (uint32_t *pd, const void *vaddr, bool create)
 {
   uint32_t *pt, *pde;
@@ -278,7 +277,7 @@ active_pd (void)
    This function invalidates the TLB if PD is the active page
    directory.  (If PD is not active then its entries are not in
    the TLB, so there is no need to invalidate anything.) */
-static void
+void
 invalidate_pagedir (uint32_t *pd)
 {
   if (active_pd () == pd)
diff --git a/src/userprog/pagedir.h b/src/userprog/pagedir.h
index 06e45d2..6b8fd26 100644
--- a/src/userprog/pagedir.h
+++ b/src/userprog/pagedir.h
@@ -6,6 +6,7 @@
 
 uint32_t *pagedir_create (void);
 void pagedir_destroy (uint32_t *pd);
+uint32_t *lookup_page (uint32_t *pd, const void *vaddr, bool create);
 bool pagedir_set_page (uint32_t *pd, void *upage, void *kpage, bool rw);
 void *pagedir_get_page (uint32_t *pd, const void *upage);
 void pagedir_clear_page (uint32_t *pd, void *upage);
@@ -16,5 +17,6 @@ void pagedir_set_accessed (uint32_t *pd, const void *upage, bool accessed);
 bool pagedir_is_writable (uint32_t *pd, const void *upage);
 void pagedir_set_writable (uint32_t *pd, const void *upage, bool writable);
 void pagedir_activate (uint32_t *pd);
+void invalidate_pagedir (uint32_t *pd);
 
 #endif /* userprog/pagedir.h */
diff --git a/src/userprog/process.c b/src/userprog/process.c
index 205d0f2..ccd44b0 100644
--- a/src/userprog/process.c
+++ b/src/userprog/process.c
@@ -366,8 +366,10 @@ process_exit (void)
 
   /* Clean up all open files */
   hash_destroy (&cur->open_files, fd_cleanup);
+#ifdef VM
   hash_destroy (&cur->pages, page_cleanup);
   unuse_shared_file (cur->exec_file);
+#endif
 
   /* Close the executable file, implicitly allowing it to be written to. */
   if (cur->exec_file != NULL)
@@ -625,6 +627,9 @@ load (const char *file_name, void (**eip) (void), void **esp)
 
  done:
   /* We arrive here whether the load is successful or not. */
+#ifndef VM
+  file_close (file);
+#endif
   lock_release (&filesys_lock);
   return success;
 }
@@ -698,6 +703,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
 
   ASSERT (pg_ofs (upage) == 0);
   ASSERT (ofs % PGSIZE == 0);
 
+#ifdef VM
   while (read_bytes > 0 || zero_bytes > 0)
     {
       /* Calculate how to fill this page.
@@ -718,6 +724,58 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
       upage += PGSIZE;
     }
   return true;
+#else
+  file_seek (file, ofs);
+  while (read_bytes > 0 || zero_bytes > 0)
+    {
+      /* Calculate how to fill this page.
+         We will read PAGE_READ_BYTES bytes from FILE
+         and zero the final PAGE_ZERO_BYTES bytes. */
+      size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE;
+      size_t page_zero_bytes = PGSIZE - page_read_bytes;
+
+      /* Check if the virtual page is already allocated. */
+      struct thread *t = thread_current ();
+      uint8_t *kpage = pagedir_get_page (t->pagedir, upage);
+
+      if (kpage == NULL)
+        {
+          /* Get a new page of memory. */
+          kpage = get_usr_kpage (0, upage);
+          if (kpage == NULL)
+            return false;
+
+          /* Add the page to the process's address space. */
+          if (!install_page (upage, kpage, writable))
+            {
+              free_usr_kpage (kpage);
+              return false;
+            }
+        }
+      else
+        {
+          /* Check if the writable flag for the page should be updated. */
+          if (writable && !pagedir_is_writable (t->pagedir, upage))
+            pagedir_set_writable (t->pagedir, upage, writable);
+        }
+
+      /* Load data into the page. */
+      if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes)
+        return false;
+      memset (kpage + page_read_bytes, 0, page_zero_bytes);
+
+      /* Advance. */
+      read_bytes -= page_read_bytes;
+      zero_bytes -= page_zero_bytes;
+      ofs += PGSIZE;
+      upage += PGSIZE;
+    }
+  return true;
+#endif
 }
 
 /* Create a minimal stack by mapping a zeroed page at the top of
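The #ifdef VM branch of load_segment () above no longer reads segment data eagerly: the loop body that falls between the two hunks (not shown in this diff) records each page in the supplemental page table so that try_fetch_page () can load it on first fault. A minimal sketch of that pattern follows; record_lazy_page () is a hypothetical stand-in for the project's page_insert (), whose full parameter list is not visible in this patch.

    /* Sketch only: record_lazy_page () stands in for page_insert (). */
    while (read_bytes > 0 || zero_bytes > 0)
      {
        size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE;
        size_t page_zero_bytes = PGSIZE - page_read_bytes;

        /* Remember where the page's bytes live; nothing is read from disk
           until the first fault on UPAGE reaches try_fetch_page (). */
        if (record_lazy_page (file, ofs, upage, page_read_bytes,
                              page_zero_bytes, writable) == NULL)
          return false;

        read_bytes -= page_read_bytes;
        zero_bytes -= page_zero_bytes;
        ofs += PGSIZE;
        upage += PGSIZE;
      }
    return true;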
diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c
index c6d4259..18da74b 100644
--- a/src/userprog/syscall.c
+++ b/src/userprog/syscall.c
@@ -12,6 +12,7 @@
 #include "userprog/process.h"
 #include "userprog/pagedir.h"
 #include
+#include
 #include
 
 #define MAX_SYSCALL_ARGS 3
@@ -47,8 +48,11 @@ static unsigned syscall_tell (int fd);
 static void syscall_close (int fd);
 
 static struct open_file *fd_get_file (int fd);
-static void validate_user_pointer (const void *start, size_t size, bool write);
-static void validate_user_string (const char *str);
+static void validate_user_pointer (const void *ptr, size_t size,
+                                   bool check_write);
+static void validate_user_string (const char *str, bool check_write);
+static int get_user (const uint8_t *);
+static bool put_user (uint8_t *, uint8_t);
 
 /* A struct defining a syscall_function pointer along with its arity. */
 struct syscall_arguments
@@ -98,7 +102,8 @@ syscall_handler (struct intr_frame *f)
 {
   /* First, read the system call number from the stack. */
   validate_user_pointer (f->esp, sizeof (uintptr_t), false);
-  uintptr_t syscall_number = *(int *) f->esp;
+  uintptr_t syscall_number = *(int *)f->esp;
+  thread_current ()->curr_esp = f->esp;
 
   /* Ensures the number corresponds to a system call that can be handled. */
   if (syscall_number >= LOOKUP_SIZE)
@@ -109,10 +114,9 @@ syscall_handler (struct intr_frame *f)
   /* Next, read and copy the arguments from the stack pointer. */
   validate_user_pointer (f->esp + sizeof (uintptr_t),
                          syscall.arity * sizeof (uintptr_t), false);
-
-  uintptr_t args[MAX_SYSCALL_ARGS] = {0};
+  uintptr_t args[MAX_SYSCALL_ARGS] = { 0 };
   for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++)
-    args[i] = *(uintptr_t *) (f->esp + sizeof (uintptr_t) * (i + 1));
+    args[i] = *(uintptr_t *)(f->esp + sizeof (uintptr_t) * (i + 1));
 
   /* Call the function that handles this system call with the arguments.
      When there is a return value it is stored in f->eax. */
@@ -141,8 +145,7 @@ syscall_exit (int status)
 static pid_t
 syscall_exec (const char *cmd_line)
 {
-  /* Validate the user string before executing the process. */
-  validate_user_string (cmd_line);
+  validate_user_string (cmd_line, false);
 
   return process_execute (cmd_line); /* Returns the PID of the new process */
 }
@@ -161,8 +164,7 @@ syscall_wait (pid_t pid)
 static bool
 syscall_create (const char *file, unsigned initial_size)
 {
-  /* Validate the user string before creating the file. */
-  validate_user_string (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
@@ -179,8 +181,7 @@ syscall_create (const char *file, unsigned initial_size)
 static bool
 syscall_remove (const char *file)
 {
-  /* Validate the user string before removing the file. */
-  validate_user_string (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
@@ -198,8 +199,7 @@ syscall_remove (const char *file)
 static int
 syscall_open (const char *file)
 {
-  /* Validate the user string before opening the file. */
-  validate_user_string (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
@@ -265,7 +265,6 @@ syscall_read (int fd, void *buffer, unsigned size)
   if (fd < STDIN_FILENO || fd == STDOUT_FILENO)
     return EXIT_FAILURE;
 
-  /* Validate the user buffer for the provided size before reading. */
   validate_user_pointer (buffer, size, true);
 
   if (fd == STDIN_FILENO)
@@ -309,7 +308,6 @@ syscall_write (int fd, const void *buffer, unsigned size)
   if (fd <= 0)
     return 0;
 
-  /* Validate the user buffer for the provided size before writing. */
   validate_user_pointer (buffer, size, false);
 
   if (fd == STDOUT_FILENO)
@@ -452,67 +450,91 @@ fd_get_file (int fd)
   return hash_entry (e, struct open_file, elem);
 }
 
-/* Validates if a block of memory starting at START and of size SIZE bytes is
-   fully contained within user virtual memory. Kills the thread (by exiting with
-   failure) if the memory is invalid. Otherwise, returns (nothing) normally.
-   If the size is 0, the function does no checks and returns the given ptr. */
+/* Validates that a block of memory starting at PTR and of size SIZE bytes is
+   fully contained within valid user virtual memory.  Exits the thread (with
+   failure status) if the memory is invalid.
+   If SIZE is 0, no checks are performed. */
 static void
-validate_user_pointer (const void *start, size_t size, bool write)
+validate_user_pointer (const void *ptr, size_t size, bool check_write)
 {
-  /* If the size is 0, we do not need to check anything. */
   if (size == 0)
     return;
-
-  const void *end = start + size - 1;
-
-  /* Check if the start and end pointers are valid user virtual addresses. */
-  if (start == NULL || !is_user_vaddr (start) || !is_user_vaddr (end))
+  /* PTR <= PTR + SIZE - 1, so it suffices to check that (PTR + SIZE - 1) is
+     a valid user virtual address; unmapped pages are caught below. */
+  const void *last = ptr + size - 1;
+  if (!is_user_vaddr (last))
     syscall_exit (EXIT_FAILURE);
-
-  /* We no longer check if the memory is mapped to physical memory. This is
-     because the data may not necessarily be there at the time of the syscall,
-     but it may be lazily loaded later. In such case, we try to preload the
-     page. If that fails, we exit the thread. */
-  for (void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
-    if (pagedir_get_page (thread_current ()->pagedir, ptr) == NULL &&
-        !try_fetch_page (ptr, write))
-      syscall_exit (EXIT_FAILURE);
+  ptr = pg_round_down (ptr);
+  while (ptr <= last)
+    {
+      int result;
+      /* Check read access to the pointer. */
+      if ((result = get_user ((const uint8_t *) ptr)) == -1)
+        syscall_exit (EXIT_FAILURE);
+      /* Check write access to the pointer (if required). */
+      if (check_write && !put_user ((uint8_t *) ptr, result))
+        syscall_exit (EXIT_FAILURE);
+      ptr += PGSIZE;
+    }
 }
 
-/* Validates if a string is fully contained within user virtual memory. Kills
-   the thread (by exiting with failure) if the memory is invalid. Otherwise,
-   returns (nothing) normally. */
+/* Validates that a C string starting at PTR is fully contained within valid
+   user virtual memory.  Exits the thread if the memory is invalid. */
 static void
-validate_user_string (const char *str)
+validate_user_string (const char *ptr, bool check_write)
 {
-  /* Check if the string pointer is a valid user virtual address. */
-  if (str == NULL || !is_user_vaddr (str))
-    syscall_exit (EXIT_FAILURE);
+  size_t offset = (uintptr_t) ptr % PGSIZE;
 
-  /* Calculate the offset of the string within the (first) page. */
-  size_t offset = (uintptr_t) str % PGSIZE;
-
-  /* We move page by page, checking if the page is mapped to physical memory. */
   for (;;)
     {
-      void *page = pg_round_down (str);
+      void *page = pg_round_down (ptr);
 
-      /* If we reach addresses that are not mapped to physical memory before the
-         end of the string, the thread is terminated. */
-      if (!is_user_vaddr(page) ||
-          (pagedir_get_page (thread_current ()->pagedir, page) == NULL &&
-           !try_fetch_page (page, false)))
-        syscall_exit (EXIT_FAILURE);
+      if (!is_user_vaddr (page))
+        syscall_exit (EXIT_FAILURE);
+      if (!is_user_vaddr (ptr))
+        syscall_exit (EXIT_FAILURE);
+      int result;
+      if ((result = get_user ((const uint8_t *) ptr)) == -1)
+        syscall_exit (EXIT_FAILURE);
+      if (check_write && !put_user ((uint8_t *) ptr, result))
+        syscall_exit (EXIT_FAILURE);
 
       while (offset < PGSIZE)
         {
-          if (*str == '\0')
+          if (*ptr == '\0')
             return; /* We reached the end of the string without issues. */
-          str++;
+          ptr++;
           offset++;
         }
-      offset = 0; /* Next page will start at the beginning. */
+      offset = 0; /* The next page is checked from its first byte. */
     }
 }
+
+/* PROVIDED BY SPEC.
+   Reads a byte at user virtual address UADDR.
+   UADDR must be below PHYS_BASE.
+   Returns the byte value if successful, -1 if a segfault occurred. */
+static int
+get_user (const uint8_t *uaddr)
+{
+  int result;
+  asm ("movl $1f, %0; movzbl %1, %0; 1:" : "=&a" (result) : "m" (*uaddr));
+  return result;
+}
+
+/* PROVIDED BY SPEC.
+   Writes BYTE to user address UDST.
+   UDST must be below PHYS_BASE.
+   Returns true if successful, false if a segfault occurred. */
+static bool
+put_user (uint8_t *udst, uint8_t byte)
+{
+  int error_code;
+  asm ("movl $1f, %0; movb %b2, %1; 1:"
+       : "=&a" (error_code), "=m" (*udst)
+       : "q" (byte));
+  return error_code != -1;
+}
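validate_user_pointer () above probes a single byte per page, which is enough because get_user ()/put_user () fault lazily and the !user branch added to page_fault () turns an unresolved kernel-mode fault on a user address into a -1/false return instead of a kill. The same primitives extend naturally to copying data out of user space; a sketch under that assumption follows (copy_in_user () is illustrative and not part of this patch).

    /* Copy SIZE bytes from user address USRC into kernel buffer KDST.
       Returns false if any byte is unmapped or above PHYS_BASE.  Relies on
       get_user () above and the !user recovery path in page_fault (). */
    static bool
    copy_in_user (void *kdst, const void *usrc, size_t size)
    {
      uint8_t *dst = kdst;
      const uint8_t *src = usrc;
      size_t i;

      for (i = 0; i < size; i++)
        {
          if (!is_user_vaddr (src + i))
            return false;
          int byte = get_user (src + i);
          if (byte == -1)
            return false;        /* Fault was absorbed by page_fault (). */
          dst[i] = (uint8_t) byte;
        }
      return true;
    }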
diff --git a/src/vm/frame.c b/src/vm/frame.c
index d8e5da3..98339f8 100644
--- a/src/vm/frame.c
+++ b/src/vm/frame.c
@@ -7,6 +7,7 @@
 #include "page.h"
 #include "threads/malloc.h"
 #include "threads/vaddr.h"
+#include "userprog/pagedir.h"
 #include "threads/synch.h"
 #include "devices/swap.h"
 
@@ -14,20 +15,22 @@
    to its corresponding 'frame_metadata'.*/
 struct hash frame_table;
 
-/* Linked list of frame_metadata whose pages are predicted to currently
-   be in the working set of a process. They are not considered for
-   eviction, but are considered for demotion to the 'inactive' list. */
-struct list active_list;
+/* Linked list representing the circular queue of the 'clock' algorithm
+   for page eviction.  Iterating from the element currently pointed to by
+   'next_victim' yields the entries ordered from oldest to newest, in
+   terms of when they were added or last checked for having been
+   referenced by a process. */
+struct list lru_list;
 
-/* Linked list of frame_metadata whose pages are predicted to leave the
-   working set of their processes soon, so are considered for eviction.
-   Pages are considered for eviction from the tail end, and are initially
-   demoted to 'inactive' at the head. */
-struct list inactive_list;
+/* The next element in lru_list to be considered for eviction (the oldest
+   added or referenced page in the circular queue).  If this page has an
+   'accessed' bit of 0 when eviction is attempted, it becomes the next
+   victim; otherwise the following element in the queue is considered in
+   the same way. */
+struct list_elem *next_victim = NULL;
 
 /* Synchronisation variables. */
-/* Protects access to the 'inactive' list. */
-struct lock inactive_lock;
+/* Protects access to 'lru_list'. */
+struct lock lru_lock;
 
 struct frame_metadata
   {
@@ -45,22 +48,24 @@ struct frame_metadata
 hash_hash_func frame_metadata_hash;
 hash_less_func frame_metadata_less;
 
+static struct list_elem *lru_next (struct list_elem *e);
+static struct list_elem *lru_prev (struct list_elem *e);
 static struct frame_metadata *get_victim (void);
 
 /* Initialize the frame system by initializing the frame (hash) table
    with the frame_metadata hashing and comparison functions, as well as initializing
-   the active & inactive lists. Also initializes the system's
-   synchronisation primitives. */
+   'lru_list' and its associated synchronisation primitives. */
 void
 frame_init (void)
 {
   hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
-  list_init (&active_list);
-  list_init (&inactive_list);
-  lock_init (&inactive_lock);
+  list_init (&lru_list);
+  lock_init (&lru_lock);
 }
 
+/* TODO: Consider synchronisation more closely (i.e. locking just the hash
+   table). */
 /* Attempt to allocate a frame for a user process, either by direct
    allocation of a user page if there is sufficient RAM, or by
    evicting a currently active page if memory allocated for user
@@ -69,7 +74,10 @@ frame_init (void)
 void *
 frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
 {
+  struct frame_metadata *frame_metadata;
   flags |= PAL_USER;
+
+  lock_acquire (&lru_lock);
   void *frame = palloc_get_page (flags);
 
   /* If a frame couldn't be allocated we must be out of main memory. Thus,
@@ -77,11 +85,20 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
      into disk. */
   if (frame == NULL)
     {
-      /* TODO: Deal with race condition wherein a page may be evicted in one
-         thread while it's in the middle of being evicted in another. */
+      /* 1. Obtain victim. */
+      if (next_victim == NULL)
+        PANIC ("Couldn't allocate a single page to main memory!\n");
+
       struct frame_metadata *victim = get_victim ();
-      if (victim == NULL)
-        return NULL;
+      ASSERT (victim != NULL); /* get_victim () should never return NULL. */
+
+      /* 2. Swap out victim into disk. */
+      /* Mark the page as 'not present' and flag the page directory as
+         modified *before* eviction begins, to prevent the owner of the
+         victim page from accessing or modifying it mid-eviction. */
+      pagedir_clear_page (victim->owner->pagedir, victim->upage);
+
+      /* TODO: Lock the PTE of the victim page for the victim process. */
 
       size_t swap_slot = swap_out (victim->frame);
       page_set_swap (victim->owner, victim->upage, swap_slot);
@@ -90,30 +107,50 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
       if (flags & PAL_ZERO)
         memset (victim->frame, 0, PGSIZE);
 
-      frame = victim->frame;
+      /* 3. Store the new frame's metadata inside the same structure that
+         stored the victim's metadata.  As both the new frame and the
+         victim frame share the same kernel virtual address, neither the
+         hash table entry nor the list_elem needs updating: the new frame
+         takes over the victim's lru_list position. */
+      frame_metadata = victim;
+    }
+
+  /* If sufficient main memory allows the frame to be directly allocated,
+     we must add a new entry to the frame table and grow lru_list. */
+  else
+    {
+      /* Must own lru_lock here, as otherwise there is a race condition
+         with next_victim either being NULL or uninitialized. */
+      frame_metadata = malloc (sizeof (struct frame_metadata));
+      frame_metadata->frame = frame;
+
+      /* Newly allocated frames are pushed to the back of the circular queue
+         represented by lru_list.  The case where the circular queue is
+         empty (next_victim == NULL) must be handled explicitly. */
+      if (next_victim == NULL)
+        {
+          list_push_back (&lru_list, &frame_metadata->list_elem);
+          next_victim = &frame_metadata->list_elem;
+        }
+      else
+        {
+          struct list_elem *lru_tail = lru_prev (next_victim);
+          list_insert (lru_tail, &frame_metadata->list_elem);
+        }
+
+      hash_insert (&frame_table, &frame_metadata->hash_elem);
     }
 
-  struct frame_metadata *frame_metadata =
-      malloc (sizeof (struct frame_metadata));
-  frame_metadata->frame = frame;
   frame_metadata->upage = upage;
   frame_metadata->owner = owner;
+  lock_release (&lru_lock);
 
-  /* Newly faulted pages begin at the head of the inactive list. */
-  lock_acquire (&inactive_lock);
-  list_push_front (&inactive_list, &frame_metadata->list_elem);
-  lock_release (&inactive_lock);
-
-  /* Finally, insert frame metadata within the frame table, with the key as its
-     allocated kernel address. */
-  hash_replace (&frame_table, &frame_metadata->hash_elem);
-
-  return frame;
+  return frame_metadata->frame;
 }
 
 /* Attempt to deallocate a frame for a user process by removing it from the
-   frame table as well as active/inactive list, and freeing the underlying
-   page memory. Panics if the frame isn't active in memory. */
+   frame table as well as lru_list, and freeing the underlying page
+   memory & metadata struct. Panics if the frame isn't active in memory. */
 void
 frame_free (void *frame)
 {
@@ -122,33 +159,56 @@ frame_free (void *frame)
   struct hash_elem *e = hash_delete (&frame_table, &key_metadata.hash_elem);
 
-  if (e == NULL) PANIC ("Attempted to free a frame without a corresponding "
-                        "kernel address!\n");
+  if (e == NULL) PANIC ("Attempted to free a frame at kernel address %p, "
+                        "but this address is not allocated!\n", frame);
 
   struct frame_metadata *frame_metadata =
       hash_entry (e, struct frame_metadata, hash_elem);
+
+  lock_acquire (&lru_lock);
+
+  /* If we're freeing the frame marked as the next victim, advance
+     next_victim to the next least recently used page, or set it to NULL
+     if no other page is loaded in main memory.  Do this before removal,
+     while the element's links into lru_list are still valid. */
+  if (&frame_metadata->list_elem == next_victim)
+    {
+      if (list_front (&lru_list) == list_back (&lru_list))
+        next_victim = NULL;
+      else
+        next_victim = lru_next (next_victim);
+    }
+
   list_remove (&frame_metadata->list_elem);
+  lock_release (&lru_lock);
+
+  free (frame_metadata);
 
   palloc_free_page (frame);
 }
 
-/* Obtain a pointer to the metadata of the frame we should evict next. */
+/* TODO: Account for page aliases when checking the accessed bit. */
+/* Pre-condition: the calling thread owns lru_lock and lru_list is
+   non-empty. */
 static struct frame_metadata *
 get_victim (void)
 {
-  lock_acquire (&inactive_lock);
-  if (list_empty (&inactive_list))
+  struct list_elem *e = next_victim;
+  struct frame_metadata *frame_metadata;
+  uint32_t *pd;
+  void *upage;
+  for (;;)
     {
-      return NULL;
-    }
-  else
-    {
-      struct list_elem *victim_elem = list_pop_back (&inactive_list);
-      lock_release (&inactive_lock);
-
-      return list_entry (victim_elem, struct frame_metadata, list_elem);
+      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
+      pd = frame_metadata->owner->pagedir;
+      upage = frame_metadata->upage;
+      e = lru_next (e);
+
+      if (!pagedir_is_accessed (pd, upage))
+        break;
+
+      pagedir_set_accessed (pd, upage, false);
     }
+
+  next_victim = e;
+  return frame_metadata;
 }
 
 /* Hash function for frame metadata, used for storing entries in the
@@ -177,3 +237,26 @@ frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
   return a->frame < b->frame;
 }
 
+/* Returns the element that follows E in lru_list, treating the list as a
+   circular queue (wrapping around at the tail). */
+static struct list_elem *
+lru_next (struct list_elem *e)
+{
+  if (!list_empty (&lru_list) && e == list_back (&lru_list))
+    return list_front (&lru_list);
+
+  return list_next (e);
+}
+
+/* Returns the element that precedes E in lru_list, treating the list as a
+   circular queue (wrapping around at the head). */
+static struct list_elem *
+lru_prev (struct list_elem *e)
+{
+  if (!list_empty (&lru_list) && e == list_front (&lru_list))
+    return list_back (&lru_list);
+
+  return list_prev (e);
+}
*/ + *pte |= (1 << SWAP_FLAG_BIT); + uint32_t swap_slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR; + *pte = (*pte & PTE_FLAGS) | swap_slot_bits; + + invalidate_pagedir (owner->pagedir); +} + +/* Returns true iff the page with user address 'upage' owned by 'owner' + is flagged to be in the swap disk via the owner's page table. */ +bool +page_in_swap (struct thread *owner, void *upage) +{ + uint32_t *pte = lookup_page (owner->pagedir, upage, false); + return pte != NULL && + (*pte & (1 << SWAP_FLAG_BIT)) != 0; } /* Given that the page with user address 'upage' owned by 'owner' is flagged @@ -383,5 +405,11 @@ page_set_swap (struct thread *owner, void *upage, size_t swap_slot) size_t page_get_swap (struct thread *owner, void *upage) { - return 0; + uint32_t *pte = lookup_page (owner->pagedir, upage, false); + + ASSERT (pte != NULL); + ASSERT ((*pte & PTE_P) == 0); + + /* Masks the address bits and returns truncated value. */ + return ((*pte & PTE_ADDR) >> ADDR_START_BIT); } diff --git a/src/vm/page.h b/src/vm/page.h index b23703d..6dc5b84 100644 --- a/src/vm/page.h +++ b/src/vm/page.h @@ -50,12 +50,12 @@ struct page_entry *page_insert (struct file *file, off_t ofs, void *upage, struct page_entry *page_get (void *upage); bool page_load (struct page_entry *page); void page_cleanup (struct hash_elem *e, void *aux UNUSED); +void page_set_swap (struct thread *, void *, size_t); +bool page_in_swap (struct thread *, void *); +size_t page_get_swap (struct thread *, void *); void shared_files_init (); bool use_shared_file (struct file *file); bool unuse_shared_file (struct file *file); -void page_set_swap (struct thread *, void *, size_t); -size_t page_get_swap (struct thread *, void *); - #endif /* vm/frame.h */ diff --git a/src/vm/stackgrowth.c b/src/vm/stackgrowth.c new file mode 100644 index 0000000..cf44ed5 --- /dev/null +++ b/src/vm/stackgrowth.c @@ -0,0 +1,59 @@ +#include +#include "stackgrowth.h" +#include "frame.h" +#include "threads/palloc.h" +#include "threads/thread.h" +#include "threads/vaddr.h" +#include "userprog/pagedir.h" + +#define MAX_STACK_ACCESS_DIST 32 + +static bool is_stack_fault (const void *addr, const void *esp); +static bool grow_stack (const void *addr); + +/* Determine whether a particular page fault occured due to a stack + access below the stack pointer that should induce stack growth, and + if so grow the stack by a single page (capped at MAX_STACK_SIZE). */ +bool +handle_stack_fault (const void *ptr, const void *esp) +{ + return is_stack_fault (ptr, esp) && grow_stack (ptr); +} + +/* Determines whether a particular page fault appears to be caused by + a stack access that should induce dynamic stack growth. Stack size + is capped at MAX_STACK_SIZE. */ +static bool +is_stack_fault (const void *addr, const void *esp) +{ + return ((uint32_t*)addr >= ((uint32_t*)esp - MAX_STACK_ACCESS_DIST) && + ((PHYS_BASE - pg_round_down (addr)) <= MAX_STACK_SIZE)); +} + +/* Grows the stack of the process running inside the current thread by a single + page given a user virtual address inside of the page wherein the new section + of the stack should be allocated. */ +static bool +grow_stack (const void *addr) +{ + struct thread *t = thread_current (); + void *last_page = pg_round_down (addr); + + /* This function should only be called when dealing with a faulting stack + access that induces stack growth, so the provided address shouldn't be + present in a page within the current thread's page directory. 
*/ + ASSERT (pagedir_get_page (t->pagedir, last_page) == NULL); + + uint8_t *new_page = frame_alloc (PAL_ZERO, last_page, t); + if (new_page == NULL) + return false; + + if (!pagedir_set_page (t->pagedir, last_page, new_page, true)) + { + frame_free (new_page); + return false; + } + + return true; +} + diff --git a/src/vm/stackgrowth.h b/src/vm/stackgrowth.h new file mode 100644 index 0000000..a19366e --- /dev/null +++ b/src/vm/stackgrowth.h @@ -0,0 +1,10 @@ +#ifndef VM_GROWSTACK_H +#define VM_GROWSTACK_H + +#include + +#define MAX_STACK_SIZE 8388608 // (8MB) + +bool handle_stack_fault (const void *ptr, const void *esp); + +#endif /* vm/frame.h */