Compare commits
54 Commits
vm/memory- ... vm/mmap-wr
| SHA1 |
|---|
| a34bbbed08 |
| 2811ea0eb3 |
| 7860f3863f |
| d03e253046 |
| e779e8ac7c |
| 16db01d3d8 |
| c12cd95093 |
| f13fd435cd |
| ac31fb1e1e |
| 1a8eb1bbe5 |
| 52ec8fe779 |
| f171a05108 |
| f06c91cf0d |
| 5265fed288 |
| 19d5b02341 |
| 0288e13206 |
| 60faf995ea |
| 723055f485 |
| 1e236a5c47 |
| 4bf6914cfa |
| fb73d694bf |
| 1b73e415d7 |
| 47a7dfae04 |
| 9a3c8a1c38 |
| 08eafcf7ef |
| df7d847978 |
| fbcd3c9f19 |
| 6190d1bee6 |
| 6adf2e743b |
| 05a48cf9c6 |
| bb16abdc0d |
| 8e278b349a |
| 9d35beb2e4 |
| 7ce512305e |
| 775b73a3e9 |
| d8edc6d3fe |
| 5682974f9d |
| 6f85d7642d |
| 94adc11f03 |
| 40c553d68b |
| 13de832586 |
| 5c661c2e24 |
| 5f40d83e66 |
| 149bb42889 |
| 4f84a83611 |
| c74a8c55aa |
| c670c29e47 |
| af7f2ba873 |
| 3ef5264b6e |
| 59e7a64f8e |
| cf4bf90cbb |
| 9a6abab95e |
| 44f6a85163 |
| 83e044cf68 |
```diff
@@ -66,6 +66,7 @@ vm_SRC += vm/frame.c # Frame table manager.
 vm_SRC += vm/page.c # Page table manager.
 vm_SRC += vm/mmap.c # Memory-mapped files.
 vm_SRC += devices/swap.c # Swap block manager.
+#vm_SRC = vm/file.c # Some other file.
 
 # Filesystem code.
 filesys_SRC = filesys/filesys.c # Filesystem core.
```
```diff
@@ -149,6 +149,8 @@ struct thread
     struct hash open_files;   /* Hash Table of FD -> Struct File. */
 #endif
 
+    void *curr_esp;
+
     /* Owned by thread.c. */
     unsigned magic;           /* Detects stack overflow. */
   };
```
```diff
@@ -1,19 +1,31 @@
 #include "userprog/exception.h"
 #include <inttypes.h>
 #include <stdio.h>
+#include "stdbool.h"
+#include "threads/synch.h"
 #include "userprog/gdt.h"
 #include "userprog/pagedir.h"
+#include "userprog/process.h"
 #include "threads/interrupt.h"
+#include "threads/palloc.h"
 #include "threads/thread.h"
 #include "threads/vaddr.h"
+#include "vm/frame.h"
 #include "vm/page.h"
+#include "devices/swap.h"
+#include "userprog/pagedir.h"
 
+#define MAX_STACK_SIZE (8 * 1024 * 1024) // 8MB
+#define MAX_STACK_OFFSET 32 // 32 bytes offset below stack pointer (ESP)
+
 /* Number of page faults processed. */
 static long long page_fault_cnt;
 
 static void kill (struct intr_frame *);
 static void page_fault (struct intr_frame *);
-bool try_fetch_page (void *upage, bool write);
+static bool is_valid_stack_access (const void *fault_addr, const void *esp);
+static bool grow_stack (void *upage);
+bool fetch_page (void *upage, bool write);
 
 /* Registers handlers for interrupts that can be caused by user
    programs.
```
```diff
@@ -149,6 +161,26 @@ page_fault (struct intr_frame *f)
   write = (f->error_code & PF_W) != 0;
   user = (f->error_code & PF_U) != 0;
 
+  /* Select the appropriate stack pointer based on the context of the fault. */
+  void *esp = user ? f->esp : thread_current ()->curr_esp;
+
+  /* If the fault address is in a user page that is not present, then it might
+     be just that the stack needs to grow or that it needs to be lazily loaded.
+     So we attempt to grow the stack. If this does not work, we check our SPT to
+     see if the page is expected to have data loaded in memory. */
+  void *upage = pg_round_down (fault_addr);
+  if (not_present && is_user_vaddr (upage) && upage != NULL)
+    {
+      if (fetch_page (upage, write))
+        return;
+
+      if (is_valid_stack_access (fault_addr, esp))
+        if (grow_stack (upage))
+          return;
+    }
+
+  /* If the page fault occurred in kernel mode, then we intentionally indicate
+     a fault (for get_user() etc). */
   if (!user)
     {
       f->eip = (void *)f->eax;
```
```diff
@@ -156,17 +188,6 @@ page_fault (struct intr_frame *f)
       return;
     }
 
-
-  /* If the fault address is in a user page that is not present, then it might
-     just need to be lazily loaded. So, we check our SPT to see if the page
-     is expected to have data loaded in memory. */
-  void *upage = pg_round_down (fault_addr);
-  if (not_present && is_user_vaddr (upage) && upage != NULL)
-    {
-      if (try_fetch_page (upage, write))
-        return;
-    }
-
   /* To implement virtual memory, delete the rest of the function
      body, and replace it with code that brings in the page to
      which fault_addr refers. */
```
```diff
@@ -178,8 +199,56 @@ page_fault (struct intr_frame *f)
   kill (f);
 }
 
+/* Validates whether the fault address is a valid stack access. Access is a
+   valid stack access under the following two conditions:
+   1. The fault address must be within MAX_STACK_OFFSET (32) bytes below
+   the current stack pointer. (Accounts for both PUSH and PUSHA instructions)
+   2. Growing this stack to this address does not cause it to exceed the
+   MAX_STACK_SIZE (8MB) limit.
+
+   Returns true if both conditions are met, false otherwise.
+
+   Pre: fault_addr is a valid user virtual address (so also not NULL). */
+static bool
+is_valid_stack_access (const void *fault_addr, const void *esp)
+{
+  uint32_t new_stack_size = PHYS_BASE - pg_round_down (fault_addr);
+
+  uint32_t *lowest_valid_push_addr = (uint32_t *)esp - MAX_STACK_OFFSET;
+  bool is_within_push_range = (uint32_t *)fault_addr >= lowest_valid_push_addr;
+
+  return is_within_push_range && new_stack_size <= MAX_STACK_SIZE;
+}
+
+/* Attempts to grow the stack by allocating and mapping a new page.
+   This involves:
+   1. Allocating a zeroed page from the user pool
+   2. Installing it into the page table with write permissions
+
+   Returns true if the stack was successfully grown, false if either
+   allocation or installation fails.
+
+   Pre: upage is a valid page-aligned address (so also not NULL). */
+static bool
+grow_stack (void *upage)
+{
+  /* Allocate new page for stack */
+  void *new_page = frame_alloc (PAL_ZERO, upage, thread_current ());
+  if (new_page == NULL)
+    return false;
+
+  /* Install the page into user page table */
+  if (!pagedir_set_page (thread_current ()->pagedir, upage, new_page, true))
+    {
+      frame_free (new_page);
+      return false;
+    }
+
+  return true;
+}
+
 bool
-try_fetch_page (void *upage, bool write)
+fetch_page (void *upage, bool write)
 {
   /* Check if the page is in the supplemental page table. That is, it is a page
      that is expected to be in memory. */
```
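A quick numeric sanity check of the heuristic in is_valid_stack_access () above. This is a standalone illustration with assumed values (PHYS_BASE at its usual Pintos default of 0xc0000000); it restates the two commented conditions in byte terms rather than calling the static helper itself.

```c
/* Worked example of the stack-growth heuristic (illustrative, standalone).
   A fault within 32 bytes below esp is treated as a PUSH/PUSHA, provided the
   resulting stack would stay within the 8 MB limit below PHYS_BASE. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PHYS_BASE        ((uint32_t) 0xc0000000)  /* Assumed Pintos default. */
#define MAX_STACK_SIZE   (8 * 1024 * 1024)        /* 8 MB, as in the diff. */
#define MAX_STACK_OFFSET 32                       /* PUSHA window, as above. */

static bool
stack_access_ok (uint32_t fault_addr, uint32_t esp)
{
  uint32_t page_base = fault_addr & ~0xfffu;      /* pg_round_down. */
  bool within_push_range = fault_addr >= esp - MAX_STACK_OFFSET;
  bool within_limit = PHYS_BASE - page_base <= MAX_STACK_SIZE;
  return within_push_range && within_limit;
}

int
main (void)
{
  uint32_t esp = PHYS_BASE - 4096;                /* One page of stack. */
  assert (stack_access_ok (esp - 32, esp));       /* PUSHA just below esp. */
  assert (!stack_access_ok (esp - 64, esp));      /* Too far below esp. */
  assert (!stack_access_ok (PHYS_BASE - 9 * 1024 * 1024,
                            PHYS_BASE - 9 * 1024 * 1024)); /* > 8 MB deep. */
  return 0;
}
```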
```diff
@@ -187,6 +256,31 @@ try_fetch_page (void *upage, bool write)
   if (page == NULL)
     return false;
 
+  /* Check if the non-present user page is in the swap partition.
+     If so, swap it back into main memory, updating the PTE for
+     the faulted virtual address to point to the newly allocated
+     frame. */
+  struct thread *t = thread_current ();
+  if (page_in_swap (t, upage))
+    {
+      /* NOTE: This code should be refactored and moved into helper functions
+         within 'page.c'.*/
+      void *kpage = frame_alloc (0, upage, t);
+      lock_acquire (&page->lock);
+
+      size_t swap_slot = page_get_swap (t, upage);
+      swap_in (kpage, swap_slot);
+
+      lock_release (&page->lock);
+
+      bool writeable = pagedir_is_writable (t->pagedir, upage);
+
+      /* TODO: When this returns false we should quit the page fault,
+         but currently we continue and check the stack conditions in the
+         page fault handler. */
+      return pagedir_set_page (t->pagedir, upage, kpage, writeable);
+    }
+
   /* An attempt to write to a non-writeable should fail. */
   if (write && !page->writable)
     return false;
```
```diff
@@ -194,8 +288,9 @@ try_fetch_page (void *upage, bool write)
   /* Load the page into memory based on the type of data it is expecting. */
   bool success = false;
   switch (page->type) {
+    case PAGE_MMAP:
     case PAGE_FILE:
-      success = page_load (page, page->writable);
+      success = page_load_file (page, page->writable);
       break;
     default:
       return false;
```
```diff
@@ -7,7 +7,6 @@
 #include "threads/palloc.h"
 
 static uint32_t *active_pd (void);
-static void invalidate_pagedir (uint32_t *);
 
 /* Creates a new page directory that has mappings for kernel
    virtual addresses, but none for user virtual addresses.
```
```diff
@@ -53,7 +52,7 @@ pagedir_destroy (uint32_t *pd)
    on CREATE. If CREATE is true, then a new page table is
    created and a pointer into it is returned. Otherwise, a null
    pointer is returned. */
-static uint32_t *
+uint32_t *
 lookup_page (uint32_t *pd, const void *vaddr, bool create)
 {
   uint32_t *pt, *pde;
```
```diff
@@ -278,7 +277,7 @@ active_pd (void)
    This function invalidates the TLB if PD is the active page
    directory. (If PD is not active then its entries are not in
    the TLB, so there is no need to invalidate anything.) */
-static void
+void
 invalidate_pagedir (uint32_t *pd)
 {
   if (active_pd () == pd)
```
```diff
@@ -6,6 +6,7 @@
 
 uint32_t *pagedir_create (void);
 void pagedir_destroy (uint32_t *pd);
+uint32_t *lookup_page (uint32_t *pd, const void *vaddr, bool create);
 bool pagedir_set_page (uint32_t *pd, void *upage, void *kpage, bool rw);
 void *pagedir_get_page (uint32_t *pd, const void *upage);
 void pagedir_clear_page (uint32_t *pd, void *upage);
```
```diff
@@ -16,5 +17,6 @@ void pagedir_set_accessed (uint32_t *pd, const void *upage, bool accessed);
 bool pagedir_is_writable (uint32_t *pd, const void *upage);
 void pagedir_set_writable (uint32_t *pd, const void *upage, bool writable);
 void pagedir_activate (uint32_t *pd);
+void invalidate_pagedir (uint32_t *pd);
 
 #endif /* userprog/pagedir.h */
```
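With lookup_page () and invalidate_pagedir () now exported from pagedir.h, code outside pagedir.c can work on raw page-table entries. A minimal sketch of that usage, assuming the standard PTE_P macro from threads/pte.h; it is illustrative, not part of this change.

```c
/* Illustrative sketch: with lookup_page () exported, VM code can inspect a
   page-table entry directly.  Assumes the standard Pintos PTE_* macros. */
#include <stdbool.h>
#include <stdint.h>
#include "threads/pte.h"
#include "userprog/pagedir.h"

/* Returns true if UPAGE has a PTE in PD and that PTE is marked present. */
static bool
pte_is_present (uint32_t *pd, const void *upage)
{
  uint32_t *pte = lookup_page (pd, upage, false); /* Do not create. */
  return pte != NULL && (*pte & PTE_P) != 0;
}
```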
```diff
@@ -369,6 +369,8 @@ process_exit (void)
 
   /* Clean up all open files */
   hash_destroy (&cur->open_files, fd_cleanup);
+
+  /* Clean up the thread's supplemental page table. */
   hash_destroy (&cur->pages, page_cleanup);
 
   /* Close the executable file, implicitly allowing it to be written to. */
```
```diff
@@ -627,6 +629,9 @@ load (const char *file_name, void (**eip) (void), void **esp)
 
 done:
   /* We arrive here whether the load is successful or not. */
+#ifndef VM
+  file_close (file);
+#endif
   lock_release (&filesys_lock);
   return success;
 }
```
```diff
@@ -709,7 +714,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
       size_t page_zero_bytes = PGSIZE - page_read_bytes;
 
       /* Add the page metadata to the SPT to be lazy loaded later on */
-      if (page_insert (file, ofs, upage, page_read_bytes, page_zero_bytes,
+      if (page_insert_file (file, ofs, upage, page_read_bytes, page_zero_bytes,
                        writable, PAGE_FILE) == NULL)
         return false;
 
```
```diff
@@ -758,6 +763,7 @@ get_usr_kpage (enum palloc_flags flags, void *upage)
     return NULL;
   else
     page = frame_alloc (flags, upage, t);
+  pagedir_set_accessed (t->pagedir, upage, true);
 #else
   page = palloc_get_page (flags | PAL_USER);
 #endif
```
```diff
@@ -1,5 +1,4 @@
 #include "userprog/syscall.h"
-#include "userprog/exception.h"
 #include "devices/shutdown.h"
 #include "devices/input.h"
 #include "filesys/file.h"
```
```diff
@@ -14,6 +13,7 @@
 #include "vm/page.h"
 #include "vm/mmap.h"
 #include <stdio.h>
+#include <stdbool.h>
 #include <syscall-nr.h>
 
 #define MAX_SYSCALL_ARGS 3
```
```diff
@@ -52,8 +52,11 @@ static mapid_t syscall_mmap (int fd, void *addr);
 static void syscall_munmap (mapid_t mapping);
 
 static struct open_file *fd_get_file (int fd);
-static void validate_user_pointer (const void *start, size_t size, bool write);
-static void validate_user_string (const char *str);
+static void validate_user_pointer (const void *ptr, size_t size,
+                                   bool check_write);
+static void validate_user_string (const char *str, bool check_write);
+static int get_user (const uint8_t *);
+static bool put_user (uint8_t *, uint8_t);
 
 /* A struct defining a syscall_function pointer along with its arity. */
 struct syscall_arguments
```
```diff
@@ -105,7 +108,8 @@ syscall_handler (struct intr_frame *f)
 {
   /* First, read the system call number from the stack. */
   validate_user_pointer (f->esp, sizeof (uintptr_t), false);
-  uintptr_t syscall_number = *(int *) f->esp;
+  uintptr_t syscall_number = *(int *)f->esp;
+  thread_current ()->curr_esp = f->esp;
 
   /* Ensures the number corresponds to a system call that can be handled. */
   if (syscall_number >= LOOKUP_SIZE)
```
```diff
@@ -116,10 +120,9 @@ syscall_handler (struct intr_frame *f)
   /* Next, read and copy the arguments from the stack pointer. */
   validate_user_pointer (f->esp + sizeof (uintptr_t),
                          syscall.arity * sizeof (uintptr_t), false);
-
-  uintptr_t args[MAX_SYSCALL_ARGS] = {0};
+  uintptr_t args[MAX_SYSCALL_ARGS] = { 0 };
   for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++)
-    args[i] = *(uintptr_t *) (f->esp + sizeof (uintptr_t) * (i + 1));
+    args[i] = *(uintptr_t *)(f->esp + sizeof (uintptr_t) * (i + 1));
 
   /* Call the function that handles this system call with the arguments. When
      there is a return value it is stored in f->eax. */
```
```diff
@@ -148,8 +151,7 @@ syscall_exit (int status)
 static pid_t
 syscall_exec (const char *cmd_line)
 {
-  /* Validate the user string before executing the process. */
-  validate_user_string (cmd_line);
+  validate_user_string (cmd_line, false);
 
   return process_execute (cmd_line); /* Returns the PID of the new process */
 }
```
```diff
@@ -168,8 +170,7 @@ syscall_wait (pid_t pid)
 static bool
 syscall_create (const char *file, unsigned initial_size)
 {
-  /* Validate the user string before creating the file. */
-  validate_user_string (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
```
```diff
@@ -186,8 +187,7 @@ syscall_create (const char *file, unsigned initial_size)
 static bool
 syscall_remove (const char *file)
 {
-  /* Validate the user string before removing the file. */
-  validate_user_string (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
```
```diff
@@ -205,8 +205,7 @@ syscall_remove (const char *file)
 static int
 syscall_open (const char *file)
 {
-  /* Validate the user string before opening the file. */
-  validate_user_string (file);
+  validate_user_string (file, false);
 
   /* Acquire the file system lock to prevent race conditions. */
   lock_acquire (&filesys_lock);
```
```diff
@@ -272,7 +271,6 @@ syscall_read (int fd, void *buffer, unsigned size)
   if (fd < STDIN_FILENO || fd == STDOUT_FILENO)
     return EXIT_FAILURE;
 
-  /* Validate the user buffer for the provided size before reading. */
   validate_user_pointer (buffer, size, true);
 
   if (fd == STDIN_FILENO)
```
```diff
@@ -316,7 +314,6 @@ syscall_write (int fd, const void *buffer, unsigned size)
   if (fd <= 0)
     return 0;
 
-  /* Validate the user buffer for the provided size before writing. */
   validate_user_pointer (buffer, size, false);
 
   if (fd == STDOUT_FILENO)
```
```diff
@@ -429,6 +426,10 @@ syscall_mmap (int fd, void *addr)
   if (file_size == 0)
     return MMAP_FAILURE;
 
+  /* Ensure that the mmap page doesn't overlap with the stack. */
+  if (addr >= (thread_current ()->curr_esp) - PGSIZE)
+    return MMAP_FAILURE;
+
   /* Check and ensure that there is enough space in the user virtual memory to
      hold the entire file. */
   for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
```
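The body of the space-checking loop is outside this hunk. As a rough, hypothetical sketch only (not the code in this change), such a check usually rejects the target range if any page in it is already mapped or already tracked in the SPT; the helper name below is invented for illustration.

```c
/* Hypothetical sketch only -- the actual loop body is not shown in this diff.
   Rejects an mmap target if any page in [addr, addr + file_size) is already
   in use, either mapped in the hardware page table or tracked in the SPT. */
#include <stdbool.h>
#include "filesys/off_t.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "vm/page.h"

static bool
mmap_range_is_free (void *addr, off_t file_size)
{
  struct thread *t = thread_current ();
  for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
    {
      void *upage = addr + ofs;
      if (pagedir_get_page (t->pagedir, upage) != NULL /* Already mapped. */
          || page_get (upage) != NULL)                 /* Already in SPT. */
        return false;
    }
  return true;
}
```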
```diff
@@ -443,8 +444,8 @@ syscall_mmap (int fd, void *addr)
       off_t read_bytes = file_size - ofs < PGSIZE ? file_size - ofs : PGSIZE;
       off_t zero_bytes = PGSIZE - read_bytes;
 
-      if (page_insert (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
-                       PAGE_FILE) == NULL)
+      if (page_insert_file (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
+                            PAGE_MMAP) == NULL)
         return MMAP_FAILURE;
     }
 
```
```diff
@@ -453,7 +454,6 @@ syscall_mmap (int fd, void *addr)
   if (mmap == NULL)
     return MMAP_FAILURE;
 
-
   return mmap->mapping;
 }
 
```
```diff
@@ -532,67 +532,91 @@ fd_get_file (int fd)
   return hash_entry (e, struct open_file, elem);
 }
 
-/* Validates if a block of memory starting at START and of size SIZE bytes is
-   fully contained within user virtual memory. Kills the thread (by exiting with
-   failure) if the memory is invalid. Otherwise, returns (nothing) normally.
-   If the size is 0, the function does no checks and returns the given ptr. */
+/* Validates if a block of memory starting at PTR and of size SIZE bytes is
+   fully contained within valid user virtual memory. thread_exit () if the
+   memory is invalid.
+   If the size is 0, the function does no checks and returns PTR. */
 static void
-validate_user_pointer (const void *start, size_t size, bool write)
+validate_user_pointer (const void *ptr, size_t size, bool check_write)
 {
-  /* If the size is 0, we do not need to check anything. */
   if (size == 0)
     return;
 
-  const void *end = start + size - 1;
-
-  /* Check if the start and end pointers are valid user virtual addresses. */
-  if (start == NULL || !is_user_vaddr (start) || !is_user_vaddr (end))
+  /* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a
+     valid user virtual memory address. */
+  void *last = ptr + size - 1;
+  if (!is_user_vaddr (last))
     syscall_exit (EXIT_FAILURE);
 
-  /* We no longer check if the memory is mapped to physical memory. This is
-     because the data may not necessarily be there at the time of the syscall,
-     but it may be lazily loaded later. In such case, we try to preload the
-     page. If that fails, we exit the thread. */
-  for (void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
-    if (pagedir_get_page (thread_current ()->pagedir, ptr) == NULL &&
-        !try_fetch_page (ptr, write))
-      syscall_exit (EXIT_FAILURE);
+  ptr = pg_round_down (ptr);
+  while (ptr <= last)
+    {
+      int result;
+      /* Check read access to pointer. */
+      if ((result = get_user (ptr)) == -1)
+        syscall_exit (EXIT_FAILURE);
+      /* Check write access to pointer (if required). */
+      if (check_write && !put_user (ptr, result))
+        syscall_exit (EXIT_FAILURE);
+      ptr += PGSIZE;
+    }
 }
 
-/* Validates if a string is fully contained within user virtual memory. Kills
-   the thread (by exiting with failure) if the memory is invalid. Otherwise,
-   returns (nothing) normally. */
+/* Validates of a C-string starting at ptr is fully contained within valid
+   user virtual memory. thread_exit () if the memory is invalid. */
 static void
-validate_user_string (const char *str)
+validate_user_string (const char *ptr, bool check_write)
 {
-  /* Check if the string pointer is a valid user virtual address. */
-  if (str == NULL || !is_user_vaddr (str))
-    syscall_exit (EXIT_FAILURE);
-
-  /* Calculate the offset of the string within the (first) page. */
-  size_t offset = (uintptr_t) str % PGSIZE;
+  size_t offset = (uintptr_t) ptr % PGSIZE;
 
-  /* We move page by page, checking if the page is mapped to physical memory. */
   for (;;)
     {
-      void *page = pg_round_down (str);
+      void *page = pg_round_down (ptr);
 
-      /* If we reach addresses that are not mapped to physical memory before the
-         end of the string, the thread is terminated. */
-      if (!is_user_vaddr(page) ||
-          (pagedir_get_page (thread_current ()->pagedir, page) == NULL &&
-           !try_fetch_page (page, false)))
+      if (!is_user_vaddr (page))
+        syscall_exit (EXIT_FAILURE);
+      if (!is_user_vaddr (ptr))
+        syscall_exit (EXIT_FAILURE);
+      int result;
+      if ((result = get_user ((const uint8_t *)ptr)) == -1)
+        syscall_exit (EXIT_FAILURE);
+      if (check_write && !put_user ((uint8_t *)ptr, result))
         syscall_exit (EXIT_FAILURE);
 
       while (offset < PGSIZE)
         {
-          if (*str == '\0')
+          if (*ptr == '\0')
            return; /* We reached the end of the string without issues. */
 
-          str++;
+          ptr++;
           offset++;
         }
 
-      offset = 0; /* Next page will start at the beginning. */
+      offset = 0;
     }
 }
 
+/* PROVIDED BY SPEC.
+   Reads a byte at user virtual address UADDR.
+   UADDR must be below PHYS_BASE.
+   Returns the byte value if successful, -1 if a segfault occurred. */
+static int
+get_user (const uint8_t *uaddr)
+{
+  int result;
+  asm ("movl $1f, %0; movzbl %1, %0; 1:" : "=&a"(result) : "m"(*uaddr));
+  return result;
+}
+
+/* PROVIDED BY SPEC.
+   Writes BYTE to user address UDST.
+   UDST must be below PHYS_BASE.
+   Returns true if successful, false if a segfault occurred. */
+static bool
+put_user (uint8_t *udst, uint8_t byte)
+{
+  int error_code;
+  asm ("movl $1f, %0; movb %b2, %1; 1:"
+       : "=&a"(error_code), "=m"(*udst)
+       : "q"(byte));
+  return error_code != -1;
+}
```
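A minimal sketch of how the fault-based get_user () primitive above is typically driven from syscall code; the copy_in () helper is illustrative and not part of this change, and it assumes it lives in syscall.c alongside get_user ().

```c
/* Illustrative only: copy SIZE bytes from user address USRC into the kernel
   buffer KDST using the fault-based get_user () primitive.  Returns false if
   any byte is below-PHYS_BASE-invalid or faults when read. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "threads/vaddr.h"

static bool
copy_in (void *kdst, const void *usrc, size_t size)
{
  uint8_t *dst = kdst;
  const uint8_t *src = usrc;
  for (size_t i = 0; i < size; i++)
    {
      if (!is_user_vaddr (src + i))  /* get_user () requires uaddr < PHYS_BASE. */
        return false;
      int byte = get_user (src + i); /* -1 signals a page fault. */
      if (byte == -1)
        return false;
      dst[i] = byte;
    }
  return true;
}
```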
src/vm/frame.c (196 lines changed)
```diff
@@ -2,32 +2,35 @@
 #include <hash.h>
 #include <list.h>
 #include <string.h>
 
 #include "frame.h"
 #include "page.h"
+#include "filesys/file.h"
 #include "threads/malloc.h"
 #include "threads/vaddr.h"
+#include "userprog/pagedir.h"
+#include "userprog/syscall.h"
 #include "threads/synch.h"
-#include "devices/swap.h"
 
 /* Hash table that maps every active frame's kernel virtual address
    to its corresponding 'frame_metadata'.*/
 struct hash frame_table;
 
-/* Linked list of frame_metadata whose pages are predicted to currently
-   be in the working set of a process. They are not considered for
-   eviction, but are considered for demotion to the 'inactive' list. */
-struct list active_list;
+/* Linked list used to represent the circular queue in the 'clock'
+   algorithm for page eviction. Iterating from the element that is
+   currently pointed at by 'next_victim' yields an ordering of the entries
+   from oldest to newest (in terms of when they were added or checked
+   for having been referenced by a process). */
+struct list lru_list;
 
-/* Linked list of frame_metadata whose pages are predicted to leave the
-   working set of their processes soon, so are considered for eviction.
-   Pages are considered for eviction from the tail end, and are initially
-   demoted to 'inactive' at the head. */
-struct list inactive_list;
+/* The next element in lru_list to be considered for eviction (oldest added
+   or referenced page in the circular queue). If this page has has an
+   'accessed' bit of 0 when considering eviction, then it will be the next
+   victim. Otherwise, the next element in the queue is similarly considered. */
+struct list_elem *next_victim = NULL;
 
 /* Synchronisation variables. */
-/* Protects access to the 'inactive' list. */
-struct lock inactive_lock;
+/* Protects access to 'lru_list'. */
+struct lock lru_lock;
 
 struct frame_metadata
 {
```
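The comments above describe a clock (second-chance) policy over lru_list. A simplified, self-contained illustration of that policy, independent of the Pintos structures and not part of this change:

```c
/* Simplified illustration of the clock / second-chance policy described
   above.  Frames sit in a circular buffer; the hand skips frames whose
   reference bit is set (clearing the bit as it passes) and evicts the first
   unreferenced frame it meets. */
#include <stdbool.h>
#include <stddef.h>

#define NFRAMES 4

static bool referenced[NFRAMES]; /* Stand-in for the PTE accessed bits. */
static size_t hand;              /* Stand-in for next_victim. */

static size_t
clock_pick_victim (void)
{
  for (;;)
    {
      if (!referenced[hand])
        {
          size_t victim = hand;
          hand = (hand + 1) % NFRAMES; /* Advance past the victim. */
          return victim;
        }
      referenced[hand] = false;        /* Give a second chance. */
      hand = (hand + 1) % NFRAMES;
    }
}
```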
```diff
@@ -45,22 +48,24 @@ struct frame_metadata
 hash_hash_func frame_metadata_hash;
 hash_less_func frame_metadata_less;
 
+static struct list_elem *lru_next (struct list_elem *e);
+static struct list_elem *lru_prev (struct list_elem *e);
 static struct frame_metadata *get_victim (void);
 
 /* Initialize the frame system by initializing the frame (hash) table with
    the frame_metadata hashing and comparison functions, as well as initializing
-   the active & inactive lists. Also initializes the system's synchronisation
-   primitives. */
+   'lru_list' and its associated synchronisation primitives. */
 void
 frame_init (void)
 {
   hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
-  list_init (&active_list);
-  list_init (&inactive_list);
 
-  lock_init (&inactive_lock);
+  list_init (&lru_list);
+  lock_init (&lru_lock);
 }
 
+/* TODO: Consider synchronisation more closely (i.e. just for hash
+   table). */
 /* Attempt to allocate a frame for a user process, either by direct
    allocation of a user page if there is sufficient RAM, or by
    evicting a currently active page if memory allocated for user
```
```diff
@@ -69,7 +74,10 @@ frame_init (void)
 void *
 frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
 {
+  struct frame_metadata *frame_metadata;
   flags |= PAL_USER;
+
+  lock_acquire (&lru_lock);
   void *frame = palloc_get_page (flags);
 
   /* If a frame couldn't be allocated we must be out of main memory. Thus,
```
```diff
@@ -77,43 +85,81 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
      into disk. */
   if (frame == NULL)
     {
-      /* TODO: Deal with race condition wherein a page may be evicted in one
-         thread while it's in the middle of being evicted in another. */
-      struct frame_metadata *victim = get_victim ();
-      if (victim == NULL)
-        return NULL;
+      /* 1. Obtain victim. */
+      if (next_victim == NULL)
+        PANIC ("Couldn't allocate a single page to main memory!\n");
+
+      struct frame_metadata *victim = get_victim ();
+      ASSERT (victim != NULL); /* get_victim () should never return null. */
+
+      /* 2. Handle victim page writing based on its type. */
+      struct page_entry *victim_page = page_get (victim->upage);
+      if (victim_page != NULL && victim_page->type == PAGE_MMAP)
+        {
+          /* If it was a memory-mapped file page, we just write it back
+             to the file if it was dirty. */
+          if (pagedir_is_dirty(owner->pagedir, victim->upage))
+            {
+              lock_acquire (&filesys_lock);
+              file_write_at (victim_page->file, victim->upage,
+                             victim_page->read_bytes, victim_page->offset);
+              lock_release (&filesys_lock);
+            }
+        }
+      else
+        /* Otherwise, insert the page into swap. */
+        page_insert_swapped (victim->upage, victim->frame, victim->owner);
 
-      size_t swap_slot = swap_out (victim->frame);
-      page_set_swap (victim->owner, victim->upage, swap_slot);
-
       /* If zero flag is set, zero out the victim page. */
       if (flags & PAL_ZERO)
         memset (victim->frame, 0, PGSIZE);
 
-      frame = victim->frame;
+      /* 3. Indicate that the new frame's metadata will be stored
+         inside the same structure that stored the victim's metadata.
+         As both the new frame and the victim frame share the same kernel
+         virtual address, the hash map need not be updated, and neither
+         the list_elem value as both share the same lru_list position. */
+      frame_metadata = victim;
     }
 
-  struct frame_metadata *frame_metadata =
-    malloc (sizeof (struct frame_metadata));
-
-  frame_metadata->frame = frame;
+  /* If sufficient main memory allows the frame to be directly allocated,
+     we must update the frame table with a new entry, and grow lru_list. */
+  else
+    {
+      /* Must own lru_lock here, as otherwise there is a race condition
+         with next_victim either being NULL or uninitialized. */
+      frame_metadata = malloc (sizeof (struct frame_metadata));
+      frame_metadata->frame = frame;
+
+      /* Newly allocated frames are pushed to the back of the circular queue
+         represented by lru_list. Must explicitly handle the case where the
+         circular queue is empty (when next_victim == NULL). */
+      if (next_victim == NULL)
+        {
+          list_push_back (&lru_list, &frame_metadata->list_elem);
+          next_victim = &frame_metadata->list_elem;
+        }
+      else
+        {
+          struct list_elem *lru_tail = lru_prev (next_victim);
+          list_insert (lru_tail, &frame_metadata->list_elem);
+        }
+
+      hash_insert (&frame_table, &frame_metadata->hash_elem);
+    }
+
   frame_metadata->upage = upage;
   frame_metadata->owner = owner;
+  lock_release (&lru_lock);
 
-  /* Newly faulted pages begin at the head of the inactive list. */
-  lock_acquire (&inactive_lock);
-  list_push_front (&inactive_list, &frame_metadata->list_elem);
-  lock_release (&inactive_lock);
-
-  /* Finally, insert frame metadata within the frame table, with the key as its
-     allocated kernel address. */
-  hash_replace (&frame_table, &frame_metadata->hash_elem);
-
-  return frame;
+  return frame_metadata->frame;
 }
 
 /* Attempt to deallocate a frame for a user process by removing it from the
-   frame table as well as active/inactive list, and freeing the underlying
-   page memory. Panics if the frame isn't active in memory. */
+   frame table as well as lru_list, and freeing the underlying page
+   memory & metadata struct. Panics if the frame isn't active in memory. */
 void
 frame_free (void *frame)
 {
```
```diff
@@ -122,33 +168,56 @@ frame_free (void *frame)
 
   struct hash_elem *e =
     hash_delete (&frame_table, &key_metadata.hash_elem);
-  if (e == NULL) PANIC ("Attempted to free a frame without a corresponding "
-                        "kernel address!\n");
+  if (e == NULL) PANIC ("Attempted to free a frame at kernel address %p, "
+                        "but this address is not allocated!\n", frame);
 
   struct frame_metadata *frame_metadata =
     hash_entry (e, struct frame_metadata, hash_elem);
+
+  lock_acquire (&lru_lock);
   list_remove (&frame_metadata->list_elem);
+
+  /* If we're freeing the frame marked as the next victim, update
+     next_victim to either be the next least recently used page, or NULL
+     if no pages are loaded in main memory. */
+  if (&frame_metadata->list_elem == next_victim)
+    {
+      if (list_empty (&lru_list))
+        next_victim = NULL;
+      else
+        next_victim = lru_next (next_victim);
+    }
+  lock_release (&lru_lock);
+
   free (frame_metadata);
   palloc_free_page (frame);
 }
 
-/* Obtain a pointer to the metadata of the frame we should evict next. */
+/* TODO: Account for page aliases when checking accessed bit. */
+/* A pre-condition for calling this function is that the calling thread
+   owns lru_lock and that lru_list is non-empty. */
 static struct frame_metadata *
 get_victim (void)
 {
-  lock_acquire (&inactive_lock);
-  if (list_empty (&inactive_list))
+  struct list_elem *e = next_victim;
+  struct frame_metadata *frame_metadata;
+  uint32_t *pd;
+  void *upage;
+  for (;;)
     {
-      return NULL;
-    }
-  else
-    {
-      struct list_elem *victim_elem = list_pop_back (&inactive_list);
-      lock_release (&inactive_lock);
-
-      return list_entry (victim_elem, struct frame_metadata, list_elem);
+      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
+      pd = frame_metadata->owner->pagedir;
+      upage = frame_metadata->upage;
+      e = lru_next (e);
+
+      if (!pagedir_is_accessed (pd, upage))
+        break;
+
+      pagedir_set_accessed (pd, upage, false);
     }
+
+  next_victim = e;
+  return frame_metadata;
 }
 
 /* Hash function for frame metadata, used for storing entries in the
```
```diff
@@ -177,3 +246,26 @@ frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
   return a->frame < b->frame;
 }
 
+/* Returns the next recently used element after the one provided, which
+   is achieved by iterating through lru_list like a circular queue
+   (wrapping around the list at the tail). */
+static struct list_elem *
+lru_next (struct list_elem *e)
+{
+  if (!list_empty (&lru_list) && e == list_back (&lru_list))
+    return list_front (&lru_list);
+
+  return list_next (e);
+}
+
+/* Returns the previous recently used element after the one provided, which
+   is achieved by iterating through lru_list like a circular queue
+   (wrapping around the list at the head). */
+static struct list_elem *
+lru_prev (struct list_elem *e)
+{
+  if (!list_empty (&lru_list) && e == list_front (&lru_list))
+    return list_back (&lru_list);
+
+  return list_prev (e);
+}
```
```diff
@@ -126,21 +126,3 @@ mmap_cleanup (struct hash_elem *e, void *aux UNUSED)
   struct mmap_entry *mmap = hash_entry (e, struct mmap_entry, elem);
   mmap_unmap (mmap);
 }
-
-/* Updates the 'owner' thread's page table entry for virtual address 'upage'
-   to have a present bit of 0 and stores the specified swap slot value in the
-   entry for later retrieval from disk. */
-void
-page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
-{
-
-}
-
-/* Given that the page with user address 'upage' owned by 'owner' is flagged
-   to be in the swap disk via the owner's page table, returns its stored
-   swap slot. Otherwise panics the kernel. */
-size_t
-page_get_swap (struct thread *owner, void *upage)
-{
-  return 0;
-}
```
src/vm/page.c (117 lines changed)
```diff
@@ -1,19 +1,26 @@
 #include "page.h"
+#include <stdint.h>
 #include <string.h>
 #include <stdio.h>
 #include "filesys/file.h"
+#include "threads/pte.h"
 #include "threads/malloc.h"
 #include "threads/palloc.h"
+#include "devices/swap.h"
 #include "userprog/process.h"
+#include "userprog/pagedir.h"
 #include "vm/frame.h"
 
+#define SWAP_FLAG_BIT 9
+#define ADDR_START_BIT 12
+
 /* Hashing function needed for the SPT table. Returns a hash for an entry,
    based on its upage. */
 unsigned
 page_hash (const struct hash_elem *e, UNUSED void *aux)
 {
   struct page_entry *page = hash_entry (e, struct page_entry, elem);
-  return hash_ptr(page->upage);
+  return hash_ptr (page->upage);
 }
 
 /* Comparator function for the SPT table. Compares two entries based on their
```
```diff
@@ -28,22 +35,66 @@ page_less (const struct hash_elem *a_, const struct hash_elem *b_,
   return a->upage < b->upage;
 }
 
-/* Allocate and insert a new page entry into the thread's page table. */
+static void page_flag_swap (uint32_t *pte, bool set);
+static void page_set_swap (struct thread *owner, uint32_t *pte,
+                           size_t swap_slot);
+
+// TODO: Deal with NULL malloc returns
+/* Swap out 'owner' process's 'upage' stored at 'kpage'. Then, allocate and
+   insert a new page entry into the user process thread's SPT representing
+   this swapped out page. */
 struct page_entry *
-page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes,
-             uint32_t zero_bytes, bool writable, enum page_type type)
+page_insert_swapped (void *upage, void *kpage, struct thread *owner)
+{
+  /* 1. Initialize swapped page entry. */
+  struct page_entry *page = page_get (upage);
+  if (page == NULL)
+    {
+      page = malloc (sizeof (struct page_entry));
+      if (page == NULL)
+        return NULL;
+      page->upage = upage;
+      lock_init (&page->lock);
+      hash_insert (&owner->pages, &page->elem);
+    }
+
+  /* Mark page as 'swapped' and flag the page directory as having
+     been modified *before* eviction begins to prevent the owner of the
+     victim page from accessing/modifying it mid-eviction. */
+  /* TODO: We need to stop the process from destroying pagedir mid-eviction,
+     as this could render the page table entry invalid. */
+  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
+
+  page_flag_swap (pte, true);
+  lock_acquire (&page->lock);
+  pagedir_clear_page (owner->pagedir, upage);
+
+  size_t swap_slot = swap_out (kpage);
+  page_set_swap (owner, pte, swap_slot);
+
+  lock_release (&page->lock);
+  return page;
+}
+
+/* Allocate and insert a new page entry into the user process thread's
+   SPT representing a file page. */
+struct page_entry *
+page_insert_file (struct file *file, off_t ofs, void *upage,
+                  uint32_t read_bytes, uint32_t zero_bytes, bool writable,
+                  enum page_type type)
 {
   struct page_entry *page = malloc(sizeof (struct page_entry));
   if (page == NULL)
     return NULL;
 
+  page->type = type;
   page->file = file;
   page->offset = ofs;
   page->upage = upage;
   page->read_bytes = read_bytes;
   page->zero_bytes = zero_bytes;
   page->writable = writable;
-  page->type = type;
+  lock_init (&page->lock);
 
   hash_insert (&thread_current ()->pages, &page->elem);
   return page;
```
```diff
@@ -66,12 +117,14 @@ page_get (void *upage)
 }
 
 bool
-page_load (struct page_entry *page, bool writable)
+page_load_file (struct page_entry *page, bool writable)
 {
   /* Allocate a frame for the page. If a frame allocation fails, then
      frame_alloc should try to evict a page. If it is still NULL, the OS
      panics as this should not happen if eviction is working correctly. */
-  void *frame = frame_alloc (PAL_USER, page->upage, thread_current ());
+  struct thread *t = thread_current ();
+  void *frame = frame_alloc (PAL_USER, page->upage, t);
+  pagedir_set_accessed (t->pagedir, page->upage, true);
   if (frame == NULL)
     PANIC ("Could not allocate a frame to load page into memory.");
 
```
```diff
@@ -106,3 +159,53 @@ page_cleanup (struct hash_elem *e, void *aux UNUSED)
 {
   free (hash_entry (e, struct page_entry, elem));
 }
+
+/* Flags the provided page table entry as representing a swapped out page. */
+void
+page_flag_swap (uint32_t *pte, bool set)
+{
+  if (set)
+    *pte |= (1 << SWAP_FLAG_BIT);
+  else
+    *pte &= ~(1 << SWAP_FLAG_BIT);
+}
+
+/* Sets the address bits of the page table entry to the provided swap slot
+   value. To be used for later retrieval of the swap slot when page faulting. */
+static void
+page_set_swap (struct thread *owner, uint32_t *pte, size_t swap_slot)
+{
+  /* Store the provided swap slot in the address bits of the page table
+     entry, truncating excess bits. */
+  *pte |= (1 << SWAP_FLAG_BIT);
+  uint32_t swap_slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR;
+  *pte = (*pte & PTE_FLAGS) | swap_slot_bits;
+
+  invalidate_pagedir (owner->pagedir);
+}
+
+/* Returns true iff the page with user address 'upage' owned by 'owner'
+   is flagged to be in the swap disk via the owner's page table. */
+bool
+page_in_swap (struct thread *owner, void *upage)
+{
+  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
+  return pte != NULL &&
+         (*pte & (1 << SWAP_FLAG_BIT)) != 0;
+}
+
+/* Given that the page with user address 'upage' owned by 'owner' is flagged
+   to be in the swap disk via the owner's page table, returns its stored
+   swap slot and marks the PTE as not being in swap. */
+size_t
+page_get_swap (struct thread *owner, void *upage)
+{
+  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
+
+  ASSERT (pte != NULL);
+  ASSERT ((*pte & PTE_P) == 0);
+
+  /* Masks the address bits and returns truncated value. */
+  page_flag_swap (pte, false);
+  return ((*pte & PTE_ADDR) >> ADDR_START_BIT);
+}
```
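The helpers above pack a swap slot into the address bits of a non-present PTE and flag it via bit 9. A standalone round-trip check using the same bit positions (PTE_ADDR and PTE_FLAGS are redefined locally with their standard Pintos values); illustrative only.

```c
/* Standalone illustration of the PTE packing used above.  A swap slot is
   stored in the address bits of a non-present PTE and flagged via bit 9;
   decoding shifts it back out of the top 20 bits. */
#include <assert.h>
#include <stdint.h>

#define SWAP_FLAG_BIT 9
#define ADDR_START_BIT 12
#define PTE_ADDR  0xfffff000u   /* Top 20 bits: frame/address field. */
#define PTE_FLAGS 0x00000fffu   /* Low 12 bits: flag field. */

int
main (void)
{
  uint32_t pte = 0;             /* Non-present PTE. */
  uint32_t slot = 1234;         /* Example swap slot. */

  /* Encode: set the in-swap flag, store the slot in the address bits. */
  pte |= 1u << SWAP_FLAG_BIT;
  pte = (pte & PTE_FLAGS) | ((slot << ADDR_START_BIT) & PTE_ADDR);

  /* Decode: the flag is still set, and the slot comes back out intact. */
  assert (pte & (1u << SWAP_FLAG_BIT));
  assert (((pte & PTE_ADDR) >> ADDR_START_BIT) == slot);
  return 0;
}
```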
```diff
@@ -2,17 +2,23 @@
 #define VM_PAGE_H
 
 #include "threads/thread.h"
+#include "threads/synch.h"
 #include "filesys/off_t.h"
 
 enum page_type {
   PAGE_FILE,
-  PAGE_EMPTY
+  PAGE_MMAP
 };
 
 struct page_entry {
   enum page_type type; /* Type of Data that should go into the page */
   void *upage; /* Start Address of the User Page (Key of hash table). */
 
+  /* Data for swapped pages */
+  struct lock lock; /* Enforces mutual exclusion in accessing the page
+                       referenced by the entry between its owning process
+                       and any thread performing page eviction. */
+
   /* File Data */
   struct file *file; /* Pointer to the file for executables. */
   off_t offset; /* Offset of the page content within the file. */
```
```diff
@@ -26,13 +32,16 @@ struct page_entry {
 unsigned page_hash (const struct hash_elem *e, void *aux);
 bool page_less (const struct hash_elem *a_, const struct hash_elem *b_,
                 void *aux);
-struct page_entry *page_insert (struct file *file, off_t ofs, void *upage,
-                                uint32_t read_bytes, uint32_t zero_bytes,
-                                bool writable, enum page_type type);
+struct page_entry *page_insert_swapped (void *upage, void* kpage,
+                                        struct thread *owner);
+struct page_entry *page_insert_file (struct file *file, off_t ofs, void *upage,
+                                     uint32_t read_bytes, uint32_t zero_bytes,
+                                     bool writable, enum page_type);
 struct page_entry *page_get (void *upage);
-bool page_load (struct page_entry *page, bool writable);
+bool page_load_file (struct page_entry *page, bool writable);
 void page_cleanup (struct hash_elem *e, void *aux);
-void page_set_swap (struct thread *, void *, size_t);
-size_t page_get_swap (struct thread *, void *);
+bool page_in_swap (struct thread *, void *);
+size_t page_get_swap (struct thread *owner, void *upage);
 
 #endif /* vm/frame.h */
```
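A minimal usage sketch of the renamed SPT interface, mirroring how load_segment () registers a page for lazy loading and how the fault path later pulls it in; illustrative only, with the page parameters chosen for the example.

```c
/* Illustrative only: one page of EXEC_FILE at offset 0 is registered for
   lazy loading, then faulted in on first access via the SPT lookup. */
#include <stdbool.h>
#include "filesys/file.h"
#include "threads/vaddr.h"
#include "vm/page.h"

static bool
register_and_load_first_page (struct file *exec_file, void *upage)
{
  /* Describe the page: read a whole page from the file, zero nothing. */
  if (page_insert_file (exec_file, 0, upage, PGSIZE, 0,
                        /* writable */ false, PAGE_FILE) == NULL)
    return false;

  /* Later, typically from the page-fault handler: look the page up in the
     SPT and load its contents into a freshly allocated frame. */
  struct page_entry *page = page_get (upage);
  return page != NULL && page_load_file (page, page->writable);
}
```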