Compare commits

...

90 Commits

Author SHA1 Message Date
Saleh Bubshait
ad4eda5385 Merge branch 'vm/syscall-mmap/styling' into 'master'
Refactor page to follow pintos styling for indentation and add spacing for readability

See merge request lab2425_autumn/pintos_22!69
2024-12-06 18:37:18 +00:00
sBubshait
8d1b4c4994 Refactor page to follow pintos styling for indentation and add spacing for readability 2024-12-06 18:36:54 +00:00
Saleh Bubshait
07c0219058 Merge branch 'vm/syscall-mmap/styling' into 'master'
Refactor frame to add spacing and exception for indentation to follow pintos styling

See merge request lab2425_autumn/pintos_22!68
2024-12-06 18:28:39 +00:00
sBubshait
5fbabdcec9 Refactor frame to add spacing and exception for indentation to follow pintos styling 2024-12-06 18:27:50 +00:00
Saleh Bubshait
5e8bdc68e7 Merge branch 'vm/syscall-mmap/styling' into 'master'
Refactor mmap system call code to follow pintos style in indentation

See merge request lab2425_autumn/pintos_22!67
2024-12-06 18:15:47 +00:00
sBubshait
d039b59b7c Refactor mmap system call code to follow pintos style in indentation 2024-12-06 18:14:47 +00:00
Saleh Bubshait
29c0b93711 Merge branch 'vm/pagedir-spt-synch' into 'master'
fix: synchronise threads' SPTs with locks

See merge request lab2425_autumn/pintos_22!66
2024-12-06 17:44:33 +00:00
77fedd6666 fix: synchronise threads' SPTs with locks 2024-12-06 17:31:33 +00:00
Dias Alberto, Ethan
eba8c1ffa8 Merge branch 'vm/merged/pinning-synch' into 'master'
fix: re-enable shareable read only executable logic

See merge request lab2425_autumn/pintos_22!65
2024-12-06 17:13:02 +00:00
EDiasAlberto
be696ec528 fix: re-enable shareable read only executable logic 2024-12-06 17:07:17 +00:00
Saleh Bubshait
7611090253 Merge branch 'vm/merged/pinning-synch' into 'master'
Merge 'vm/merged/pinning-synch' into master

See merge request lab2425_autumn/pintos_22!64
2024-12-06 16:31:37 +00:00
sBubshait
7f058ffc90 Refactor page_type to rename PAGE_FILE to PAGE_EXECUTABLE as mmap and executables are now separate 2024-12-06 16:22:26 +00:00
c1bc70adad ci: do not ignore any VM tests since it is fully implemented now 2024-12-06 15:55:23 +00:00
sBubshait
22f3b0950f Fix: Insert pages in mmap as PAGE_MMAP instead of PAGE_FILE 2024-12-06 15:54:46 +00:00
f64b92bbfa refactor: document shared_file_page 2024-12-06 15:35:23 +00:00
Themis Demetriades
3d6e30119b refactor: rename lru_lock to ftable_lock for greater clarity, and update comments to reflect this 2024-12-06 15:31:27 +00:00
4104d2c852 fix: always add to frame owners when installing existing shared page. 2024-12-06 15:23:41 +00:00
EDiasAlberto
d389c15828 fix: acquire lru_lock before pinning frames to avoid race condition with eviction 2024-12-06 13:20:43 +00:00
Themis Demetriades
8ac34063d7 fix: disable 'shareable' flag to probe race conditions 2024-12-06 10:56:38 +00:00
Demetriades, Themis
c68fea5249 Merge branch 'vm/merged/themis' into 'master'
Implement VM

See merge request lab2425_autumn/pintos_22!63
2024-12-06 05:07:14 +00:00
Themis Demetriades
65da1659e5 feat: merged shared-read-only-executables with the rest of VM 2024-12-06 04:15:13 +00:00
Themis Demetriades
3897e83963 fix: use correct page_get function within page eviction 2024-12-06 01:43:41 +00:00
Demetriades, Themis
96b350d623 Merge branch 'vm/mmap-write-back-on-eviction' into 'vm/virtual-memory/themis-synch'
Write back mmap file pages to file upon eviction

See merge request lab2425_autumn/pintos_22!59
2024-12-06 01:01:50 +00:00
Themis Demetriades
31403ac7cb fix: obtain correct page table entry when performing eviction 2024-12-06 00:56:03 +00:00
1da0c7d48c fix: properly assign frame owners and deallocate in all required places 2024-12-06 00:29:57 +00:00
Demetriades, Themis
8220b931a9 Merge branch 'vm/virtual-memory/frame-synch/saleh' into 'vm/virtual-memory/themis-synch'
Merge frame pinning to themis-synch

See merge request lab2425_autumn/pintos_22!60
2024-12-06 00:21:02 +00:00
sBubshait
1efa1fef9a Merge frame pinning into themis-synch 2024-12-05 23:56:25 +00:00
sBubshait
fc088a19ac Merge remote-tracking branch 'origin/vm/frame-pinning' into vm/virtual-memory/frame-synch/saleh
# Conflicts:
#	src/userprog/syscall.c
2024-12-05 23:48:52 +00:00
sBubshait
a34bbbed08 Update frame: When evicting an mmapped file page, write it back to the file if it is dirty 2024-12-05 22:53:48 +00:00
833c1b0520 fix: only swap out shared pages once 2024-12-05 22:37:14 +00:00
9aa9cdb91e feat: implement proper destruction of pages, including for shared pages 2024-12-05 22:23:50 +00:00
Themis Demetriades
2811ea0eb3 fix: SPT never removes entries until process termination or special case 2024-12-05 22:05:02 +00:00
dd46200256 feat: initial shared file page management and initialization 2024-12-05 21:46:49 +00:00
sBubshait
6da855fe47 Implement validation of pointers and strings in syscalls with pinning and unpinning to protect against eviction 2024-12-05 21:12:31 +00:00
4dd6b6e928 fix: do not leak when inserting the same page twice, just update 2024-12-05 19:38:27 +00:00
0f1f7b9a6f refactor: extract init_pages 2024-12-05 19:35:39 +00:00
sBubshait
e03273756d Update frame table to add a pinned flag and protect those from being evicted 2024-12-05 17:52:01 +00:00
Themis Demetriades
7860f3863f fix: add check to mmap to ensure file isn't mapped over stack segment (ed1223) 2024-12-05 17:11:02 +00:00
Themis Demetriades
d03e253046 feat: implement synchronisation to protect access to PTEs of SPTs during eviction 2024-12-05 16:51:15 +00:00
EDiasAlberto
5cf79b5389 fix: add check to mmap to ensure file isn't mapped over stack segment 2024-12-05 16:05:08 +00:00
EDiasAlberto
e779e8ac7c fix: modify stack growth to use frame allocation to allow for page swapping 2024-12-05 04:39:50 +00:00
sBubshait
16db01d3d8 Refactor: Check if page is in a swap in fetch_page instead of the page fault handler 2024-12-05 03:17:18 +00:00
sBubshait
c12cd95093 Fix issues with merging, duplicate references and definition of VM 2024-12-05 02:27:48 +00:00
sBubshait
f13fd435cd Merge remote-tracking branch 'origin/vm/page-swap-synch' into vm/virtual-memory/saleh
# Conflicts:
#	.gitlab-ci.yml
#	src/Makefile.build
#	src/threads/thread.c
#	src/userprog/exception.c
#	src/userprog/process.c
#	src/vm/frame.c
#	src/vm/page.c
#	src/vm/page.h
#	src/vm/stackgrowth.c
#	src/vm/stackgrowth.h
2024-12-05 02:21:53 +00:00
EDiasAlberto
ac31fb1e1e feat: set accessed bit to allocated frames in page_load and get_usr_kpage 2024-12-05 01:41:23 +00:00
sBubshait
1a8eb1bbe5 Merge branch 'vm/memory-mapped-files' into vm/virtual-memory/saleh 2024-12-05 01:24:50 +00:00
sBubshait
52ec8fe779 Fix Bug: Grow stack if necessary in case of a page fault in the kernel context 2024-12-05 01:15:46 +00:00
sBubshait
f171a05108 Merge branch 'vm/stack-growth/saleh' into vm/virtual-memory/saleh
# Conflicts:
#	src/userprog/exception.c
#	src/userprog/process.c
#	src/userprog/syscall.c
#	src/vm/frame.c
#	src/vm/page.c
#	src/vm/page.h
2024-12-05 00:51:03 +00:00
Demetriades, Themis
f06c91cf0d ci: include linear page tests in VM test pipeline 2024-12-05 00:29:49 +00:00
sBubshait
5265fed288 Refactor stack growth to be helper functions in exception for easier merging 2024-12-05 00:27:40 +00:00
Themis Demetriades
19d5b02341 fix: remove use of USERPROG compiler flag specific code when the flag is disabled 2024-12-04 23:48:51 +00:00
Themis Demetriades
0288e13206 fix: don't discriminate between user and kernel page fault contexts for stack growth, lazy loading, and swapping 2024-12-04 23:46:31 +00:00
Themis Demetriades
60faf995ea fix: lazy load executable files of user processes even when accessed in a kernel context 2024-12-04 22:21:31 +00:00
Themis Demetriades
723055f485 fix: only use lazy loading if VM flag is enabled 2024-12-04 21:33:21 +00:00
Themis Demetriades
1e236a5c47 Merge branch 'vm/lazy-loading' into vm/page-swap-synch 2024-12-04 19:11:37 +00:00
Themis Demetriades
4bf6914cfa feat: incorporate lazy-loading data & helpers into supplemental page table 2024-12-04 16:45:36 +00:00
Themis Demetriades
fb73d694bf fix: frame allocation now invalidates the victim process page directory, not the caller's 2024-12-04 16:41:13 +00:00
Themis Demetriades
1b73e415d7 fix: invalidate PTEs of evicted pages before eviction occurs to prevent modification of pages mid-eviction 2024-12-04 15:02:49 +00:00
Themis Demetriades
47a7dfae04 refactor: add comments describing each type of page fault dealt by the page fault handler 2024-12-03 21:47:59 +00:00
EDiasAlberto
9a3c8a1c38 fix: grow stack upon page fault in kernel context to support syscall stack growth 2024-12-03 20:56:10 +00:00
Themis Demetriades
08eafcf7ef feat: implement page swapping 2024-12-03 16:53:47 +00:00
Themis Demetriades
df7d847978 fix: remove stack fault checks for page faults outside user non-present addresses 2024-12-02 21:07:17 +00:00
Demetriades, Themis
fbcd3c9f19 ci: include dynamic stack growth tests in VM test pipeline 2024-12-02 20:57:05 +00:00
Themis Demetriades
6190d1bee6 fix: disable dynamic stack growth when VM flag is disabled 2024-12-02 20:44:54 +00:00
Themis Demetriades
6adf2e743b refactor: dynamic stack growth functions to follow code style 2024-12-02 19:50:40 +00:00
Themis Demetriades
05a48cf9c6 refactor: page fault exception handler follows code style 2024-12-01 23:36:55 +00:00
Themis Demetriades
bb16abdc0d refactor: supplemental page table helper functions follow code style 2024-12-01 23:30:50 +00:00
Demetriades, Themis
8e278b349a Merge branch 'page-swap-helpers' into 'virtual-memory'
Implement helper functions for managing the supplemental page table

See merge request lab2425_autumn/pintos_22!55
2024-12-01 21:47:30 +00:00
Demetriades, Themis
9d35beb2e4 Merge branch 'virtual-memory' into 'page-swap-helpers'
# Conflicts:
#   src/vm/frame.c
#   src/vm/page.c
2024-12-01 21:44:17 +00:00
Themis Demetriades
7ce512305e fix: remove DVM flag when compiling outside of vm directory 2024-12-01 00:41:09 +00:00
Demetriades, Themis
775b73a3e9 Merge branch 'ethan-stack-growth' into 'virtual-memory'
Implement dynamic stack growth

See merge request lab2425_autumn/pintos_22!54
2024-11-30 23:21:33 +00:00
Demetriades, Themis
d8edc6d3fe Merge branch 'virtual-memory' into 'ethan-stack-growth'
# Conflicts:
#   src/Makefile.build
2024-11-30 23:21:16 +00:00
Demetriades, Themis
5682974f9d Merge branch 'vm/supplemental-page-table' into 'master'
Implement frame table & page eviction algorithm

See merge request lab2425_autumn/pintos_22!53
2024-11-30 23:01:04 +00:00
Themis Demetriades
6f85d7642d feat: implement clock (second-chance) page eviction algorithm 2024-11-30 22:40:13 +00:00
EDiasAlberto
94adc11f03 Feat: implement page_get_swap and page_set_swap functions 2024-11-30 03:21:34 +00:00
EDiasAlberto
40c553d68b Merge stack growth functions 2024-11-30 01:54:28 +00:00
EDiasAlberto
13de832586 Refactor stack growth code to remove messy conditions 2024-11-29 23:52:05 +00:00
EDiasAlberto
5c661c2e24 Feat: pointer validation checks string across multiple pages and handle kernel page faults 2024-11-29 23:49:49 +00:00
EDiasAlberto
5f40d83e66 Implement MMU-based user memory validation 2024-11-29 23:03:31 +00:00
Themis Demetriades
149bb42889 feat: implement clock (second-chance) page eviction algorithm 2024-11-29 19:30:47 +00:00
EDiasAlberto
4f84a83611 Refactor: abstract new page allocation to one general function and make helper functions static 2024-11-27 19:41:22 +00:00
EDiasAlberto
c74a8c55aa Implement stack growth for system calls and add stack pointer tracking to thread 2024-11-27 19:21:43 +00:00
EDiasAlberto
c670c29e47 update stack growth header to fit virtual memory naming format 2024-11-27 18:57:20 +00:00
EDiasAlberto
af7f2ba873 Fix: Magic number in stackgrowth.c 2024-11-26 04:54:00 +00:00
EDiasAlberto
3ef5264b6e feat: allow stack to grow for process up to 8MB in size 2024-11-26 04:43:25 +00:00
59e7a64f8e Only check user pages rather than all bytes in-between, for known-size pointers 2024-11-12 15:48:22 +00:00
cf4bf90cbb Implement user pointer checking for C strings 2024-11-12 15:34:45 +00:00
9a6abab95e Check access to user memory using page fault method (via get_user and put_user). 2024-11-12 15:00:16 +00:00
44f6a85163 Add get_user and put_user provided by spec. 2024-11-12 14:50:53 +00:00
83e044cf68 Let kernel handle its own page faults 2024-11-12 14:50:53 +00:00
15 changed files with 1142 additions and 251 deletions

View File

@@ -38,4 +38,3 @@ test_vm:
extends: .pintos_tests extends: .pintos_tests
variables: variables:
DIR: vm DIR: vm
IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-over-stk)

View File

@@ -66,6 +66,7 @@ vm_SRC += vm/frame.c # Frame table manager.
vm_SRC += vm/page.c # Page table manager. vm_SRC += vm/page.c # Page table manager.
vm_SRC += vm/mmap.c # Memory-mapped files. vm_SRC += vm/mmap.c # Memory-mapped files.
vm_SRC += devices/swap.c # Swap block manager. vm_SRC += devices/swap.c # Swap block manager.
#vm_SRC = vm/file.c # Some other file.
# Filesystem code. # Filesystem code.
filesys_SRC = filesys/filesys.c # Filesystem core. filesys_SRC = filesys/filesys.c # Filesystem core.

View File

@@ -33,6 +33,7 @@
#endif #endif
#ifdef VM #ifdef VM
#include "vm/frame.h" #include "vm/frame.h"
#include "vm/page.h"
#include "devices/swap.h" #include "devices/swap.h"
#endif #endif
#ifdef FILESYS #ifdef FILESYS
@@ -104,6 +105,7 @@ main (void)
paging_init (); paging_init ();
#ifdef VM #ifdef VM
frame_init (); frame_init ();
shared_file_pages_init ();
#endif #endif
/* Segmentation. */ /* Segmentation. */

View File

@@ -265,11 +265,24 @@ thread_create (const char *name, int priority,
#ifdef USERPROG #ifdef USERPROG
/* Initialize the thread's file descriptor table. */ /* Initialize the thread's file descriptor table. */
t->fd_counter = MINIMUM_USER_FD; t->fd_counter = MINIMUM_USER_FD;
bool success = hash_init (&t->open_files, fd_hash, fd_less, NULL);
if (success)
{
success = hash_init (&t->child_results, process_result_hash,
process_result_less, t);
if (!success)
hash_destroy (&t->open_files, NULL);
#ifdef VM
else
{
success = init_pages (&t->pages);
if (!success)
hash_destroy (&t->child_results, NULL);
}
#endif
}
if (!hash_init (&t->open_files, fd_hash, fd_less, NULL) if (!success)
|| !hash_init (&t->child_results, process_result_hash,
process_result_less, t)
|| !hash_init (&t->pages, page_hash, page_less, NULL))
{ {
palloc_free_page (t); palloc_free_page (t);
free (t->result); free (t->result);
@@ -723,6 +736,8 @@ init_thread (struct thread *t, const char *name, int nice, int priority,
t->recent_cpu = recent_cpu; t->recent_cpu = recent_cpu;
t->priority = t->base_priority; t->priority = t->base_priority;
lock_init (&t->pages_lock);
old_level = intr_disable (); old_level = intr_disable ();
list_push_back (&all_list, &t->allelem); list_push_back (&all_list, &t->allelem);
intr_set_level (old_level); intr_set_level (old_level);

View File

@@ -136,6 +136,7 @@ struct thread
struct list_elem elem; /* List element. */ struct list_elem elem; /* List element. */
struct hash pages; /* Table of open user pages. */ struct hash pages; /* Table of open user pages. */
struct lock pages_lock; /* Lock for the supplementary page table. */
/* Memory mapped files for user virtual memory. */ /* Memory mapped files for user virtual memory. */
struct hash mmap_files; /* List of memory mapped files. */ struct hash mmap_files; /* List of memory mapped files. */
@@ -149,6 +150,8 @@ struct thread
struct hash open_files; /* Hash Table of FD -> Struct File. */ struct hash open_files; /* Hash Table of FD -> Struct File. */
#endif #endif
void *curr_esp;
/* Owned by thread.c. */ /* Owned by thread.c. */
unsigned magic; /* Detects stack overflow. */ unsigned magic; /* Detects stack overflow. */
}; };

View File

@@ -1,19 +1,30 @@
#include "userprog/exception.h" #include "userprog/exception.h"
#include <inttypes.h> #include <inttypes.h>
#include <stdio.h> #include <stdio.h>
#include "stdbool.h"
#include "userprog/gdt.h" #include "userprog/gdt.h"
#include "userprog/pagedir.h"
#include "threads/interrupt.h" #include "threads/interrupt.h"
#include "threads/thread.h" #include "threads/thread.h"
#include "threads/vaddr.h" #ifdef VM
#include "vm/frame.h"
#include "vm/page.h" #include "vm/page.h"
#include "devices/swap.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#endif
#define MAX_STACK_SIZE (8 * 1024 * 1024) // 8MB
#define MAX_STACK_OFFSET 32 // 32 bytes offset below stack pointer (ESP)
/* Number of page faults processed. */ /* Number of page faults processed. */
static long long page_fault_cnt; static long long page_fault_cnt;
static void kill (struct intr_frame *); static void kill (struct intr_frame *);
static void page_fault (struct intr_frame *); static void page_fault (struct intr_frame *);
bool try_fetch_page (void *upage, bool write);
static bool is_valid_stack_access (const void *fault_addr, const void *esp);
static bool grow_stack (void *upage);
bool fetch_page (void *upage, bool write);
/* Registers handlers for interrupts that can be caused by user /* Registers handlers for interrupts that can be caused by user
programs. programs.
@@ -149,24 +160,34 @@ page_fault (struct intr_frame *f)
write = (f->error_code & PF_W) != 0; write = (f->error_code & PF_W) != 0;
user = (f->error_code & PF_U) != 0; user = (f->error_code & PF_U) != 0;
if (!user) /* Select the appropriate stack pointer based on the context of the fault. */
{ void *esp = user ? f->esp : thread_current()->curr_esp;
f->eip = (void *)f->eax;
f->eax = 0xffffffff;
return;
}
/* If the fault address is in a user page that is not present, then it might /* If the fault address is in a user page that is not present, then it might
just need to be lazily loaded. So, we check our SPT to see if the page be just that the stack needs to grow or that it needs to be lazily loaded.
is expected to have data loaded in memory. */ So we attempt to grow the stack. If this does not work, we check our SPT to
see if the page is expected to have data loaded in memory. */
void *upage = pg_round_down (fault_addr); void *upage = pg_round_down (fault_addr);
if (not_present && is_user_vaddr (upage) && upage != NULL) if (not_present && is_user_vaddr (upage) && upage != NULL)
{ {
if (try_fetch_page (upage, write)) if (fetch_page (upage, write))
return; return;
if (is_valid_stack_access (fault_addr, esp))
if (grow_stack (upage))
return;
} }
/* If the page fault occurred in kernel mode, then we intentionally indicate
a fault (for get_user() etc). */
if (!user)
{
f->eip = (void *)f->eax;
f->eax = 0xffffffff;
return;
}
/* To implement virtual memory, delete the rest of the function /* To implement virtual memory, delete the rest of the function
body, and replace it with code that brings in the page to body, and replace it with code that brings in the page to
which fault_addr refers. */ which fault_addr refers. */
@@ -178,15 +199,88 @@ page_fault (struct intr_frame *f)
kill (f); kill (f);
} }
/* Validates whether the fault address is a valid stack access. Access is a
valid stack access under the following two conditions:
1. The fault address must be within MAX_STACK_OFFSET (32) bytes below
the current stack pointer. (Accounts for both PUSH and PUSHA instructions)
2. Growing this stack to this address does not cause it to exceed the
MAX_STACK_SIZE (8MB) limit.
Returns true if both conditions are met, false otherwise.
Pre: fault_addr is a valid user virtual address (so also not NULL). */
static bool
is_valid_stack_access (const void *fault_addr, const void *esp)
{
uint32_t new_stack_size = PHYS_BASE - pg_round_down (fault_addr);
uint32_t *lowest_valid_push_addr = (uint32_t *)esp - MAX_STACK_OFFSET;
bool is_within_push_range = (uint32_t *)fault_addr >= lowest_valid_push_addr;
return is_within_push_range && new_stack_size <= MAX_STACK_SIZE;
}
/* Attempts to grow the stack by allocating and mapping a new page.
This involves:
1. Allocating a zeroed page from the user pool
2. Installing it into the page table with write permissions
Returns true if the stack was successfully grown, false if either
allocation or installation fails.
Pre: upage is a valid page-aligned address (so also not NULL). */
static bool
grow_stack (void *upage)
{
/* Allocate new page for stack */
void *new_page = frame_alloc (PAL_ZERO, upage, thread_current ());
if (new_page == NULL)
return false;
/* Install the page into user page table */
if (!pagedir_set_page (thread_current ()->pagedir, upage, new_page, true))
{
frame_free (new_page);
return false;
}
return true;
}
bool bool
try_fetch_page (void *upage, bool write) fetch_page (void *upage, bool write)
{ {
/* Check if the page is in the supplemental page table. That is, it is a page /* Check if the page is in the supplemental page table. That is, it is a page
that is expected to be in memory. */ that is expected to be in memory. */
struct page_entry *page = page_get (upage); struct page_entry *page = page_get (thread_current (), upage);
if (page == NULL) if (page == NULL)
return false; return false;
/* Check if the non-present user page is in the swap partition.
If so, swap it back into main memory, updating the PTE for
the faulted virtual address to point to the newly allocated
frame. */
struct thread *t = thread_current ();
if (page_in_swap (t, upage))
{
/* NOTE: This code should be refactored and moved into helper functions
within 'page.c'.*/
void *kpage = frame_alloc (0, upage, t);
lock_acquire (&page->lock);
size_t swap_slot = page_get_swap (t, upage);
swap_in (kpage, swap_slot);
lock_release (&page->lock);
bool writeable = pagedir_is_writable (t->pagedir, upage);
/* TODO: When this returns false we should quit the page fault,
but currently we continue and check the stack conditions in the
page fault handler. */
return pagedir_set_page (t->pagedir, upage, kpage, writeable);
}
/* An attempt to write to a non-writeable page should fail. */ /* An attempt to write to a non-writeable page should fail. */
if (write && !page->writable) if (write && !page->writable)
return false; return false;
@@ -194,8 +288,10 @@ try_fetch_page (void *upage, bool write)
/* Load the page into memory based on the type of data it is expecting. */ /* Load the page into memory based on the type of data it is expecting. */
bool success = false; bool success = false;
switch (page->type) { switch (page->type) {
case PAGE_FILE: case PAGE_MMAP:
success = page_load (page, page->writable); case PAGE_EXECUTABLE:
case PAGE_SHARED:
success = page_load_file (page);
break; break;
default: default:
return false; return false;

View File

@@ -2,12 +2,14 @@
#include <stdbool.h> #include <stdbool.h>
#include <stddef.h> #include <stddef.h>
#include <string.h> #include <string.h>
#include "devices/swap.h"
#include "threads/init.h" #include "threads/init.h"
#include "threads/pte.h" #include "threads/pte.h"
#include "threads/palloc.h" #include "threads/palloc.h"
#include "vm/frame.h"
#include "vm/page.h"
static uint32_t *active_pd (void); static uint32_t *active_pd (void);
static void invalidate_pagedir (uint32_t *);
/* Creates a new page directory that has mappings for kernel /* Creates a new page directory that has mappings for kernel
virtual addresses, but none for user virtual addresses. virtual addresses, but none for user virtual addresses.
@@ -40,8 +42,14 @@ pagedir_destroy (uint32_t *pd)
uint32_t *pte; uint32_t *pte;
for (pte = pt; pte < pt + PGSIZE / sizeof *pte; pte++) for (pte = pt; pte < pt + PGSIZE / sizeof *pte; pte++)
if (*pte & PTE_P) {
palloc_free_page (pte_get_page (*pte)); if (page_is_shared_pte (pte))
continue;
else if (page_in_swap_pte (pte))
swap_drop (page_get_swap_pte (pte));
else if (*pte & PTE_P)
frame_free (pte_get_page (*pte));
}
palloc_free_page (pt); palloc_free_page (pt);
} }
palloc_free_page (pd); palloc_free_page (pd);
@@ -53,7 +61,7 @@ pagedir_destroy (uint32_t *pd)
on CREATE. If CREATE is true, then a new page table is on CREATE. If CREATE is true, then a new page table is
created and a pointer into it is returned. Otherwise, a null created and a pointer into it is returned. Otherwise, a null
pointer is returned. */ pointer is returned. */
static uint32_t * uint32_t *
lookup_page (uint32_t *pd, const void *vaddr, bool create) lookup_page (uint32_t *pd, const void *vaddr, bool create)
{ {
uint32_t *pt, *pde; uint32_t *pt, *pde;
@@ -278,7 +286,7 @@ active_pd (void)
This function invalidates the TLB if PD is the active page This function invalidates the TLB if PD is the active page
directory. (If PD is not active then its entries are not in directory. (If PD is not active then its entries are not in
the TLB, so there is no need to invalidate anything.) */ the TLB, so there is no need to invalidate anything.) */
static void void
invalidate_pagedir (uint32_t *pd) invalidate_pagedir (uint32_t *pd)
{ {
if (active_pd () == pd) if (active_pd () == pd)

View File

@@ -6,6 +6,7 @@
uint32_t *pagedir_create (void); uint32_t *pagedir_create (void);
void pagedir_destroy (uint32_t *pd); void pagedir_destroy (uint32_t *pd);
uint32_t *lookup_page (uint32_t *pd, const void *vaddr, bool create);
bool pagedir_set_page (uint32_t *pd, void *upage, void *kpage, bool rw); bool pagedir_set_page (uint32_t *pd, void *upage, void *kpage, bool rw);
void *pagedir_get_page (uint32_t *pd, const void *upage); void *pagedir_get_page (uint32_t *pd, const void *upage);
void pagedir_clear_page (uint32_t *pd, void *upage); void pagedir_clear_page (uint32_t *pd, void *upage);
@@ -16,5 +17,6 @@ void pagedir_set_accessed (uint32_t *pd, const void *upage, bool accessed);
bool pagedir_is_writable (uint32_t *pd, const void *upage); bool pagedir_is_writable (uint32_t *pd, const void *upage);
void pagedir_set_writable (uint32_t *pd, const void *upage, bool writable); void pagedir_set_writable (uint32_t *pd, const void *upage, bool writable);
void pagedir_activate (uint32_t *pd); void pagedir_activate (uint32_t *pd);
void invalidate_pagedir (uint32_t *pd);
#endif /* userprog/pagedir.h */ #endif /* userprog/pagedir.h */

View File

@@ -369,7 +369,11 @@ process_exit (void)
/* Clean up all open files */ /* Clean up all open files */
hash_destroy (&cur->open_files, fd_cleanup); hash_destroy (&cur->open_files, fd_cleanup);
/* Clean up the thread's supplemental page table. */
lock_acquire (&cur->pages_lock);
hash_destroy (&cur->pages, page_cleanup); hash_destroy (&cur->pages, page_cleanup);
lock_release (&cur->pages_lock);
/* Close the executable file, implicitly allowing it to be written to. */ /* Close the executable file, implicitly allowing it to be written to. */
if (cur->exec_file != NULL) if (cur->exec_file != NULL)
@@ -627,6 +631,9 @@ load (const char *file_name, void (**eip) (void), void **esp)
done: done:
/* We arrive here whether the load is successful or not. */ /* We arrive here whether the load is successful or not. */
#ifndef VM
file_close (file);
#endif
lock_release (&filesys_lock); lock_release (&filesys_lock);
return success; return success;
} }
@@ -709,8 +716,8 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
size_t page_zero_bytes = PGSIZE - page_read_bytes; size_t page_zero_bytes = PGSIZE - page_read_bytes;
/* Add the page metadata to the SPT to be lazy loaded later on */ /* Add the page metadata to the SPT to be lazy loaded later on */
if (page_insert (file, ofs, upage, page_read_bytes, page_zero_bytes, if (page_insert_file (file, ofs, upage, page_read_bytes, page_zero_bytes,
writable, PAGE_FILE) == NULL) writable, PAGE_EXECUTABLE) == NULL)
return false; return false;
/* Advance. */ /* Advance. */
@@ -758,6 +765,7 @@ get_usr_kpage (enum palloc_flags flags, void *upage)
return NULL; return NULL;
else else
page = frame_alloc (flags, upage, t); page = frame_alloc (flags, upage, t);
pagedir_set_accessed (t->pagedir, upage, true);
#else #else
page = palloc_get_page (flags | PAL_USER); page = palloc_get_page (flags | PAL_USER);
#endif #endif

View File

@@ -1,5 +1,4 @@
#include "userprog/syscall.h" #include "userprog/syscall.h"
#include "userprog/exception.h"
#include "devices/shutdown.h" #include "devices/shutdown.h"
#include "devices/input.h" #include "devices/input.h"
#include "filesys/file.h" #include "filesys/file.h"
@@ -11,9 +10,11 @@
#include "threads/synch.h" #include "threads/synch.h"
#include "userprog/process.h" #include "userprog/process.h"
#include "userprog/pagedir.h" #include "userprog/pagedir.h"
#include "vm/frame.h"
#include "vm/page.h" #include "vm/page.h"
#include "vm/mmap.h" #include "vm/mmap.h"
#include <stdio.h> #include <stdio.h>
#include <stdbool.h>
#include <syscall-nr.h> #include <syscall-nr.h>
#define MAX_SYSCALL_ARGS 3 #define MAX_SYSCALL_ARGS 3
@@ -52,8 +53,16 @@ static mapid_t syscall_mmap (int fd, void *addr);
static void syscall_munmap (mapid_t mapping); static void syscall_munmap (mapid_t mapping);
static struct open_file *fd_get_file (int fd); static struct open_file *fd_get_file (int fd);
static void validate_user_pointer (const void *start, size_t size, bool write); static void validate_user_ptr (const void *start, size_t size,
static void validate_user_string (const char *str); bool write);
static void validate_and_pin_user_ptr (const void *start, size_t size,
bool write);
static void validate_and_pin_user_str (const char *ptr);
static void unpin_user_ptr (const void *start, size_t size);
static void unpin_user_str (const char *ptr);
static int get_user (const uint8_t *);
static bool put_user (uint8_t *, uint8_t);
/* A struct defining a syscall_function pointer along with its arity. */ /* A struct defining a syscall_function pointer along with its arity. */
struct syscall_arguments struct syscall_arguments
@@ -104,8 +113,9 @@ static void
syscall_handler (struct intr_frame *f) syscall_handler (struct intr_frame *f)
{ {
/* First, read the system call number from the stack. */ /* First, read the system call number from the stack. */
validate_user_pointer (f->esp, sizeof (uintptr_t), false); validate_user_ptr (f->esp, sizeof (uintptr_t), false);
uintptr_t syscall_number = *(int *) f->esp; uintptr_t syscall_number = *(int *)f->esp;
thread_current ()->curr_esp = f->esp;
/* Ensures the number corresponds to a system call that can be handled. */ /* Ensures the number corresponds to a system call that can be handled. */
if (syscall_number >= LOOKUP_SIZE) if (syscall_number >= LOOKUP_SIZE)
@@ -114,12 +124,11 @@ syscall_handler (struct intr_frame *f)
struct syscall_arguments syscall = syscall_lookup[syscall_number]; struct syscall_arguments syscall = syscall_lookup[syscall_number];
/* Next, read and copy the arguments from the stack pointer. */ /* Next, read and copy the arguments from the stack pointer. */
validate_user_pointer (f->esp + sizeof (uintptr_t), validate_user_ptr (f->esp + sizeof (uintptr_t),
syscall.arity * sizeof (uintptr_t), false); syscall.arity * sizeof (uintptr_t), false);
uintptr_t args[MAX_SYSCALL_ARGS] = { 0 };
uintptr_t args[MAX_SYSCALL_ARGS] = {0};
for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++) for (int i = 0; i < syscall.arity && i < MAX_SYSCALL_ARGS; i++)
args[i] = *(uintptr_t *) (f->esp + sizeof (uintptr_t) * (i + 1)); args[i] = *(uintptr_t *)(f->esp + sizeof (uintptr_t) * (i + 1));
/* Call the function that handles this system call with the arguments. When /* Call the function that handles this system call with the arguments. When
there is a return value it is stored in f->eax. */ there is a return value it is stored in f->eax. */
@@ -148,10 +157,11 @@ syscall_exit (int status)
static pid_t static pid_t
syscall_exec (const char *cmd_line) syscall_exec (const char *cmd_line)
{ {
/* Validate the user string before executing the process. */ validate_and_pin_user_str (cmd_line);
validate_user_string (cmd_line); pid_t pid = process_execute (cmd_line);
unpin_user_str (cmd_line);
return process_execute (cmd_line); /* Returns the PID of the new process */ return pid;
} }
/* Handles the syscall of wait. Effectively a wrapper for process_wait as the /* Handles the syscall of wait. Effectively a wrapper for process_wait as the
@@ -168,14 +178,15 @@ syscall_wait (pid_t pid)
static bool static bool
syscall_create (const char *file, unsigned initial_size) syscall_create (const char *file, unsigned initial_size)
{ {
/* Validate the user string before creating the file. */ validate_and_pin_user_str (file);
validate_user_string (file);
/* Acquire the file system lock to prevent race conditions. */ /* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock); lock_acquire (&filesys_lock);
bool status = filesys_create (file, initial_size); bool status = filesys_create (file, initial_size);
lock_release (&filesys_lock); lock_release (&filesys_lock);
unpin_user_str (file);
/* Return the status of the file creation. */ /* Return the status of the file creation. */
return status; return status;
} }
@@ -186,14 +197,15 @@ syscall_create (const char *file, unsigned initial_size)
static bool static bool
syscall_remove (const char *file) syscall_remove (const char *file)
{ {
/* Validate the user string before removing the file. */ validate_and_pin_user_str (file);
validate_user_string (file);
/* Acquire the file system lock to prevent race conditions. */ /* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock); lock_acquire (&filesys_lock);
bool status = filesys_remove (file); bool status = filesys_remove (file);
lock_release (&filesys_lock); lock_release (&filesys_lock);
unpin_user_str (file);
/* Return the status of the file removal. */ /* Return the status of the file removal. */
return status; return status;
} }
@@ -205,14 +217,15 @@ syscall_remove (const char *file)
static int static int
syscall_open (const char *file) syscall_open (const char *file)
{ {
/* Validate the user string before opening the file. */ validate_and_pin_user_str (file);
validate_user_string (file);
/* Acquire the file system lock to prevent race conditions. */ /* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock); lock_acquire (&filesys_lock);
struct file *ptr = filesys_open (file); struct file *ptr = filesys_open (file);
lock_release (&filesys_lock); lock_release (&filesys_lock);
unpin_user_str (file);
/* If the file could not be opened, return failure. */ /* If the file could not be opened, return failure. */
if (ptr == NULL) if (ptr == NULL)
return EXIT_FAILURE; return EXIT_FAILURE;
@@ -272,11 +285,11 @@ syscall_read (int fd, void *buffer, unsigned size)
if (fd < STDIN_FILENO || fd == STDOUT_FILENO) if (fd < STDIN_FILENO || fd == STDOUT_FILENO)
return EXIT_FAILURE; return EXIT_FAILURE;
/* Validate the user buffer for the provided size before reading. */
validate_user_pointer (buffer, size, true);
if (fd == STDIN_FILENO) if (fd == STDIN_FILENO)
{ {
/* Validate the user buffer. */
validate_user_ptr (buffer, size, true);
/* Reading from the console. */ /* Reading from the console. */
char *write_buffer = buffer; char *write_buffer = buffer;
for (unsigned i = 0; i < size; i++) for (unsigned i = 0; i < size; i++)
@@ -294,13 +307,19 @@ syscall_read (int fd, void *buffer, unsigned size)
if (file_info == NULL) if (file_info == NULL)
return EXIT_FAILURE; return EXIT_FAILURE;
/* Validate the user buffer, and pin the pages to prevent eviction. */
validate_and_pin_user_ptr (buffer, size, true);
/* Acquire the file system lock to prevent race-conditions. */ /* Acquire the file system lock to prevent race-conditions. */
lock_acquire (&filesys_lock); lock_acquire (&filesys_lock);
int bytes_written = file_read (file_info->file, buffer, size); int bytes_read = file_read (file_info->file, buffer, size);
lock_release (&filesys_lock); lock_release (&filesys_lock);
/* Unpin the pages to allow eviction. */
unpin_user_ptr (buffer, size);
/* Return the number of bytes read. */ /* Return the number of bytes read. */
return bytes_written; return bytes_read;
} }
} }
@@ -316,11 +335,11 @@ syscall_write (int fd, const void *buffer, unsigned size)
if (fd <= 0) if (fd <= 0)
return 0; return 0;
/* Validate the user buffer for the provided size before writing. */
validate_user_pointer (buffer, size, false);
if (fd == STDOUT_FILENO) if (fd == STDOUT_FILENO)
{ {
/* Validate the user buffer. */
validate_user_ptr (buffer, size, false);
/* Writing to the console. */ /* Writing to the console. */
putbuf (buffer, size); putbuf (buffer, size);
@@ -336,13 +355,19 @@ syscall_write (int fd, const void *buffer, unsigned size)
if (file_info == NULL) if (file_info == NULL)
return 0; return 0;
/* Validate the user buffer, and pin the pages to prevent eviction. */
validate_and_pin_user_ptr (buffer, size, false);
/* Acquire the file system lock to prevent race conditions. */ /* Acquire the file system lock to prevent race conditions. */
lock_acquire (&filesys_lock); lock_acquire (&filesys_lock);
int bytes = file_write (file_info->file, buffer, size); int bytes_written = file_write (file_info->file, buffer, size);
lock_release (&filesys_lock); lock_release (&filesys_lock);
/* Unpin the pages to allow eviction. */
unpin_user_ptr (buffer, size);
/* Return the number of bytes written. */ /* Return the number of bytes written. */
return bytes; return bytes_written;
} }
} }
@@ -429,13 +454,16 @@ syscall_mmap (int fd, void *addr)
if (file_size == 0) if (file_size == 0)
return MMAP_FAILURE; return MMAP_FAILURE;
/* ensures the page for mmap does not overlap with the stack */
if (addr >= (thread_current ()->curr_esp - PGSIZE))
return MMAP_FAILURE;
/* Check and ensure that there is enough space in the user virtual memory to /* Check and ensure that there is enough space in the user virtual memory to
hold the entire file. */ hold the entire file. */
for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE) for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
{ if (page_get (thread_current (), addr + ofs) != NULL)
if (page_get (addr + ofs) != NULL)
return MMAP_FAILURE; return MMAP_FAILURE;
}
/* Map the file data into the user virtual memory starting from addr. */ /* Map the file data into the user virtual memory starting from addr. */
for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE) for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
@@ -443,8 +471,8 @@ syscall_mmap (int fd, void *addr)
off_t read_bytes = file_size - ofs < PGSIZE ? file_size - ofs : PGSIZE; off_t read_bytes = file_size - ofs < PGSIZE ? file_size - ofs : PGSIZE;
off_t zero_bytes = PGSIZE - read_bytes; off_t zero_bytes = PGSIZE - read_bytes;
if (page_insert (file, ofs, addr + ofs, read_bytes, zero_bytes, true, if (page_insert_file (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
PAGE_FILE) == NULL) PAGE_MMAP) == NULL)
return MMAP_FAILURE; return MMAP_FAILURE;
} }
@@ -453,13 +481,12 @@ syscall_mmap (int fd, void *addr)
if (mmap == NULL) if (mmap == NULL)
return MMAP_FAILURE; return MMAP_FAILURE;
return mmap->mapping; return mmap->mapping;
} }
/* Handles the syscall for unmapping a memory mapped file. /* Handles the syscall for unmapping a memory mapped file.
Pre: mapping is a valid mapping identifier returned by mmap syscall. */ Pre: mapping is a valid mapping identifier returned by mmap syscall. */
static void static void
syscall_munmap (mapid_t mapping) syscall_munmap (mapid_t mapping)
{ {
@@ -532,67 +559,193 @@ fd_get_file (int fd)
return hash_entry (e, struct open_file, elem); return hash_entry (e, struct open_file, elem);
} }
/* Validates if a block of memory starting at START and of size SIZE bytes is /* Helper function that validates a block of memory and optionally pins frames.
fully contained within user virtual memory. Kills the thread (by exiting with thread_exit() if the memory is invalid. Used only by the two helper functions
failure) if the memory is invalid. Otherwise, returns (nothing) normally. validate_user_ptr and validate_and_pin_user_ptr. See the comments for those
If the size is 0, the function does no checks and returns the given ptr. */ functions for more details on each. */
static void static void
validate_user_pointer (const void *start, size_t size, bool write) validate_user_ptr_helper (const void *start, size_t size, bool write, bool pin)
{ {
/* If the size is 0, we do not need to check anything. */
if (size == 0) if (size == 0)
return; return;
const void *end = start + size - 1; /* ptr < ptr + size - 1, so sufficient to check that (ptr + size -1) is a
valid user virtual memory address. */
/* Check if the start and end pointers are valid user virtual addresses. */ void *end = start + size - 1;
if (start == NULL || !is_user_vaddr (start) || !is_user_vaddr (end)) if (!is_user_vaddr (end))
syscall_exit (EXIT_FAILURE); syscall_exit (EXIT_FAILURE);
/* We no longer check if the memory is mapped to physical memory. This is for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
because the data may not necessarily be there at the time of the syscall,
but it may be lazily loaded later. In such case, we try to preload the
page. If that fails, we exit the thread. */
for (void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
if (pagedir_get_page (thread_current ()->pagedir, ptr) == NULL &&
!try_fetch_page (ptr, write))
syscall_exit (EXIT_FAILURE);
}
/* Validates if a string is fully contained within user virtual memory. Kills
the thread (by exiting with failure) if the memory is invalid. Otherwise,
returns (nothing) normally. */
static void
validate_user_string (const char *str)
{
/* Check if the string pointer is a valid user virtual address. */
if (str == NULL || !is_user_vaddr (str))
syscall_exit (EXIT_FAILURE);
/* Calculate the offset of the string within the (first) page. */
size_t offset = (uintptr_t) str % PGSIZE;
/* We move page by page, checking if the page is mapped to physical memory. */
for (;;)
{ {
void *page = pg_round_down (str); int result;
/* If we reach addresses that are not mapped to physical memory before the /* Check read access to pointer. */
end of the string, the thread is terminated. */ if ((result = get_user (ptr)) == -1)
if (!is_user_vaddr(page) ||
(pagedir_get_page (thread_current ()->pagedir, page) == NULL &&
!try_fetch_page (page, false)))
syscall_exit (EXIT_FAILURE); syscall_exit (EXIT_FAILURE);
while (offset < PGSIZE) /* Check write access to pointer (if required). */
{ if (write && !put_user ((uint8_t *)ptr, result))
if (*str == '\0') syscall_exit (EXIT_FAILURE);
return; /* We reached the end of the string without issues. */
str++; /* If pin is set, pin the frame to prevent eviction. */
offset++; if (pin)
{
void *kpage = pagedir_get_page(thread_current()->pagedir, ptr);
if (kpage == NULL)
{
// If it was evicted, try to load it back in.
ptr -= PGSIZE;
continue;
} }
offset = 0; /* Next page will start at the beginning. */ frame_pin(kpage);
}
} }
} }
/* Validates if a block of memory starting at PTR and of size SIZE bytes is
fully contained within valid user virtual memory. thread_exit () if the
memory is invalid.
If the size is 0, the function does no checks and returns PTR. */
static void
validate_user_ptr (const void *start, size_t size, bool write)
{
validate_user_ptr_helper (start, size, write, false);
}
/* Validates if a block of memory starting at PTR and of size SIZE bytes is
fully contained within valid user virtual memory. thread_exit () if the
memory is invalid. The function also checks if the memory is writable if
WRITE flag is set.
The function attempts to preload the pages in case they are not in memory
yet (e.g., in a swap, lazy loading). If this is successful, the frame pages
are pinned to prevent eviction prior to access.
As such, a call to this function MUST be followed by a call to
unpin_user_ptr (START, SIZE) to unpin the pages and allow eviction.
If the size is 0, the function does no checks and returns PTR. */
static void
validate_and_pin_user_ptr (const void *start, size_t size, bool write)
{
validate_user_ptr_helper (start, size, write, true);
}
/* Unpins all the pages containing a block of memory starting at START and of
size SIZE bytes.
Pre: The pages were previously pinned by validate_and_pin_user_ptr (START,
SIZE). */
static void
unpin_user_ptr (const void *start, size_t size)
{
void *end = start + size - 1;
/* We don't need to do any checks as this function is always called after
validate_and_pin_user_ptr. */
/* Go through all pages in the block range, unpinning the frames. */
for (void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
{
void *kpage = pagedir_get_page (thread_current ()->pagedir, ptr);
ASSERT (kpage != NULL);
frame_unpin (kpage);
}
}
/* Validates of a C-string starting at ptr is fully contained within valid
user virtual memory. thread_exit () if the memory is invalid. */
static void
validate_and_pin_user_str (const char *ptr)
{
size_t offset = (uintptr_t) ptr % PGSIZE;
for (;;)
{
if (!is_user_vaddr (ptr))
syscall_exit (EXIT_FAILURE);
if (get_user ((const uint8_t *)ptr) == -1)
syscall_exit (EXIT_FAILURE);
/* Pin the frame to prevent eviction. */
void *page = pg_round_down (ptr);
void *kpage = pagedir_get_page (thread_current ()->pagedir, page);
if (kpage == NULL)
{
// If it was evicted, attempt to reload.
ptr -= PGSIZE;
continue;
}
frame_pin (kpage);
while (offset < PGSIZE)
{
if (*ptr == '\0')
return; /* We reached the end of the string without issues. */
ptr++;
offset++;
}
offset = 0;
}
}
/* Unpins all the pages containing a C-string starting at PTR.
Pre: The pages were previously pinned by validate_and_pin_user_str (PTR).
PTR points to a valid C string that ends with '\0'. */
static void
unpin_user_str (const char *ptr)
{
size_t offset = (uintptr_t)ptr % PGSIZE;
const char *str_ptr = ptr;
for (;;)
{
void *page = pg_round_down(str_ptr);
void *kpage = pagedir_get_page(thread_current()->pagedir, page);
ASSERT(kpage != NULL);
frame_unpin (kpage);
/* Scan until end of string or page */
while (offset < PGSIZE)
{
if (*str_ptr == '\0')
return; /* Found end of string */
str_ptr++;
offset++;
}
offset = 0;
}
}
/* PROVIDED BY SPEC.
Reads a byte at user virtual address UADDR.
UADDR must be below PHYS_BASE.
Returns the byte value if successful, -1 if a segfault occurred. */
static int
get_user (const uint8_t *uaddr)
{
int result;
asm ("movl $1f, %0; movzbl %1, %0; 1:" : "=&a"(result) : "m"(*uaddr));
return result;
}
/* PROVIDED BY SPEC.
Writes BYTE to user address UDST.
UDST must be below PHYS_BASE.
Returns true if successful, false if a segfault occurred. */
static bool
put_user (uint8_t *udst, uint8_t byte)
{
int error_code;
asm ("movl $1f, %0; movb %b2, %1; 1:"
: "=&a"(error_code), "=m"(*udst)
: "q"(byte));
return error_code != -1;
}

View File

@@ -2,65 +2,72 @@
#include <hash.h> #include <hash.h>
#include <list.h> #include <list.h>
#include <string.h> #include <string.h>
#include "frame.h" #include "frame.h"
#include "page.h" #include "page.h"
#include "filesys/file.h"
#include "threads/malloc.h" #include "threads/malloc.h"
#include "threads/vaddr.h" #include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "userprog/syscall.h"
#include "threads/synch.h" #include "threads/synch.h"
#include "devices/swap.h"
/* Hash table that maps every active frame's kernel virtual address /* Hash table that maps every active frame's kernel virtual address
to its corresponding 'frame_metadata'.*/ to its corresponding 'frame_metadata'.*/
struct hash frame_table; struct hash frame_table;
/* Linked list of frame_metadata whose pages are predicted to currently /* Linked list used to represent the circular queue in the 'clock'
be in the working set of a process. They are not considered for algorithm for page eviction. Iterating from the element that is
eviction, but are considered for demotion to the 'inactive' list. */ currently pointed at by 'next_victim' yields an ordering of the entries
struct list active_list; from oldest to newest (in terms of when they were added or checked
for having been referenced by a process). */
struct list lru_list;
/* Linked list of frame_metadata whose pages are predicted to leave the /* The next element in lru_list to be considered for eviction (oldest added
working set of their processes soon, so are considered for eviction. or referenced page in the circular queue). If this page has has an
Pages are considered for eviction from the tail end, and are initially 'accessed' bit of 0 when considering eviction, then it will be the next
demoted to 'inactive' at the head. */ victim. Otherwise, the next element in the queue is similarly considered. */
struct list inactive_list; struct list_elem *next_victim = NULL;
/* Synchronisation variables. */
/* Protects access to the 'inactive' list. */
struct lock inactive_lock;
struct frame_metadata struct frame_metadata
{ {
void *frame; /* The kernel virtual address holding the frame. */ void *frame; /* The kernel virtual address holding the frame. */
void *upage; /* The user virtual address pointing to the frame. */ void *upage; /* The user virtual address pointing to the frame. */
struct thread *owner; /* Pointer to the thread that owns the frame. */ struct list owners; /* List of threads that own the frame. */
bool pinned; /* Indicates wheter the frame should be
considered as an eviction candidate.*/
struct hash_elem hash_elem; /* Tracks the position of the frame metadata struct hash_elem hash_elem; /* Tracks the position of the frame metadata
within 'frame_table', whose key is the within 'frame_table', whose key is the
kernel virtual address of the frame. */ kernel virtual address of the frame. */
struct list_elem list_elem; /* Tracks the position of the frame metadata struct list_elem list_elem; /* Tracks the position of the frame metadata
in either the 'active' or 'inactive' list, within 'lru_list', so a victim can be
so a victim can be chosen for eviction. */ chosen for eviction. */
}; };
hash_hash_func frame_metadata_hash; hash_hash_func frame_metadata_hash;
hash_less_func frame_metadata_less; hash_less_func frame_metadata_less;
static struct list_elem *lru_next (struct list_elem *e);
static struct list_elem *lru_prev (struct list_elem *e);
static struct frame_metadata *frame_metadata_get (void *frame);
static struct frame_metadata *get_victim (void); static struct frame_metadata *get_victim (void);
static void free_owners (struct list *owners);
static struct frame_metadata *frame_metadata_find (void *frame);
/* Initialize the frame system by initializing the frame (hash) table with /* Initialize the frame system by initializing the frame (hash) table with
the frame_metadata hashing and comparison functions, as well as initializing the frame_metadata hashing and comparison functions, as well as initializing
the active & inactive lists. Also initializes the system's synchronisation 'lru_list' and its associated synchronisation primitives. */
primitives. */
void void
frame_init (void) frame_init (void)
{ {
hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL); hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
list_init (&active_list);
list_init (&inactive_list);
lock_init (&inactive_lock); list_init (&lru_list);
lock_init (&ftable_lock);
} }
/* TODO: Consider synchronisation more closely (i.e. just for hash
table). */
/* Attempt to allocate a frame for a user process, either by direct /* Attempt to allocate a frame for a user process, either by direct
allocation of a user page if there is sufficient RAM, or by allocation of a user page if there is sufficient RAM, or by
evicting a currently active page if memory allocated for user evicting a currently active page if memory allocated for user
@@ -69,7 +76,10 @@ frame_init (void)
void * void *
frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner) frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
{ {
struct frame_metadata *frame_metadata;
flags |= PAL_USER; flags |= PAL_USER;
lock_acquire (&ftable_lock);
void *frame = palloc_get_page (flags); void *frame = palloc_get_page (flags);
/* If a frame couldn't be allocated we must be out of main memory. Thus, /* If a frame couldn't be allocated we must be out of main memory. Thus,
@@ -77,80 +87,262 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
into disk. */ into disk. */
if (frame == NULL) if (frame == NULL)
{ {
/* TODO: Deal with race condition wherein a page may be evicted in one /* 1. Obtain victim. */
thread while it's in the middle of being evicted in another. */ if (next_victim == NULL)
struct frame_metadata *victim = get_victim (); PANIC ("Couldn't allocate a single page to main memory!\n");
if (victim == NULL)
return NULL;
size_t swap_slot = swap_out (victim->frame); struct frame_metadata *victim = get_victim ();
page_set_swap (victim->owner, victim->upage, swap_slot); ASSERT (victim != NULL); /* get_victim () should never return null. */
/* 2. Handle victim page writing based on its type. */
struct page_entry *victim_page = page_get (thread_current (), victim->upage);
if (victim_page != NULL && victim_page->type == PAGE_MMAP)
{
/* If it was a memory-mapped file page, we just write it back
to the file if it was dirty. */
if (pagedir_is_dirty(owner->pagedir, victim->upage))
{
lock_acquire (&filesys_lock);
file_write_at (victim_page->file, victim->upage,
victim_page->read_bytes, victim_page->offset);
lock_release (&filesys_lock);
}
}
else
{
/* Otherwise, insert the page into swap. */
page_insert_swapped (victim->upage, victim->frame, &victim->owners);
}
/* Free victim's owners. */
free_owners (&victim->owners);
/* If zero flag is set, zero out the victim page. */ /* If zero flag is set, zero out the victim page. */
if (flags & PAL_ZERO) if (flags & PAL_ZERO)
memset (victim->frame, 0, PGSIZE); memset (victim->frame, 0, PGSIZE);
frame = victim->frame; /* 3. Indicate that the new frame's metadata will be stored
inside the same structure that stored the victim's metadata.frame.c
As both the new frame and the victim frame share the same kernel
virtual address, the hash map need not be updated, and neither
the list_elem value as both share the same lru_list position. */
frame_metadata = victim;
} }
struct frame_metadata *frame_metadata = /* If sufficient main memory allows the frame to be directly allocated,
malloc (sizeof (struct frame_metadata)); we must update the frame table with a new entry, and grow lru_list. */
frame_metadata->frame = frame; else
{
/* Must own ftable_lock here, as otherwise there is a race condition
with next_victim either being NULL or uninitialized. */
frame_metadata = malloc (sizeof (struct frame_metadata));
if (frame_metadata == NULL)
PANIC ("Couldn't allocate memory for frame metadata!\n");
frame_metadata->frame = frame;
/* Newly allocated frames are pushed to the back of the circular queue
represented by lru_list. Must explicitly handle the case where the
circular queue is empty (when next_victim == NULL). */
if (next_victim == NULL)
{
list_push_back (&lru_list, &frame_metadata->list_elem);
next_victim = &frame_metadata->list_elem;
}
else
{
struct list_elem *lru_tail = lru_prev (next_victim);
list_insert (lru_tail, &frame_metadata->list_elem);
}
hash_insert (&frame_table, &frame_metadata->hash_elem);
}
struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
if (frame_owner == NULL)
PANIC ("Couldn't allocate memory for frame owner!\n");
frame_owner->owner = owner;
list_init (&frame_metadata->owners);
list_push_back (&frame_metadata->owners, &frame_owner->elem);
frame_metadata->upage = upage; frame_metadata->upage = upage;
frame_metadata->owner = owner; frame_metadata->pinned = false;
lock_release (&ftable_lock);
return frame_metadata->frame;
}
/* Newly faulted pages begin at the head of the inactive list. */ void
lock_acquire (&inactive_lock); frame_pin (void *frame)
list_push_front (&inactive_list, &frame_metadata->list_elem); {
lock_release (&inactive_lock); ASSERT (frame != NULL);
lock_acquire (&ftable_lock);
struct frame_metadata *frame_metadata = frame_metadata_get (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to pin a frame at an unallocated kernel address '%p'\n",
frame);
/* Finally, insert frame metadata within the frame table, with the key as its frame_metadata->pinned = true;
allocated kernel address. */ lock_release (&ftable_lock);
hash_replace (&frame_table, &frame_metadata->hash_elem); }
void
frame_unpin (void *frame)
{
ASSERT (frame != NULL);
lock_acquire (&ftable_lock);
struct frame_metadata *frame_metadata = frame_metadata_get (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to unpin a frame at an unallocated kernel address '%p'\n",
frame);
return frame; frame_metadata->pinned = false;
lock_release (&ftable_lock);
} }
/* Attempt to deallocate a frame for a user process by removing it from the /* Attempt to deallocate a frame for a user process by removing it from the
frame table as well as active/inactive list, and freeing the underlying frame table as well as lru_list, and freeing the underlying page
page memory. Panics if the frame isn't active in memory. */ memory & metadata struct. Panics if the frame isn't active in memory. */
void void
frame_free (void *frame) frame_free (void *frame)
{ {
struct frame_metadata key_metadata; struct frame_metadata *frame_metadata = frame_metadata_find (frame);
key_metadata.frame = frame; if (frame_metadata == NULL)
PANIC ("Attempted to free a frame at kernel address %p, "
"but this address is not allocated!\n",
frame);
struct hash_elem *e = free_owners (&frame_metadata->owners);
hash_delete (&frame_table, &key_metadata.hash_elem); lock_acquire (&ftable_lock);
if (e == NULL) PANIC ("Attempted to free a frame without a corresponding " hash_delete (&frame_table, &frame_metadata->hash_elem);
"kernel address!\n");
struct frame_metadata *frame_metadata =
hash_entry (e, struct frame_metadata, hash_elem);
list_remove (&frame_metadata->list_elem); list_remove (&frame_metadata->list_elem);
/* If we're freeing the frame marked as the next victim, update
next_victim to either be the next least recently used page, or NULL
if no pages are loaded in main memory. */
if (&frame_metadata->list_elem == next_victim)
{
if (list_empty (&lru_list))
next_victim = NULL;
else
next_victim = lru_next (next_victim);
}
lock_release (&ftable_lock);
free (frame_metadata); free (frame_metadata);
palloc_free_page (frame); palloc_free_page (frame);
} }
/* Obtain a pointer to the metadata of the frame we should evict next. */ /* Add a thread to a frame's frame_metadata owners list. */
bool
frame_owner_insert (void *frame, struct thread *owner)
{
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
if (frame_metadata == NULL)
return false;
struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
if (frame_owner == NULL)
return false;
frame_owner->owner = owner;
list_push_back (&frame_metadata->owners, &frame_owner->elem);
return true;
}
/* Remove and deallocate a frame owner from the frame_metadata owners list.
*/
void
frame_owner_remove (void *frame, struct thread *owner)
{
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to remove an owner from a frame at kernel "
"address %p, but this address is not allocated!\n",
frame);
struct list_elem *oe;
for (oe = list_begin (&frame_metadata->owners);
oe != list_end (&frame_metadata->owners);)
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
oe = list_next (oe);
if (frame_owner->owner == owner)
{
list_remove (&frame_owner->elem);
free (frame_owner);
return;
}
}
NOT_REACHED ();
}
/* Find a frame_metadata entry in the frame table. */
static struct frame_metadata *
frame_metadata_find (void *frame)
{
struct frame_metadata key_metadata;
key_metadata.frame = frame;
struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct frame_metadata, hash_elem);
}
/* A pre-condition for calling this function is that the calling thread
owns ftable_lock and that lru_list is non-empty. */
static struct frame_metadata * static struct frame_metadata *
get_victim (void) get_victim (void)
{ {
lock_acquire (&inactive_lock); struct list_elem *ve = next_victim;
if (list_empty (&inactive_list)) struct frame_metadata *frame_metadata;
bool found = false;
while (!found)
{ {
return NULL; frame_metadata = list_entry (ve, struct frame_metadata, list_elem);
} ve = lru_next (ve);
else struct list_elem *oe;
{
struct list_elem *victim_elem = list_pop_back (&inactive_list); /* Skip pinned frames */
lock_release (&inactive_lock); if (frame_metadata->pinned)
continue;
return list_entry (victim_elem, struct frame_metadata, list_elem);
/* Returns once a frame that was not accessed by any owner is found. */
found = true;
for (oe = list_begin (&frame_metadata->owners);
oe != list_end (&frame_metadata->owners); oe = list_next (oe))
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
uint32_t *pd = frame_owner->owner->pagedir;
void *upage = frame_metadata->upage;
if (pagedir_is_accessed (pd, upage))
{
found = false;
pagedir_set_accessed (pd, upage, false);
}
}
} }
next_victim = ve;
return frame_metadata;
} }
static void
free_owners (struct list *owners)
{
struct list_elem *oe;
for (oe = list_begin (owners); oe != list_end (owners);)
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
oe = list_remove (oe);
free (frame_owner);
}
}
/* Hash function for frame metadata, used for storing entries in the /* Hash function for frame metadata, used for storing entries in the
frame table. */ frame table. */
unsigned unsigned
@@ -168,12 +360,46 @@ frame_metadata_hash (const struct hash_elem *e, void *aux UNUSED)
bool bool
frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_, frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux UNUSED) void *aux UNUSED)
{ {
struct frame_metadata *a = struct frame_metadata *a =
hash_entry (a_, struct frame_metadata, hash_elem); hash_entry (a_, struct frame_metadata, hash_elem);
struct frame_metadata *b = struct frame_metadata *b =
hash_entry (b_, struct frame_metadata, hash_elem); hash_entry (b_, struct frame_metadata, hash_elem);
return a->frame < b->frame; return a->frame < b->frame;
} }
static struct frame_metadata *
frame_metadata_get (void *frame)
{
struct frame_metadata key_metadata;
key_metadata.frame = frame;
struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
if (e == NULL) return NULL;
return hash_entry (e, struct frame_metadata, hash_elem);
}
/* Returns the next recently used element after the one provided, which
is achieved by iterating through lru_list like a circular queue
(wrapping around the list at the tail). */
static struct list_elem *
lru_next (struct list_elem *e)
{
if (!list_empty (&lru_list) && e == list_back (&lru_list))
return list_front (&lru_list);
return list_next (e);
}
/* Returns the previous recently used element after the one provided, which
is achieved by iterating through lru_list like a circular queue
(wrapping around the list at the head). */
static struct list_elem *
lru_prev (struct list_elem *e)
{
if (!list_empty (&lru_list) && e == list_front (&lru_list))
return list_back (&lru_list);
return list_prev (e);
}

View File

@@ -4,8 +4,24 @@
#include "threads/thread.h" #include "threads/thread.h"
#include "threads/palloc.h" #include "threads/palloc.h"
struct frame_owner
{
struct thread *owner; /* The thread that owns the frame. */
struct list_elem elem; /* List element for the list of owners. */
};
/* Synchronisation variables. */
/* Protects access to the frame table and its related components. */
struct lock ftable_lock;
void frame_init (void); void frame_init (void);
void *frame_alloc (enum palloc_flags, void *, struct thread *); void *frame_alloc (enum palloc_flags, void *, struct thread *);
void frame_pin (void *frame);
void frame_unpin (void *frame);
void frame_free (void *frame); void frame_free (void *frame);
bool frame_owner_insert (void *frame, struct thread *owner);
void frame_owner_remove (void *frame, struct thread *owner);
#endif /* vm/frame.h */ #endif /* vm/frame.h */

View File

@@ -1,5 +1,6 @@
#include "mmap.h" #include "mmap.h"
#include "page.h" #include "page.h"
#include "threads/thread.h"
#include "threads/vaddr.h" #include "threads/vaddr.h"
#include "threads/malloc.h" #include "threads/malloc.h"
#include "userprog/syscall.h" #include "userprog/syscall.h"
@@ -66,26 +67,27 @@ mmap_unmap (struct mmap_entry *mmap)
if necessary. */ if necessary. */
off_t length = file_length (mmap->file); off_t length = file_length (mmap->file);
for (off_t ofs = 0; ofs < length; ofs += PGSIZE) for (off_t ofs = 0; ofs < length; ofs += PGSIZE)
{
void *upage = mmap->upage + ofs;
/* Get the SPT page entry for this page. */
struct page_entry *page = page_get(upage);
if (page == NULL)
continue;
/* Write the page back to the file if it is dirty. */
if (pagedir_is_dirty (thread_current ()->pagedir, upage))
{ {
lock_acquire (&filesys_lock); void *upage = mmap->upage + ofs;
file_write_at (mmap->file, upage, page->read_bytes, ofs);
lock_release (&filesys_lock); /* Get the SPT page entry for this page. */
struct page_entry *page = page_get(thread_current (), upage);
if (page == NULL)
continue;
/* Write the page back to the file if it is dirty. */
if (pagedir_is_dirty (thread_current ()->pagedir, upage))
{
lock_acquire (&filesys_lock);
file_write_at (mmap->file, upage, page->read_bytes, ofs);
lock_release (&filesys_lock);
}
/* Remove the page from the supplemental page table. */
hash_delete (&thread_current ()->pages, &page->elem);
} }
/* Remove the page from the supplemental page table. */ /* Close the file and free the mmap entry. */
hash_delete (&thread_current ()->pages, &page->elem);
}
file_close (mmap->file); file_close (mmap->file);
free (mmap); free (mmap);
} }
@@ -126,21 +128,3 @@ mmap_cleanup (struct hash_elem *e, void *aux UNUSED)
struct mmap_entry *mmap = hash_entry (e, struct mmap_entry, elem); struct mmap_entry *mmap = hash_entry (e, struct mmap_entry, elem);
mmap_unmap (mmap); mmap_unmap (mmap);
} }
/* Updates the 'owner' thread's page table entry for virtual address 'upage'
   to have a present bit of 0 and stores the specified swap slot value in the
   entry for later retrieval from disk. */
void
page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
{
  /* NOTE(review): unimplemented stub — the comment above describes intent
     only; the body currently performs no page-table update. */
}
/* Given that the page with user address 'upage' owned by 'owner' is flagged
   to be in the swap disk via the owner's page table, returns its stored
   swap slot. Otherwise panics the kernel. */
size_t
page_get_swap (struct thread *owner, void *upage)
{
  /* NOTE(review): unimplemented stub — always returns slot 0 and never
     panics, contrary to the contract documented above. */
  return 0;
}

View File

@@ -1,24 +1,57 @@
#include "page.h" #include "page.h"
#include <stdint.h>
#include <string.h> #include <string.h>
#include <stdio.h> #include <stdio.h>
#include "filesys/file.h" #include "filesys/file.h"
#include "threads/pte.h"
#include "threads/malloc.h" #include "threads/malloc.h"
#include "threads/palloc.h" #include "threads/palloc.h"
#include "threads/synch.h"
#include "devices/swap.h"
#include "userprog/process.h" #include "userprog/process.h"
#include "userprog/pagedir.h"
#include "vm/frame.h" #include "vm/frame.h"
#define SWAP_FLAG_BIT 9
#define SHARED_FLAG_BIT 10
#define ADDR_START_BIT 12
struct hash shared_file_pages;
struct lock shared_file_pages_lock;
static unsigned page_hash (const struct hash_elem *e, void *aux UNUSED);
static bool page_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux UNUSED);
static void page_flag_shared (struct thread *owner, void *upage, bool shared);
static unsigned shared_file_page_hash (const struct hash_elem *e,
void *aux UNUSED);
static bool shared_file_page_less (const struct hash_elem *a_,
const struct hash_elem *b_,
void *aux UNUSED);
static struct shared_file_page *shared_file_page_get (struct file *file,
void *upage);
/* Initialise a supplementary page table, keyed by user page address.
   Returns true iff the underlying hash table was set up successfully. */
bool
init_pages (struct hash *pages)
{
  ASSERT (pages != NULL);

  bool ok = hash_init (pages, page_hash, page_less, NULL);
  return ok;
}
/* Hashing function needed for the SPT table. Returns a hash for an entry, /* Hashing function needed for the SPT table. Returns a hash for an entry,
based on its upage. */ based on its upage. */
unsigned static unsigned
page_hash (const struct hash_elem *e, UNUSED void *aux) page_hash (const struct hash_elem *e, void *aux UNUSED)
{ {
struct page_entry *page = hash_entry (e, struct page_entry, elem); struct page_entry *page = hash_entry (e, struct page_entry, elem);
return hash_ptr(page->upage); return hash_ptr (page->upage);
} }
/* Comparator function for the SPT table. Compares two entries based on their /* Comparator function for the SPT table. Compares two entries based on their
upages. */ upages. */
bool static bool
page_less (const struct hash_elem *a_, const struct hash_elem *b_, page_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux UNUSED) void *aux UNUSED)
{ {
@@ -28,37 +61,121 @@ page_less (const struct hash_elem *a_, const struct hash_elem *b_,
return a->upage < b->upage; return a->upage < b->upage;
} }
/* Allocate and insert a new page entry into the thread's page table. */ static void page_flag_swap (uint32_t *pte, bool set);
struct page_entry * static void page_set_swap (struct thread *owner, uint32_t *pte,
page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes, size_t swap_slot);
uint32_t zero_bytes, bool writable, enum page_type type)
/* Swap out 'owner' process's 'upage' stored at 'kpage'. Then, allocate and
   insert a new page entry into the user process thread's SPT representing
   this swapped out page. Returns true on success, or false if an SPT entry
   could not be allocated. */
bool
page_insert_swapped (void *upage, void *kpage, struct list *owners)
{
  struct file *exec_file = NULL;
  struct list_elem *e;
  for (e = list_begin (owners); e != list_end (owners); e = list_next (e))
    {
      struct thread *owner = list_entry (e, struct frame_owner, elem)->owner;
      uint32_t *pte = lookup_page (owner->pagedir, upage, false);

      /* Shared read-only executable pages are swapped once, after this
         loop, via the shared_file_pages table rather than via per-owner
         SPT entries. */
      if (exec_file != NULL || page_is_shared_pte (pte))
        {
          ASSERT (page_is_shared_pte (pte));
          pagedir_clear_page (owner->pagedir, upage);
          exec_file = owner->exec_file;
          ASSERT (exec_file != NULL);
          continue;
        }

      /* A non-shared page must have exactly one owning process. */
      ASSERT (list_size (owners) == 1);

      /* 1. Initialize swapped page entry. */
      struct page_entry *page = page_get (owner, upage);
      lock_acquire (&owner->pages_lock);
      if (page == NULL)
        {
          page = malloc (sizeof (struct page_entry));
          if (page == NULL)
            {
              /* Fix: release 'pages_lock' instead of leaking it, and
                 return false (not NULL) per the bool return contract. */
              lock_release (&owner->pages_lock);
              return false;
            }
          page->upage = upage;
          lock_init (&page->lock);
          hash_insert (&owner->pages, &page->elem);
        }

      /* Mark page as 'swapped' and flag the page directory as having
         been modified *before* eviction begins to prevent the owner of the
         victim page from accessing/modifying it mid-eviction. */
      /* TODO: We need to stop the process from destroying pagedir
         mid-eviction, as this could render the page table entry invalid. */
      page_flag_swap (pte, true);
      lock_acquire (&page->lock);
      pagedir_clear_page (owner->pagedir, upage);
      size_t swap_slot = swap_out (kpage);
      page_set_swap (owner, pte, swap_slot);
      lock_release (&page->lock);
      lock_release (&owner->pages_lock);
    }

  if (exec_file != NULL)
    {
      /* The shared copy's backing frame is gone; record its swap slot so
         any sharer can later fault it back in. */
      lock_acquire (&shared_file_pages_lock);
      struct shared_file_page *sfp = shared_file_page_get (exec_file, upage);
      sfp->frame = NULL;
      sfp->swap_slot = swap_out (kpage);
      lock_release (&shared_file_pages_lock);
    }
  return true;
}
/* Allocate and insert a new page entry into the user process thread's
SPT representing a file page. */
struct page_entry *
page_insert_file (struct file *file, off_t ofs, void *upage,
uint32_t read_bytes, uint32_t zero_bytes, bool writable,
enum page_type type)
{
/* If page exists, just update it. */
struct page_entry *existing = page_get (thread_current (), upage);
if (existing != NULL)
{
ASSERT (existing->read_bytes == read_bytes);
ASSERT (existing->zero_bytes == zero_bytes);
existing->writable = existing->writable || writable;
return existing;
}
struct page_entry *page = malloc(sizeof (struct page_entry)); struct page_entry *page = malloc(sizeof (struct page_entry));
if (page == NULL) if (page == NULL)
return NULL; return NULL;
page->type = type;
page->file = file; page->file = file;
page->offset = ofs; page->offset = ofs;
page->upage = upage; page->upage = upage;
page->read_bytes = read_bytes; page->read_bytes = read_bytes;
page->zero_bytes = zero_bytes; page->zero_bytes = zero_bytes;
page->writable = writable; page->writable = writable;
page->type = type; lock_init (&page->lock);
hash_insert (&thread_current ()->pages, &page->elem); struct thread *t = thread_current ();
lock_acquire (&t->pages_lock);
hash_insert (&t->pages, &page->elem);
lock_release (&t->pages_lock);
return page; return page;
} }
/* Gets a page_entry from the starting address of the page. Returns NULL if no /* Gets a page_entry from the starting address of the page. Returns NULL if no
such page_entry exists in the hash map.*/ such page_entry exists in the hash map.*/
struct page_entry * struct page_entry *
page_get (void *upage) page_get (struct thread *thread, void *upage)
{ {
struct page_entry fake_page_entry; struct page_entry fake_page_entry;
fake_page_entry.upage = upage; fake_page_entry.upage = upage;
lock_acquire (&thread->pages_lock);
struct hash_elem *e struct hash_elem *e
= hash_find (&thread_current ()->pages, &fake_page_entry.elem); = hash_find (&thread->pages, &fake_page_entry.elem);
lock_release (&thread->pages_lock);
if (e == NULL) if (e == NULL)
return NULL; return NULL;
@@ -66,18 +183,70 @@ page_get (void *upage)
} }
bool bool
page_load (struct page_entry *page, bool writable) page_load_file (struct page_entry *page)
{ {
/* Allocate a frame for the page. If a frame allocation fails, then /* Allocate a frame for the page. If a frame allocation fails, then
frame_alloc should try to evict a page. If it is still NULL, the OS frame_alloc should try to evict a page. If it is still NULL, the OS
panics as this should not happen if eviction is working correctly. */ panics as this should not happen if eviction is working correctly. */
void *frame = frame_alloc (PAL_USER, page->upage, thread_current ()); struct thread *t = thread_current ();
bool shareable = !page->writable && file_compare (page->file, t->exec_file);
if (shareable)
{
lock_acquire (&shared_file_pages_lock);
struct shared_file_page *sfp
= shared_file_page_get (page->file, page->upage);
if (sfp != NULL)
{
/* Frame exists, just install it. */
if (sfp->frame != NULL)
{
if (!install_page (page->upage, sfp->frame, page->writable))
{
lock_release (&shared_file_pages_lock);
return false;
}
frame_owner_insert (sfp->frame, t);
}
/* Otherwise, shared page is in swap. Load it. */
else
{
void *frame = frame_alloc (PAL_USER, page->upage, t);
if (frame == NULL)
PANIC (
"Could not allocate a frame to load page into memory.");
swap_in (frame, sfp->swap_slot);
if (!install_page (page->upage, frame, false))
{
frame_free (frame);
lock_release (&shared_file_pages_lock);
return false;
}
}
page_flag_shared (t, page->upage, true);
if (page->type != PAGE_SHARED)
{
sfp->ref_count++;
page->type = PAGE_SHARED;
}
lock_release (&shared_file_pages_lock);
return true;
}
}
void *frame = frame_alloc (PAL_USER, page->upage, t);
pagedir_set_accessed (t->pagedir, page->upage, true);
if (frame == NULL) if (frame == NULL)
PANIC ("Could not allocate a frame to load page into memory."); PANIC ("Could not allocate a frame to load page into memory.");
/* Map the page to the frame. */ /* Map the page to the frame. */
if (!install_page (page->upage, frame, writable)) if (!install_page (page->upage, frame, page->writable))
{ {
if (shareable)
lock_release (&shared_file_pages_lock);
frame_free (frame); frame_free (frame);
return false; return false;
} }
@@ -88,6 +257,8 @@ page_load (struct page_entry *page, bool writable)
file_seek (page->file, page->offset); file_seek (page->file, page->offset);
if (file_read (page->file, frame, page->read_bytes) != (int) page->read_bytes) if (file_read (page->file, frame, page->read_bytes) != (int) page->read_bytes)
{ {
if (shareable)
lock_release (&shared_file_pages_lock);
frame_free (frame); frame_free (frame);
return false; return false;
} }
@@ -95,6 +266,27 @@ page_load (struct page_entry *page, bool writable)
/* Zero out the remaining bytes in the frame. */ /* Zero out the remaining bytes in the frame. */
memset (frame + page->read_bytes, 0, page->zero_bytes); memset (frame + page->read_bytes, 0, page->zero_bytes);
/* If file page is read-only, make it shared. */
if (shareable)
{
struct shared_file_page *sfp = malloc (sizeof (struct shared_file_page));
if (sfp == NULL)
{
lock_release (&shared_file_pages_lock);
frame_free (frame);
return false;
}
sfp->file = page->file;
sfp->upage = page->upage;
sfp->frame = frame;
sfp->swap_slot = 0;
sfp->ref_count = 1;
hash_insert (&shared_file_pages, &sfp->elem);
page_flag_shared (t, page->upage, true);
page->type = PAGE_SHARED;
lock_release (&shared_file_pages_lock);
}
/* Mark the page as loaded successfully. */ /* Mark the page as loaded successfully. */
return true; return true;
} }
@@ -104,5 +296,161 @@ page_load (struct page_entry *page, bool writable)
void void
page_cleanup (struct hash_elem *e, void *aux UNUSED) page_cleanup (struct hash_elem *e, void *aux UNUSED)
{ {
free (hash_entry (e, struct page_entry, elem)); struct page_entry *page = hash_entry (e, struct page_entry, elem);
if (page->type == PAGE_SHARED)
{
lock_acquire (&shared_file_pages_lock);
struct shared_file_page *sfp
= shared_file_page_get (page->file, page->upage);
ASSERT (sfp != NULL);
if (sfp->frame != NULL)
frame_owner_remove (sfp->frame, thread_current ());
sfp->ref_count--;
if (sfp->ref_count == 0)
{
hash_delete (&shared_file_pages, &sfp->elem);
if (sfp->frame != NULL)
frame_free (sfp->frame);
else
swap_drop (sfp->swap_slot);
free (sfp);
}
lock_release (&shared_file_pages_lock);
}
free (page);
}
/* Flags the provided page table entry as representing a swapped out page
   when 'set' is true, or clears that flag when 'set' is false.
   Made 'static' to match the forward declaration above — the original
   definition omitted the storage class, giving an inconsistent linkage
   specification for a file-local helper. */
static void
page_flag_swap (uint32_t *pte, bool set)
{
  if (set)
    *pte |= (1 << SWAP_FLAG_BIT);
  else
    *pte &= ~(1 << SWAP_FLAG_BIT);
}
/* Sets the address bits of the page table entry to the provided swap slot
   value. To be used for later retrieval of the swap slot when page faulting. */
static void
page_set_swap (struct thread *owner, uint32_t *pte, size_t swap_slot)
{
  /* Mark the entry as swapped, then replace its address bits with the
     swap slot (excess high bits are truncated away by PTE_ADDR). */
  *pte |= 1 << SWAP_FLAG_BIT;
  uint32_t slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR;
  *pte = (*pte & PTE_FLAGS) | slot_bits;

  /* Flush the TLB so the stale translation cannot be used. */
  invalidate_pagedir (owner->pagedir);
}
/* Returns true iff the page with user address 'upage' owned by 'owner'
   is flagged to be in the swap disk via the owner's page table. */
bool
page_in_swap (struct thread *owner, void *upage)
{
  return page_in_swap_pte (lookup_page (owner->pagedir, upage, false));
}
/* Returns true iff the page table entry is marked to be in the swap disk.
   A NULL entry is treated as not swapped. */
bool
page_in_swap_pte (uint32_t *pte)
{
  if (pte == NULL)
    return false;
  return (*pte & (1 << SWAP_FLAG_BIT)) != 0;
}
/* Given that the page with user address 'upage' owned by 'owner' is flagged
   to be in the swap disk via the owner's page table, returns its stored
   swap slot and marks the PTE as not being in swap. */
size_t
page_get_swap (struct thread *owner, void *upage)
{
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
  ASSERT (pte != NULL);
  ASSERT ((*pte & PTE_P) == 0);

  /* Read the slot out of the address bits, then clear the swap flag
     (which lives in the flag bits and leaves the address bits intact). */
  size_t swap_slot = (*pte & PTE_ADDR) >> ADDR_START_BIT;
  page_flag_swap (pte, false);
  return swap_slot;
}
/* Returns the swap slot stored in a (not-present) PTE's address bits. */
size_t
page_get_swap_pte (uint32_t *pte)
{
  ASSERT (pte != NULL);
  ASSERT ((*pte & PTE_P) == 0);

  size_t swap_slot = (*pte & PTE_ADDR) >> ADDR_START_BIT;
  return swap_slot;
}
/* Flags the page table entry for 'upage' in 'owner's page directory as
   representing a shared page ('shared' true) or not ('shared' false). */
static void
page_flag_shared (struct thread *owner, void *upage, bool shared)
{
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
  ASSERT (pte != NULL);

  uint32_t mask = 1 << SHARED_FLAG_BIT;
  if (shared)
    *pte |= mask;
  else
    *pte &= ~mask;
}
/* Returns true iff the page table entry is marked to be shared.
   A NULL entry is treated as not shared. */
bool
page_is_shared_pte (uint32_t *pte)
{
  if (pte == NULL)
    return false;
  return (*pte & (1 << SHARED_FLAG_BIT)) != 0;
}
/* Initializes the shared file pages hash table and its lock. Panics if the
   table cannot be initialized, as page sharing cannot work without it.
   Fix: the definition used '()' (unspecified parameters in C) while the
   header declares '(void)'; use '(void)' for a proper prototype. */
void
shared_file_pages_init (void)
{
  if (!hash_init (&shared_file_pages, shared_file_page_hash,
                  shared_file_page_less, NULL))
    PANIC ("Failed to initialize shared file pages hash table.");
  lock_init (&shared_file_pages_lock);
}
/* Hash function for shared file pages, used for storing entries in the
   shared file pages table. Keys on the (inode, upage) pair so distinct
   'struct file' handles over the same inode hash identically. */
static unsigned
shared_file_page_hash (const struct hash_elem *e, void *aux UNUSED)
{
  struct shared_file_page *sfp = hash_entry (e, struct shared_file_page, elem);

  void *key[2] = { file_get_inode (sfp->file), sfp->upage };
  return hash_bytes (key, sizeof key);
}
/* 'less_func' comparison function for shared file pages, used for comparing
   the keys of the shared file pages table. Two entries compare "equal"
   (neither less than the other) only when file_compare reports the same
   file AND the upages match.
   NOTE(review): when the files differ, this returns true for BOTH
   orderings (a<b and b<a), so it is not a strict weak ordering. Pintos's
   hash only uses less() for in-bucket equality tests, where this still
   yields "unequal" — confirm it is never used for sorted iteration. */
static bool
shared_file_page_less (const struct hash_elem *a_, const struct hash_elem *b_,
                       void *aux UNUSED)
{
  const struct shared_file_page *a
      = hash_entry (a_, struct shared_file_page, elem);
  const struct shared_file_page *b
      = hash_entry (b_, struct shared_file_page, elem);
  return !file_compare (a->file, b->file) || a->upage < b->upage;
}
static struct shared_file_page *
shared_file_page_get (struct file *file, void *upage)
{
struct shared_file_page fake_sfp;
fake_sfp.file = file;
fake_sfp.upage = upage;
struct hash_elem *e = hash_find (&shared_file_pages, &fake_sfp.elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct shared_file_page, elem);
} }

View File

@@ -2,17 +2,26 @@
#define VM_PAGE_H #define VM_PAGE_H
#include "threads/thread.h" #include "threads/thread.h"
#include "threads/synch.h"
#include "filesys/off_t.h" #include "filesys/off_t.h"
enum page_type { enum page_type
PAGE_FILE, {
PAGE_EMPTY PAGE_EXECUTABLE,
PAGE_MMAP,
PAGE_SHARED
}; };
struct page_entry { struct page_entry
{
enum page_type type; /* Type of Data that should go into the page */ enum page_type type; /* Type of Data that should go into the page */
void *upage; /* Start Address of the User Page (Key of hash table). */ void *upage; /* Start Address of the User Page (Key of hash table). */
/* Data for swapped pages */
struct lock lock; /* Enforces mutual exclusion in accessing the page
referenced by the entry between its owning process
and any thread performing page eviction. */
/* File Data */ /* File Data */
struct file *file; /* Pointer to the file for executables. */ struct file *file; /* Pointer to the file for executables. */
off_t offset; /* Offset of the page content within the file. */ off_t offset; /* Offset of the page content within the file. */
@@ -23,16 +32,37 @@ struct page_entry {
struct hash_elem elem; /* An elem for the hash table. */ struct hash_elem elem; /* An elem for the hash table. */
}; };
unsigned page_hash (const struct hash_elem *e, void *aux); struct shared_file_page
bool page_less (const struct hash_elem *a_, const struct hash_elem *b_, {
void *aux); struct file *file; /* The shared file page's source file, used for indexing
struct page_entry *page_insert (struct file *file, off_t ofs, void *upage, the table. */
uint32_t read_bytes, uint32_t zero_bytes, void *upage; /* The shared page's upage which is the same across all process
bool writable, enum page_type type); using it. Used for indexing the table. */
struct page_entry *page_get (void *upage); void *frame; /* Set to the frame address of the page when it is in memory.
bool page_load (struct page_entry *page, bool writable); Set to NULL when the page is in swap. */
void page_cleanup (struct hash_elem *e, void *aux); size_t swap_slot; /* Set to the swap_slot of the shared paged if it is
void page_set_swap (struct thread *, void *, size_t); currently in swap. Should not be used when frame is not
size_t page_get_swap (struct thread *, void *); NULL.*/
int ref_count; /* Number of processes that are using this shared page. */
#endif /* vm/frame.h */ struct hash_elem elem; /* AN elem for the hash table. */
};
bool init_pages (struct hash *pages);
bool page_insert_swapped (void *upage, void *kpage, struct list *owners);
struct page_entry *page_insert_file (struct file *file, off_t ofs, void *upage,
uint32_t read_bytes, uint32_t zero_bytes,
bool writable, enum page_type);
struct page_entry *page_get (struct thread *thread, void *upage);
bool page_load_file (struct page_entry *page);
void page_cleanup (struct hash_elem *e, void *aux);
bool page_in_swap (struct thread *, void *);
bool page_in_swap_pte (uint32_t *pte);
size_t page_get_swap (struct thread *owner, void *upage);
size_t page_get_swap_pte (uint32_t *pte);
bool page_is_shared_pte (uint32_t *pte);
void shared_file_pages_init (void);
#endif