Compare commits

..

31 Commits

Author SHA1 Message Date
ce89d3577f fix: synchronise pagedir which now may be accessed by other threads, with a lock 2024-12-06 18:22:00 +00:00
77fedd6666 fix: synchronise threads' SPTs with locks 2024-12-06 17:31:33 +00:00
Dias Alberto, Ethan
eba8c1ffa8 Merge branch 'vm/merged/pinning-synch' into 'master'
fix: re-enable shareable read only executable logic

See merge request lab2425_autumn/pintos_22!65
2024-12-06 17:13:02 +00:00
EDiasAlberto
be696ec528 fix: re-enable shareable read only executable logic 2024-12-06 17:07:17 +00:00
Saleh Bubshait
7611090253 Merge branch 'vm/merged/pinning-synch' into 'master'
Merge 'vm/merged/pinning-synch' into master

See merge request lab2425_autumn/pintos_22!64
2024-12-06 16:31:37 +00:00
sBubshait
7f058ffc90 Refactor page_type to rename PAGE_FILE to PAGE_EXECUTABLE as mmap and executables are now separate 2024-12-06 16:22:26 +00:00
c1bc70adad ci: do not ignore any VM tests since they are fully implemented now 2024-12-06 15:55:23 +00:00
sBubshait
22f3b0950f Fix: Insert pages in mmap as PAGE_MMAP instead of PAGE_FILE 2024-12-06 15:54:46 +00:00
f64b92bbfa refactor: document shared_file_page 2024-12-06 15:35:23 +00:00
Themis Demetriades
3d6e30119b refactor: rename lru_lock to ftable_lock for greater clarity, and update comments to reflect this 2024-12-06 15:31:27 +00:00
4104d2c852 fix: always add to frame owners when installing existing shared page. 2024-12-06 15:23:41 +00:00
EDiasAlberto
d389c15828 fix: acquire lru_lock before pinning frames to avoid race condition with eviction 2024-12-06 13:20:43 +00:00
Themis Demetriades
8ac34063d7 fix: disable 'shareable' flag to probe race conditions 2024-12-06 10:56:38 +00:00
Demetriades, Themis
c68fea5249 Merge branch 'vm/merged/themis' into 'master'
Implement VM

See merge request lab2425_autumn/pintos_22!63
2024-12-06 05:07:14 +00:00
Themis Demetriades
65da1659e5 feat: merged shared-read-only-executables with the rest of VM 2024-12-06 04:15:13 +00:00
Themis Demetriades
3897e83963 fix: use correct page_get function within page eviction 2024-12-06 01:43:41 +00:00
Demetriades, Themis
96b350d623 Merge branch 'vm/mmap-write-back-on-eviction' into 'vm/virtual-memory/themis-synch'
Write back mmap file pages to file upon eviction

See merge request lab2425_autumn/pintos_22!59
2024-12-06 01:01:50 +00:00
Themis Demetriades
31403ac7cb fix: obtain correct page table entry when performing eviction 2024-12-06 00:56:03 +00:00
1da0c7d48c fix: properly assign frame owners and deallocate in all required places 2024-12-06 00:29:57 +00:00
Demetriades, Themis
8220b931a9 Merge branch 'vm/virtual-memory/frame-synch/saleh' into 'vm/virtual-memory/themis-synch'
Merge frame pinning to themis-synch

See merge request lab2425_autumn/pintos_22!60
2024-12-06 00:21:02 +00:00
sBubshait
1efa1fef9a Merge frame pinning into themis-synch 2024-12-05 23:56:25 +00:00
sBubshait
fc088a19ac Merge remote-tracking branch 'origin/vm/frame-pinning' into vm/virtual-memory/frame-synch/saleh
# Conflicts:
#	src/userprog/syscall.c
2024-12-05 23:48:52 +00:00
sBubshait
a34bbbed08 Update frame: When evicting an mmapped file page, write it back to the file if it is dirty 2024-12-05 22:53:48 +00:00
833c1b0520 fix: only swap out shared pages once 2024-12-05 22:37:14 +00:00
9aa9cdb91e feat: implement proper destruction of pages, including for shared pages 2024-12-05 22:23:50 +00:00
Themis Demetriades
2811ea0eb3 fix: SPT never removes entries until process termination or special case 2024-12-05 22:05:02 +00:00
dd46200256 feat: initial shared file page management and initialization 2024-12-05 21:46:49 +00:00
4dd6b6e928 fix: do not leak when inserting the same page twice, just update 2024-12-05 19:38:27 +00:00
0f1f7b9a6f refactor: extract init_pages 2024-12-05 19:35:39 +00:00
Themis Demetriades
7860f3863f fix: add check to mmap to ensure file isn't mapped over stack segment (ed1223) 2024-12-05 17:11:02 +00:00
Themis Demetriades
d03e253046 feat: implement synchronisation to protecting access to PTEs of SPTs during eviction 2024-12-05 16:51:15 +00:00
13 changed files with 790 additions and 250 deletions

View File

@@ -38,4 +38,3 @@ test_vm:
extends: .pintos_tests
variables:
DIR: vm
IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-over-stk)

View File

@@ -33,6 +33,7 @@
#endif
#ifdef VM
#include "vm/frame.h"
#include "vm/page.h"
#include "devices/swap.h"
#endif
#ifdef FILESYS
@@ -104,6 +105,7 @@ main (void)
paging_init ();
#ifdef VM
frame_init ();
shared_file_pages_init ();
#endif
/* Segmentation. */

View File

@@ -265,11 +265,24 @@ thread_create (const char *name, int priority,
#ifdef USERPROG
/* Initialize the thread's file descriptor table. */
t->fd_counter = MINIMUM_USER_FD;
bool success = hash_init (&t->open_files, fd_hash, fd_less, NULL);
if (success)
{
success = hash_init (&t->child_results, process_result_hash,
process_result_less, t);
if (!success)
hash_destroy (&t->open_files, NULL);
#ifdef VM
else
{
success = init_pages (&t->pages);
if (!success)
hash_destroy (&t->child_results, NULL);
}
#endif
}
if (!hash_init (&t->open_files, fd_hash, fd_less, NULL)
|| !hash_init (&t->child_results, process_result_hash,
process_result_less, t)
|| !hash_init (&t->pages, page_hash, page_less, NULL))
if (!success)
{
palloc_free_page (t);
free (t->result);
@@ -723,6 +736,9 @@ init_thread (struct thread *t, const char *name, int nice, int priority,
t->recent_cpu = recent_cpu;
t->priority = t->base_priority;
lock_init (&t->pages_lock);
lock_init (&t->pagedir_lock);
old_level = intr_disable ();
list_push_back (&all_list, &t->allelem);
intr_set_level (old_level);

View File

@@ -136,6 +136,7 @@ struct thread
struct list_elem elem; /* List element. */
struct hash pages; /* Table of open user pages. */
struct lock pages_lock; /* Lock for the supplementary page table. */
/* Memory mapped files for user virtual memory. */
struct hash mmap_files; /* List of memory mapped files. */
@@ -144,6 +145,7 @@ struct thread
#ifdef USERPROG
/* Owned by userprog/process.c. */
uint32_t *pagedir; /* Page directory. */
struct lock pagedir_lock; /* Lock for the page directory. */
unsigned int fd_counter; /* File descriptor counter for thread's
open files. */
struct hash open_files; /* Hash Table of FD -> Struct File. */

View File

@@ -3,19 +3,19 @@
#include <stdio.h>
#include "stdbool.h"
#include "userprog/gdt.h"
#include "userprog/pagedir.h"
#include "userprog/process.h"
#include "threads/interrupt.h"
#include "threads/palloc.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#ifdef VM
#include "vm/frame.h"
#include "vm/page.h"
#include "devices/swap.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#endif
#define MAX_STACK_SIZE (8 * 1024 * 1024) // 8MB
#define MAX_STACK_OFFSET 32 // 32 bytes offset below stack pointer (ESP)
/* Number of page faults processed. */
static long long page_fault_cnt;
@@ -187,6 +187,7 @@ page_fault (struct intr_frame *f)
return;
}
/* To implement virtual memory, delete the rest of the function
body, and replace it with code that brings in the page to
which fault_addr refers. */
@@ -232,12 +233,19 @@ static bool
grow_stack (void *upage)
{
/* Allocate new page for stack */
struct thread *t = thread_current ();
lock_acquire (&t->pagedir_lock);
void *new_page = frame_alloc (PAL_ZERO, upage, thread_current ());
if (new_page == NULL)
return false;
{
lock_release (&t->pagedir_lock);
return false;
}
/* Install the page into user page table */
if (!pagedir_set_page (thread_current ()->pagedir, upage, new_page, true))
bool result = pagedir_set_page (t->pagedir, upage, new_page, true);
lock_release (&t->pagedir_lock);
if (!result)
{
frame_free (new_page);
return false;
@@ -249,32 +257,36 @@ grow_stack (void *upage)
bool
fetch_page (void *upage, bool write)
{
/* Check if the page is in the supplemental page table. That is, it is a page
that is expected to be in memory. */
struct page_entry *page = page_get (thread_current (), upage);
if (page == NULL)
return false;
/* Check if the non-present user page is in the swap partition.
If so, swap it back into main memory, updating the PTE for
the faulted virtual address to point to the newly allocated
frame. */
struct thread *t = thread_current ();
lock_acquire (&t->pagedir_lock);
if (page_in_swap (t, upage))
{
size_t swap_slot = page_get_swap (t, upage);
void *kpage = frame_alloc (0, upage, t);
swap_in (kpage, swap_slot);
bool writeable = pagedir_is_writable (t->pagedir, upage);
if (pagedir_set_page (t->pagedir, upage, kpage, writeable))
{
struct page_entry *page = page_get(upage);
if (page != NULL)
page->frame = kpage;
return true;
}
}
/* NOTE: This code should be refactored and moved into helper functions
within 'page.c'.*/
void *kpage = frame_alloc (0, upage, t);
lock_acquire (&page->lock);
/* Check if the page is in the supplemental page table. That is, it is a page
that is expected to be in memory. */
struct page_entry *page = page_get (upage);
if (page == NULL)
return false;
size_t swap_slot = page_get_swap (t, upage);
swap_in (kpage, swap_slot);
lock_release (&page->lock);
bool writeable = pagedir_is_writable (t->pagedir, upage);
bool result = pagedir_set_page (t->pagedir, upage, kpage, writeable);
lock_release (&t->pagedir_lock);
return result;
}
lock_release (&t->pagedir_lock);
/* An attempt to write to a non-writeable should fail. */
if (write && !page->writable)
@@ -283,16 +295,14 @@ fetch_page (void *upage, bool write)
/* Load the page into memory based on the type of data it is expecting. */
bool success = false;
switch (page->type) {
case PAGE_FILE:
success = page_load (page, page->writable);
case PAGE_MMAP:
case PAGE_EXECUTABLE:
case PAGE_SHARED:
success = page_load_file (page);
break;
default:
return false;
}
if (success && page->writable &&
!pagedir_is_writable(thread_current()->pagedir, upage))
pagedir_set_writable(thread_current()->pagedir, upage, true);
return success;
}

View File

@@ -2,9 +2,12 @@
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include "devices/swap.h"
#include "threads/init.h"
#include "threads/pte.h"
#include "threads/palloc.h"
#include "vm/frame.h"
#include "vm/page.h"
static uint32_t *active_pd (void);
@@ -39,8 +42,14 @@ pagedir_destroy (uint32_t *pd)
uint32_t *pte;
for (pte = pt; pte < pt + PGSIZE / sizeof *pte; pte++)
if (*pte & PTE_P)
palloc_free_page (pte_get_page (*pte));
{
if (page_is_shared_pte (pte))
continue;
else if (page_in_swap_pte (pte))
swap_drop (page_get_swap_pte (pte));
else if (*pte & PTE_P)
frame_free (pte_get_page (*pte));
}
palloc_free_page (pt);
}
palloc_free_page (pd);

View File

@@ -371,7 +371,9 @@ process_exit (void)
hash_destroy (&cur->open_files, fd_cleanup);
/* Clean up the thread's supplemental page table. */
lock_acquire (&cur->pages_lock);
hash_destroy (&cur->pages, page_cleanup);
lock_release (&cur->pages_lock);
/* Close the executable file, implicitly allowing it to be written to. */
if (cur->exec_file != NULL)
@@ -394,6 +396,7 @@ process_exit (void)
/* Destroy the current process's page directory and switch back
to the kernel-only page directory. */
lock_acquire (&cur->pagedir_lock);
pd = cur->pagedir;
if (pd != NULL)
{
@@ -408,6 +411,7 @@ process_exit (void)
pagedir_activate (NULL);
pagedir_destroy (pd);
}
lock_release (&cur->pagedir_lock);
}
/* Destruct a process_result, with multi-thread awareness.
@@ -533,7 +537,9 @@ load (const char *file_name, void (**eip) (void), void **esp)
lock_acquire (&filesys_lock);
/* Allocate and activate page directory. */
lock_acquire (&t->pagedir_lock);
t->pagedir = pagedir_create ();
lock_release (&t->pagedir_lock);
if (t->pagedir == NULL)
goto done;
process_activate ();
@@ -714,8 +720,8 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
size_t page_zero_bytes = PGSIZE - page_read_bytes;
/* Add the page metadata to the SPT to be lazy loaded later on */
if (page_insert (file, ofs, upage, page_read_bytes, page_zero_bytes,
writable, PAGE_FILE) == NULL)
if (page_insert_file (file, ofs, upage, page_read_bytes, page_zero_bytes,
writable, PAGE_EXECUTABLE) == NULL)
return false;
/* Advance. */
@@ -759,11 +765,16 @@ get_usr_kpage (enum palloc_flags flags, void *upage)
void *page;
#ifdef VM
struct thread *t = thread_current ();
lock_acquire (&t->pagedir_lock);
if (pagedir_get_page (t->pagedir, upage) != NULL)
return NULL;
{
lock_release (&t->pagedir_lock);
return NULL;
}
else
page = frame_alloc (flags, upage, t);
pagedir_set_accessed (t->pagedir, upage, true);
lock_release (&t->pagedir_lock);
#else
page = palloc_get_page (flags | PAL_USER);
#endif

View File

@@ -461,10 +461,9 @@ syscall_mmap (int fd, void *addr)
/* Check and ensure that there is enough space in the user virtual memory to
hold the entire file. */
for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
{
if (page_get (addr + ofs) != NULL)
if (page_get (thread_current (), addr + ofs) != NULL)
return MMAP_FAILURE;
}
/* Map the file data into the user virtual memory starting from addr. */
for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
@@ -472,8 +471,8 @@ syscall_mmap (int fd, void *addr)
off_t read_bytes = file_size - ofs < PGSIZE ? file_size - ofs : PGSIZE;
off_t zero_bytes = PGSIZE - read_bytes;
if (page_insert (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
PAGE_FILE) == NULL)
if (page_insert_file (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
PAGE_MMAP) == NULL)
return MMAP_FAILURE;
}
@@ -482,7 +481,6 @@ syscall_mmap (int fd, void *addr)
if (mmap == NULL)
return MMAP_FAILURE;
return mmap->mapping;
}
@@ -577,6 +575,7 @@ validate_user_ptr_helper (const void *start, size_t size, bool write, bool pin)
if (!is_user_vaddr (end))
syscall_exit (EXIT_FAILURE);
struct thread *t = thread_current ();
for (const void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
{
int result;
@@ -592,7 +591,9 @@ validate_user_ptr_helper (const void *start, size_t size, bool write, bool pin)
/* If pin is set, pin the frame to prevent eviction. */
if (pin)
{
void *kpage = pagedir_get_page(thread_current()->pagedir, ptr);
lock_acquire (&t->pagedir_lock);
void *kpage = pagedir_get_page (t->pagedir, ptr);
lock_release (&t->pagedir_lock);
if (kpage == NULL)
{
// If it was evicted, try to load it back in.
@@ -642,18 +643,21 @@ validate_and_pin_user_ptr (const void *start, size_t size, bool write)
static void
unpin_user_ptr (const void *start, size_t size)
{
struct thread *t = thread_current ();
void *end = start + size - 1;
/* We don't need to do any checks as this function is always called after
validate_and_pin_user_ptr. */
/* Go through all pages in the block range, unpinning the frames. */
lock_acquire (&t->pagedir_lock);
for (void *ptr = pg_round_down (start); ptr <= end; ptr += PGSIZE)
{
void *kpage = pagedir_get_page (thread_current ()->pagedir, ptr);
void *kpage = pagedir_get_page (t->pagedir, ptr);
ASSERT (kpage != NULL);
frame_unpin (kpage);
}
lock_release (&t->pagedir_lock);
}
/* Validates whether a C-string starting at ptr is fully contained within valid
@@ -661,6 +665,7 @@ unpin_user_ptr (const void *start, size_t size)
static void
validate_and_pin_user_str (const char *ptr)
{
struct thread *t = thread_current ();
size_t offset = (uintptr_t) ptr % PGSIZE;
for (;;)
@@ -673,7 +678,9 @@ validate_and_pin_user_str (const char *ptr)
/* Pin the frame to prevent eviction. */
void *page = pg_round_down (ptr);
void *kpage = pagedir_get_page (thread_current ()->pagedir, page);
lock_acquire (&t->pagedir_lock);
void *kpage = pagedir_get_page (t->pagedir, page);
lock_release (&t->pagedir_lock);
if (kpage == NULL)
{
// If it was evicted, attempt to reload.
@@ -703,13 +710,15 @@ validate_and_pin_user_str (const char *ptr)
static void
unpin_user_str (const char *ptr)
{
struct thread *t = thread_current ();
size_t offset = (uintptr_t)ptr % PGSIZE;
const char *str_ptr = ptr;
lock_acquire (&t->pagedir_lock);
for (;;)
{
void *page = pg_round_down(str_ptr);
void *kpage = pagedir_get_page(thread_current()->pagedir, page);
void *kpage = pagedir_get_page (t->pagedir, page);
ASSERT(kpage != NULL);
frame_unpin (kpage);
@@ -717,7 +726,11 @@ unpin_user_str (const char *ptr)
while (offset < PGSIZE)
{
if (*str_ptr == '\0')
return; /* Found end of string */
{
/* Found end of string */
lock_release (&t->pagedir_lock);
return;
}
str_ptr++;
offset++;
}

View File

@@ -2,142 +2,199 @@
#include <hash.h>
#include <list.h>
#include <string.h>
#include <stdio.h>
#include "frame.h"
#include "page.h"
#include "filesys/file.h"
#include "threads/malloc.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "userprog/syscall.h"
#include "threads/synch.h"
#include "devices/swap.h"
struct frame_entry
{
void *frame;
void *upage;
struct thread *owner;
bool pinned;
struct hash_elem hash_elem;
struct list_elem list_elem;
};
/* Hash table that maps every active frame's kernel virtual address
to its corresponding 'frame_metadata'.*/
struct hash frame_table;
struct lock frame_lock;
/* Linked list used to represent the circular queue in the 'clock'
algorithm for page eviction. Iterating from the element that is
currently pointed at by 'next_victim' yields an ordering of the entries
from oldest to newest (in terms of when they were added or checked
for having been referenced by a process). */
struct list lru_list;
struct list_elem *next_victim;
hash_hash_func frame_hash;
hash_less_func frame_less;
/* The next element in lru_list to be considered for eviction (oldest added
or referenced page in the circular queue). If this page has has an
'accessed' bit of 0 when considering eviction, then it will be the next
victim. Otherwise, the next element in the queue is similarly considered. */
struct list_elem *next_victim = NULL;
struct frame_metadata
{
void *frame; /* The kernel virtual address holding the frame. */
void *upage; /* The user virtual address pointing to the frame. */
struct list owners; /* List of threads that own the frame. */
bool pinned; /* Indicates whether the frame should be
considered as an eviction candidate.*/
struct hash_elem hash_elem; /* Tracks the position of the frame metadata
within 'frame_table', whose key is the
kernel virtual address of the frame. */
struct list_elem list_elem; /* Tracks the position of the frame metadata
within 'lru_list', so a victim can be
chosen for eviction. */
};
hash_hash_func frame_metadata_hash;
hash_less_func frame_metadata_less;
static struct frame_entry *frame_get (void *frame);
static struct frame_entry *get_victim (void);
static struct list_elem *lru_next (struct list_elem *e);
static struct list_elem *lru_prev (struct list_elem *e);
static struct frame_metadata *frame_metadata_get (void *frame);
static struct frame_metadata *get_victim (struct thread *cur);
static void free_owners (struct list *owners);
static struct frame_metadata *frame_metadata_find (void *frame);
/* Initialize the frame system by initializing the frame (hash) table with
the frame_metadata hashing and comparison functions, as well as initializing
'lru_list' and its associated synchronisation primitives. */
void
frame_init (void)
{
hash_init (&frame_table, frame_hash, frame_less, NULL);
lock_init (&frame_lock);
hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
list_init (&lru_list);
lock_init (&ftable_lock);
}
/* TODO: Consider synchronisation more closely (i.e. just for hash
table). */
/* Attempt to allocate a frame for a user process, either by direct
allocation of a user page if there is sufficient RAM, or by
evicting a currently active page if memory allocated for user
processes is full and storing it in swap. If swap is full in
the latter case, panic the kernel. */
void *
frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
{
lock_acquire (&frame_lock);
struct frame_entry *frame_metadata;
struct frame_metadata *frame_metadata;
flags |= PAL_USER;
lock_acquire (&ftable_lock);
void *frame = palloc_get_page (flags);
/* If a frame couldn't be allocated we must be out of main memory. Thus,
obtain a victim page to replace with our page, and swap the victim
into disk. */
if (frame == NULL)
{
if (next_victim == NULL)
PANIC ("Couldn't allocate a single page to main memory!\n");
{
/* 1. Obtain victim. */
if (next_victim == NULL)
PANIC ("Couldn't allocate a single page to main memory!\n");
struct frame_entry *victim = get_victim ();
ASSERT (victim != NULL); /* get_victim () should never return null. */
struct frame_metadata *victim = get_victim (owner);
ASSERT (victim != NULL); /* get_victim () should never return null. */
/* 2. Swap out victim into disk. */
/* Mark page as 'not present' and flag the page directory as having
been modified *before* eviction begins to prevent the owner of the
victim page from accessing/modifying it mid-eviction. */
pagedir_clear_page (victim->owner->pagedir, victim->upage);
/* 2. Handle victim page writing based on its type. */
struct page_entry *victim_page = page_get (thread_current (), victim->upage);
if (victim_page != NULL && victim_page->type == PAGE_MMAP)
{
/* If it was a memory-mapped file page, we just write it back
to the file if it was dirty. */
if (pagedir_is_dirty(owner->pagedir, victim->upage))
{
lock_acquire (&filesys_lock);
file_write_at (victim_page->file, victim->upage,
victim_page->read_bytes, victim_page->offset);
lock_release (&filesys_lock);
}
}
else
{
/* Otherwise, insert the page into swap. */
page_insert_swapped (victim->upage, victim->frame, &victim->owners, owner);
}
// TODO: Lock PTE of victim page for victim process.
/* Free victim's owners. */
free_owners (&victim->owners);
size_t swap_slot = swap_out (victim->frame);
page_set_swap (victim->owner, victim->upage, swap_slot);
/* If zero flag is set, zero out the victim page. */
if (flags & PAL_ZERO)
memset (victim->frame, 0, PGSIZE);
/* If zero flag is set, zero out the victim page. */
if (flags & PAL_ZERO)
memset (victim->frame, 0, PGSIZE);
/* 3. Indicate that the new frame's metadata will be stored
inside the same structure that stored the victim's metadata.
As both the new frame and the victim frame share the same kernel
virtual address, the hash map need not be updated, and neither
the list_elem value as both share the same lru_list position. */
frame_metadata = victim;
}
/* 3. Indicate that the new frame's metadata will be stored
inside the same structure that stored the victim's metadata.
As both the new frame and the victim frame share the same kernel
virtual address, the hash map need not be updated, and neither
the list_elem value as both share the same lru_list position. */
frame_metadata = victim;
}
/* If sufficient main memory allows the frame to be directly allocated,
we must update the frame table with a new entry, and grow lru_list. */
/* If sufficient main memory allows the frame to be directly allocated,
we must update the frame table with a new entry, and grow lru_list. */
else
{
/* Must own lru_lock here, as otherwise there is a race condition
with next_victim either being NULL or uninitialized. */
frame_metadata = malloc (sizeof (struct frame_entry));
frame_metadata->frame = frame;
{
/* Must own ftable_lock here, as otherwise there is a race condition
with next_victim either being NULL or uninitialized. */
frame_metadata = malloc (sizeof (struct frame_metadata));
if (frame_metadata == NULL)
PANIC ("Couldn't allocate memory for frame metadata!\n");
frame_metadata->frame = frame;
/* Newly allocated frames are pushed to the back of the circular queue
represented by lru_list. Must explicitly handle the case where the
circular queue is empty (when next_victim == NULL). */
if (next_victim == NULL)
{
list_push_back (&lru_list, &frame_metadata->list_elem);
next_victim = &frame_metadata->list_elem;
}
else
{
struct list_elem *lru_tail = lru_prev (next_victim);
list_insert (lru_tail, &frame_metadata->list_elem);
/* Newly allocated frames are pushed to the back of the circular queue
represented by lru_list. Must explicitly handle the case where the
circular queue is empty (when next_victim == NULL). */
if (next_victim == NULL)
{
list_push_back (&lru_list, &frame_metadata->list_elem);
next_victim = &frame_metadata->list_elem;
}
else
{
struct list_elem *lru_tail = lru_prev (next_victim);
list_insert (lru_tail, &frame_metadata->list_elem);
}
hash_insert (&frame_table, &frame_metadata->hash_elem);
}
hash_insert (&frame_table, &frame_metadata->hash_elem);
}
struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
if (frame_owner == NULL)
PANIC ("Couldn't allocate memory for frame owner!\n");
frame_owner->owner = owner;
list_init (&frame_metadata->owners);
list_push_back (&frame_metadata->owners, &frame_owner->elem);
frame_metadata->upage = upage;
frame_metadata->owner = owner;
frame_metadata->pinned = false;
void *frame_addr = frame_metadata->frame;
lock_release (&frame_lock);
return frame_addr;
lock_release (&ftable_lock);
return frame_metadata->frame;
}
void
frame_pin (void *frame)
{
struct frame_entry *frame_metadata = frame_get (frame);
ASSERT (frame != NULL);
lock_acquire (&ftable_lock);
struct frame_metadata *frame_metadata = frame_metadata_get (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to pin a frame at an unallocated kernel address '%p'\n",
frame);
frame_metadata->pinned = true;
lock_release (&ftable_lock);
}
void
frame_unpin (void *frame)
{
struct frame_entry *frame_metadata = frame_get (frame);
ASSERT (frame != NULL);
lock_acquire (&ftable_lock);
struct frame_metadata *frame_metadata = frame_metadata_get (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to unpin a frame at an unallocated kernel address '%p'\n",
frame);
frame_metadata->pinned = false;
lock_release (&ftable_lock);
}
/* Attempt to deallocate a frame for a user process by removing it from the
@@ -145,111 +202,183 @@ frame_unpin (void *frame)
memory & metadata struct. Panics if the frame isn't active in memory. */
void
frame_free (void *frame)
{
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to free a frame at kernel address %p, "
"but this address is not allocated!\n",
frame);
free_owners (&frame_metadata->owners);
lock_acquire (&ftable_lock);
hash_delete (&frame_table, &frame_metadata->hash_elem);
list_remove (&frame_metadata->list_elem);
/* If we're freeing the frame marked as the next victim, update
next_victim to either be the next least recently used page, or NULL
if no pages are loaded in main memory. */
if (&frame_metadata->list_elem == next_victim)
{
if (list_empty (&lru_list))
next_victim = NULL;
else
next_victim = lru_next (next_victim);
}
lock_release (&ftable_lock);
free (frame_metadata);
palloc_free_page (frame);
}
/* Add a thread to a frame's frame_metadata owners list. */
bool
frame_owner_insert (void *frame, struct thread *owner)
{
lock_acquire(&frame_lock);
struct frame_entry key_metadata;
key_metadata.frame = frame;
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
if (frame_metadata == NULL)
return false;
struct hash_elem *e =
hash_delete (&frame_table, &key_metadata.hash_elem);
if (e == NULL)
return;
struct frame_entry *frame_metadata =
hash_entry (e, struct frame_entry, hash_elem);
struct page_entry *page = page_get (frame_metadata->upage);
if (page != NULL)
{
page->frame = NULL;
}
list_remove (&frame_metadata->list_elem);
/* If we're freeing the frame marked as the next victim, update
next_victim to either be the next least recently used page, or NULL
if no pages are loaded in main memory. */
if (&frame_metadata->list_elem == next_victim)
{
if (list_empty (&lru_list))
next_victim = NULL;
else
next_victim = lru_next (next_victim);
}
free (frame_metadata);
palloc_free_page (frame);
lock_release (&frame_lock);
struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
if (frame_owner == NULL)
return false;
frame_owner->owner = owner;
list_push_back (&frame_metadata->owners, &frame_owner->elem);
return true;
}
/* TODO: Account for page aliases when checking accessed bit. */
/* A pre-condition for calling this function is that the calling thread
owns lru_lock and that lru_list is non-empty. */
static struct frame_entry *
get_victim (void)
/* Remove and deallocate a frame owner from the frame_metadata owners list.
*/
void
frame_owner_remove (void *frame, struct thread *owner)
{
struct list_elem *e = next_victim;
struct frame_entry *frame_metadata;
uint32_t *pd;
void *upage;
for (;;)
struct frame_metadata *frame_metadata = frame_metadata_find (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to remove an owner from a frame at kernel "
"address %p, but this address is not allocated!\n",
frame);
struct list_elem *oe;
for (oe = list_begin (&frame_metadata->owners);
oe != list_end (&frame_metadata->owners);)
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
oe = list_next (oe);
if (frame_owner->owner == owner)
{
list_remove (&frame_owner->elem);
free (frame_owner);
return;
}
}
NOT_REACHED ();
}
/* Find a frame_metadata entry in the frame table. */
static struct frame_metadata *
frame_metadata_find (void *frame)
{
struct frame_metadata key_metadata;
key_metadata.frame = frame;
struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct frame_metadata, hash_elem);
}
/* A pre-condition for calling this function is that the calling thread
owns ftable_lock and that lru_list is non-empty. */
static struct frame_metadata *
get_victim (struct thread *cur)
{
frame_metadata = list_entry (e, struct frame_entry, list_elem);
pd = frame_metadata->owner->pagedir;
upage = frame_metadata->upage;
e = lru_next (e);
struct list_elem *ve = next_victim;
struct frame_metadata *frame_metadata;
bool found = false;
while (!found)
{
frame_metadata = list_entry (ve, struct frame_metadata, list_elem);
ve = lru_next (ve);
struct list_elem *oe;
/* Skip pinned frames */
if (frame_metadata->pinned)
continue;
/* Skip pinned frames */
if (frame_metadata->pinned)
continue;
if (!pagedir_is_accessed (pd, upage))
break;
/* Returns once a frame that was not accessed by any owner is found. */
found = true;
for (oe = list_begin (&frame_metadata->owners);
oe != list_end (&frame_metadata->owners); oe = list_next (oe))
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
if (frame_owner->owner != cur)
lock_acquire (&frame_owner->owner->pagedir_lock);
uint32_t *pd = frame_owner->owner->pagedir;
void *upage = frame_metadata->upage;
pagedir_set_accessed (pd, upage, false);
if (pagedir_is_accessed (pd, upage))
{
found = false;
pagedir_set_accessed (pd, upage, false);
}
if (frame_owner->owner != cur)
lock_release (&frame_owner->owner->pagedir_lock);
}
}
next_victim = ve;
return frame_metadata;
}
next_victim = e;
return frame_metadata;
static void
free_owners (struct list *owners)
{
struct list_elem *oe;
for (oe = list_begin (owners); oe != list_end (owners);)
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
oe = list_remove (oe);
free (frame_owner);
}
}
/* Hash function for frame metadata, used for storing entries in the
frame table. */
unsigned
frame_hash (const struct hash_elem *e, void *aux UNUSED)
{
struct frame_entry *entry =
hash_entry (e, struct frame_entry, hash_elem);
frame_metadata_hash (const struct hash_elem *e, void *aux UNUSED)
{
struct frame_metadata *frame_metadata =
hash_entry (e, struct frame_metadata, hash_elem);
return hash_bytes (&entry->frame, sizeof (entry->frame));
}
return hash_bytes (&frame_metadata->frame, sizeof (frame_metadata->frame));
}
/* 'less_func' comparison function for frame metadata, used for comparing
   the keys of the frame table.  Returns true iff the kernel virtual address
   of the first frame is less than that of the second frame. */
bool
frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
                     void *aux UNUSED)
{
  struct frame_metadata *a =
      hash_entry (a_, struct frame_metadata, hash_elem);
  struct frame_metadata *b =
      hash_entry (b_, struct frame_metadata, hash_elem);
  return a->frame < b->frame;
}
static struct frame_entry *
frame_get (void *frame)
static struct frame_metadata *
frame_metadata_get (void *frame)
{
struct frame_entry fake_frame;
fake_frame.frame = frame;
struct frame_metadata key_metadata;
key_metadata.frame = frame;
struct hash_elem *e = hash_find (&frame_table, &fake_frame.hash_elem);
struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
if (e == NULL) return NULL;
return hash_entry (e, struct frame_entry, hash_elem);
return hash_entry (e, struct frame_metadata, hash_elem);
}
/* Returns the next recently used element after the one provided, which

View File

@@ -4,10 +4,24 @@
#include "threads/thread.h"
#include "threads/palloc.h"
struct frame_owner
{
struct thread *owner; /* The thread that owns the frame. */
struct list_elem elem; /* List element for the list of owners. */
};
/* Synchronisation variables. */
/* Protects access to the frame table and its related components. */
struct lock ftable_lock;
void frame_init (void);
void *frame_alloc (enum palloc_flags, void *, struct thread *);
void frame_pin (void *frame);
void frame_unpin (void *frame);
void frame_free (void *frame);
bool frame_owner_insert (void *frame, struct thread *owner);
void frame_owner_remove (void *frame, struct thread *owner);
#endif /* vm/frame.h */

View File

@@ -1,5 +1,6 @@
#include "mmap.h"
#include "page.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#include "threads/malloc.h"
#include "userprog/syscall.h"
@@ -70,7 +71,7 @@ mmap_unmap (struct mmap_entry *mmap)
void *upage = mmap->upage + ofs;
/* Get the SPT page entry for this page. */
struct page_entry *page = page_get(upage);
struct page_entry *page = page_get(thread_current (), upage);
if (page == NULL)
continue;

View File

@@ -1,21 +1,49 @@
#include "page.h"
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "filesys/file.h"
#include "threads/pte.h"
#include "threads/malloc.h"
#include "threads/palloc.h"
#include "threads/synch.h"
#include "devices/swap.h"
#include "userprog/process.h"
#include "userprog/pagedir.h"
#include "vm/frame.h"
#define SWAP_FLAG_BIT 9
#define SHARED_FLAG_BIT 10
#define ADDR_START_BIT 12
struct hash shared_file_pages;
struct lock shared_file_pages_lock;
static unsigned page_hash (const struct hash_elem *e, void *aux UNUSED);
static bool page_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux UNUSED);
static void page_flag_shared (struct thread *owner, void *upage, bool shared);
static unsigned shared_file_page_hash (const struct hash_elem *e,
void *aux UNUSED);
static bool shared_file_page_less (const struct hash_elem *a_,
const struct hash_elem *b_,
void *aux UNUSED);
static struct shared_file_page *shared_file_page_get (struct file *file,
void *upage);
/* Initialise a supplementary page table. */
bool
init_pages (struct hash *pages)
{
ASSERT (pages != NULL);
return hash_init (pages, page_hash, page_less, NULL);
}
/* Hashing function needed for the SPT table. Returns a hash for an entry,
based on its upage. */
unsigned
page_hash (const struct hash_elem *e, UNUSED void *aux)
static unsigned
page_hash (const struct hash_elem *e, void *aux UNUSED)
{
struct page_entry *page = hash_entry (e, struct page_entry, elem);
return hash_ptr (page->upage);
@@ -23,7 +51,7 @@ page_hash (const struct hash_elem *e, UNUSED void *aux)
/* Comparator function for the SPT table. Compares two entries based on their
upages. */
bool
static bool
page_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux UNUSED)
{
@@ -33,38 +61,132 @@ page_less (const struct hash_elem *a_, const struct hash_elem *b_,
return a->upage < b->upage;
}
/* Allocate and insert a new page entry into the thread's page table. */
struct page_entry *
page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes,
uint32_t zero_bytes, bool writable, enum page_type type)
static void page_flag_swap (uint32_t *pte, bool set);
static void page_set_swap (struct thread *owner, uint32_t *pte,
size_t swap_slot);
// TODO: Deal with NULL malloc returns
/* Swap out 'owner' process's 'upage' stored at 'kpage'. Then, allocate and
   insert a new page entry into the user process thread's SPT representing
   this swapped out page.

   OWNERS is the victim frame's list of 'struct frame_owner' elements; CUR is
   the thread driving the eviction.  CUR's own pagedir_lock is never
   (re-)acquired here — presumably it is already held by the caller (TODO
   confirm).  Returns false only if allocating a new SPT entry fails. */
bool
page_insert_swapped (void *upage, void *kpage, struct list *owners,
                     struct thread *cur)
{
  struct file *exec_file = NULL;
  struct list_elem *e;
  for (e = list_begin (owners); e != list_end (owners); e = list_next (e))
    {
      struct thread *owner = list_entry (e, struct frame_owner, elem)->owner;
      if (owner != cur)
        lock_acquire (&owner->pagedir_lock);
      uint32_t *pte = lookup_page (owner->pagedir, upage, false);

      /* Shared read-only executable pages: once one owner is seen to share
         the page, every remaining owner must share it too.  Just unmap it
         per owner; its backing store is tracked globally in
         'shared_file_pages' and updated after the loop. */
      if (exec_file != NULL || page_is_shared_pte (pte))
        {
          ASSERT (page_is_shared_pte (pte));
          pagedir_clear_page (owner->pagedir, upage);
          exec_file = owner->exec_file;
          ASSERT (exec_file != NULL);
          if (owner != cur)
            lock_release (&owner->pagedir_lock);
          continue;
        }

      /* A non-shared page has exactly one owner. */
      ASSERT (list_size (owners) == 1);

      /* 1. Initialize swapped page entry. */
      struct page_entry *page = page_get (owner, upage);
      lock_acquire (&owner->pages_lock);
      if (page == NULL)
        {
          page = malloc (sizeof (struct page_entry));
          if (page == NULL)
            {
              /* FIX: previously 'owner->pages_lock' was leaked on this
                 error path, deadlocking later SPT accesses. */
              lock_release (&owner->pages_lock);
              if (owner != cur)
                lock_release (&owner->pagedir_lock);
              return false;
            }
          page->upage = upage;
          lock_init (&page->lock);
          hash_insert (&owner->pages, &page->elem);
        }

      /* Mark page as 'swapped' and flag the page directory as having
         been modified *before* eviction begins to prevent the owner of the
         victim page from accessing/modifying it mid-eviction. */
      /* TODO: We need to stop the process from destroying pagedir mid-eviction,
         as this could render the page table entry invalid. */
      page_flag_swap (pte, true);
      lock_acquire (&page->lock);
      pagedir_clear_page (owner->pagedir, upage);
      size_t swap_slot = swap_out (kpage);
      page_set_swap (owner, pte, swap_slot);
      lock_release (&page->lock);
      lock_release (&owner->pages_lock);
      if (owner != cur)
        lock_release (&owner->pagedir_lock);
    }

  /* The victim was a shared executable page: record its new location in
     swap in the global shared file pages table. */
  if (exec_file != NULL)
    {
      lock_acquire (&shared_file_pages_lock);
      struct shared_file_page *sfp = shared_file_page_get (exec_file, upage);
      sfp->frame = NULL;
      sfp->swap_slot = swap_out (kpage);
      lock_release (&shared_file_pages_lock);
    }
  return true;
}
/* Allocate and insert a new page entry into the user process thread's
   SPT representing a file page.  If an entry for UPAGE already exists,
   the existing entry is widened to writable if either mapping is writable
   and returned instead.  Returns NULL on allocation failure. */
struct page_entry *
page_insert_file (struct file *file, off_t ofs, void *upage,
                  uint32_t read_bytes, uint32_t zero_bytes, bool writable,
                  enum page_type type)
{
  /* If page exists, just update it. */
  struct page_entry *existing = page_get (thread_current (), upage);
  if (existing != NULL)
    {
      ASSERT (existing->read_bytes == read_bytes);
      ASSERT (existing->zero_bytes == zero_bytes);
      existing->writable = existing->writable || writable;
      return existing;
    }

  struct page_entry *page = malloc (sizeof (struct page_entry));
  if (page == NULL)
    return NULL;

  page->upage = upage;
  page->frame = NULL;          /* Not yet loaded into memory. */
  page->file = file;
  page->offset = ofs;
  page->read_bytes = read_bytes;
  page->zero_bytes = zero_bytes;
  page->writable = writable;
  page->type = type;
  lock_init (&page->lock);

  /* Insert under pages_lock: other threads (e.g. eviction) may access
     this thread's SPT concurrently. */
  struct thread *t = thread_current ();
  lock_acquire (&t->pages_lock);
  hash_insert (&t->pages, &page->elem);
  lock_release (&t->pages_lock);
  return page;
}
/* Gets a page_entry from the starting address of the page. Returns NULL if no
such page_entry exists in the hash map.*/
struct page_entry *
page_get (void *upage)
page_get (struct thread *thread, void *upage)
{
struct page_entry fake_page_entry;
fake_page_entry.upage = upage;
lock_acquire (&thread->pages_lock);
struct hash_elem *e
= hash_find (&thread_current ()->pages, &fake_page_entry.elem);
= hash_find (&thread->pages, &fake_page_entry.elem);
lock_release (&thread->pages_lock);
if (e == NULL)
return NULL;
@@ -72,21 +194,72 @@ page_get (void *upage)
}
bool
page_load (struct page_entry *page, bool writable)
page_load_file (struct page_entry *page)
{
/* Allocate a frame for the page. If a frame allocation fails, then
frame_alloc should try to evict a page. If it is still NULL, the OS
panics as this should not happen if eviction is working correctly. */
struct thread *t = thread_current ();
bool shareable = !page->writable && file_compare (page->file, t->exec_file);
lock_acquire (&t->pagedir_lock);
if (shareable)
{
lock_acquire (&shared_file_pages_lock);
struct shared_file_page *sfp
= shared_file_page_get (page->file, page->upage);
if (sfp != NULL)
{
/* Frame exists, just install it. */
if (sfp->frame != NULL)
{
if (!install_page (page->upage, sfp->frame, page->writable))
{
lock_release (&shared_file_pages_lock);
lock_release (&t->pagedir_lock);
return false;
}
frame_owner_insert (sfp->frame, t);
}
/* Shared page is in swap. Load it. */
else
{
void *frame = frame_alloc (PAL_USER, page->upage, t);
if (frame == NULL)
PANIC ("Could not allocate a frame to load page into memory.");
swap_in (frame, sfp->swap_slot);
if (!install_page (page->upage, frame, false))
{
frame_free (frame);
lock_release (&shared_file_pages_lock);
lock_release (&t->pagedir_lock);
return false;
}
}
page_flag_shared (t, page->upage, true);
if (page->type != PAGE_SHARED)
{
sfp->ref_count++;
page->type = PAGE_SHARED;
}
lock_release (&shared_file_pages_lock);
lock_release (&t->pagedir_lock);
return true;
}
}
void *frame = frame_alloc (PAL_USER, page->upage, t);
pagedir_set_accessed (t->pagedir, page->upage, true);
if (frame == NULL)
PANIC ("Could not allocate a frame to load page into memory.");
/* Map the page to the frame. */
if (!install_page (page->upage, frame, writable))
if (!install_page (page->upage, frame, page->writable))
{
if (shareable)
lock_release (&shared_file_pages_lock);
frame_free (frame);
lock_release (&t->pagedir_lock);
return false;
}
@@ -96,6 +269,9 @@ page_load (struct page_entry *page, bool writable)
file_seek (page->file, page->offset);
if (file_read (page->file, frame, page->read_bytes) != (int) page->read_bytes)
{
if (shareable)
lock_release (&shared_file_pages_lock);
lock_release (&t->pagedir_lock);
frame_free (frame);
return false;
}
@@ -103,8 +279,29 @@ page_load (struct page_entry *page, bool writable)
/* Zero out the remaining bytes in the frame. */
memset (frame + page->read_bytes, 0, page->zero_bytes);
page->frame = frame;
/* If file page is read-only, make it shared. */
if (shareable)
{
struct shared_file_page *sfp = malloc (sizeof (struct shared_file_page));
if (sfp == NULL)
{
lock_release (&shared_file_pages_lock);
lock_release (&t->pagedir_lock);
frame_free (frame);
return false;
}
sfp->file = page->file;
sfp->upage = page->upage;
sfp->frame = frame;
sfp->swap_slot = 0;
sfp->ref_count = 1;
hash_insert (&shared_file_pages, &sfp->elem);
page_flag_shared (t, page->upage, true);
page->type = PAGE_SHARED;
lock_release (&shared_file_pages_lock);
}
lock_release (&t->pagedir_lock);
/* Mark the page as loaded successfully. */
return true;
}
@@ -115,21 +312,44 @@ void
page_cleanup (struct hash_elem *e, void *aux UNUSED)
{
struct page_entry *page = hash_entry (e, struct page_entry, elem);
if (page->frame != NULL)
frame_free (page->frame);
if (page->type == PAGE_SHARED)
{
lock_acquire (&shared_file_pages_lock);
struct shared_file_page *sfp
= shared_file_page_get (page->file, page->upage);
ASSERT (sfp != NULL);
if (sfp->frame != NULL)
frame_owner_remove (sfp->frame, thread_current ());
sfp->ref_count--;
if (sfp->ref_count == 0)
{
hash_delete (&shared_file_pages, &sfp->elem);
if (sfp->frame != NULL)
frame_free (sfp->frame);
else
swap_drop (sfp->swap_slot);
free (sfp);
}
lock_release (&shared_file_pages_lock);
}
free (page);
}
/* Updates the 'owner' thread's page table entry for virtual address 'upage'
to flag the page as being stored in swap, and stores the specified swap slot
value in the entry at the address bits for later retrieval from disk. */
/* Flags the provided page table entry as representing a swapped out page. */
void
page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
{
uint32_t *pte = lookup_page (owner->pagedir, upage, false);
page_flag_swap (uint32_t *pte, bool set)
{
if (set)
*pte |= (1 << SWAP_FLAG_BIT);
else
*pte &= ~(1 << SWAP_FLAG_BIT);
}
/* Sets the address bits of the page table entry to the provided swap slot
value. To be used for later retrieval of the swap slot when page faulting. */
static void
page_set_swap (struct thread *owner, uint32_t *pte, size_t swap_slot)
{
/* Store the provided swap slot in the address bits of the page table
entry, truncating excess bits. */
*pte |= (1 << SWAP_FLAG_BIT);
@@ -145,13 +365,19 @@ bool
page_in_swap (struct thread *owner, void *upage)
{
uint32_t *pte = lookup_page (owner->pagedir, upage, false);
return pte != NULL &&
(*pte & (1 << SWAP_FLAG_BIT)) != 0;
return page_in_swap_pte (pte);
}
/* Returns true iff the page table entry is marked to be in the swap disk.
   A NULL entry is treated as not swapped. */
bool
page_in_swap_pte (uint32_t *pte)
{
  if (pte == NULL)
    return false;
  return (*pte & (1 << SWAP_FLAG_BIT)) != 0;
}
/* Given that the page with user address 'upage' owned by 'owner' is flagged
to be in the swap disk via the owner's page table, returns its stored
swap slot. Otherwise panics the kernel. */
swap slot and marks the PTE as not being in swap. */
size_t
page_get_swap (struct thread *owner, void *upage)
{
@@ -161,5 +387,85 @@ page_get_swap (struct thread *owner, void *upage)
ASSERT ((*pte & PTE_P) == 0);
/* Masks the address bits and returns truncated value. */
page_flag_swap (pte, false);
return ((*pte & PTE_ADDR) >> ADDR_START_BIT);
}
/* Returns the swap slot stored in a PTE.  The entry must be non-NULL and
   must not be marked present. */
size_t
page_get_swap_pte (uint32_t *pte)
{
  ASSERT (pte != NULL);
  ASSERT ((*pte & PTE_P) == 0);
  /* The slot lives in the (now unused) physical address bits. */
  uint32_t addr_bits = *pte & PTE_ADDR;
  return addr_bits >> ADDR_START_BIT;
}
/* Flags the provided page table entry as representing a shared page when
   SHARED is true; clears the flag otherwise.  The OWNER thread must have a
   page table entry for UPAGE. */
static void
page_flag_shared (struct thread *owner, void *upage, bool shared)
{
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
  ASSERT (pte != NULL);
  uint32_t mask = 1 << SHARED_FLAG_BIT;
  if (shared)
    *pte |= mask;
  else
    *pte &= ~mask;
}
/* Returns true iff the page table entry is marked to be shared.
   A NULL entry is treated as not shared. */
bool
page_is_shared_pte (uint32_t *pte)
{
  if (pte == NULL)
    return false;
  return (*pte & (1 << SHARED_FLAG_BIT)) != 0;
}
/* Initializes the shared file pages hash table and its lock.  Panics the
   kernel if the hash table cannot be initialized.  Must be called once at
   boot before any shared page is created. */
void
shared_file_pages_init (void)   /* '(void)' matches the header prototype;
                                   '()' is an old-style non-prototype in C. */
{
  if (!hash_init (&shared_file_pages, shared_file_page_hash,
                  shared_file_page_less, NULL))
    PANIC ("Failed to initialize shared file pages hash table.");
  lock_init (&shared_file_pages_lock);
}
/* Hash function for shared file pages, used for storing entries in the
   shared file pages table.  The key is the pair (file inode, upage), hashed
   as raw bytes. */
static unsigned
shared_file_page_hash (const struct hash_elem *e, void *aux UNUSED)
{
  struct shared_file_page *sfp = hash_entry (e, struct shared_file_page, elem);
  void *key[2];
  key[0] = file_get_inode (sfp->file);
  key[1] = sfp->upage;
  return hash_bytes (key, sizeof key);
}
/* 'less_func' comparison function for shared file pages, used for comparing
   the keys of the shared file pages table.
   NOTE(review): this is not a strict weak ordering — when the files differ
   (presumably file_compare returns true for equal files; TODO confirm),
   both less(a,b) and less(b,a) are true.  Pintos hash only uses 'less' to
   test equality (!less(a,b) && !less(b,a)), for which this does yield
   "same file AND same upage", so lookups still work — but this comparator
   must never be used for ordered iteration. */
static bool
shared_file_page_less (const struct hash_elem *a_, const struct hash_elem *b_,
                       void *aux UNUSED)
{
  const struct shared_file_page *a
      = hash_entry (a_, struct shared_file_page, elem);
  const struct shared_file_page *b
      = hash_entry (b_, struct shared_file_page, elem);
  return !file_compare (a->file, b->file) || a->upage < b->upage;
}
static struct shared_file_page *
shared_file_page_get (struct file *file, void *upage)
{
struct shared_file_page fake_sfp;
fake_sfp.file = file;
fake_sfp.upage = upage;
struct hash_elem *e = hash_find (&shared_file_pages, &fake_sfp.elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct shared_file_page, elem);
}

View File

@@ -2,17 +2,25 @@
#define VM_PAGE_H
#include "threads/thread.h"
#include "threads/synch.h"
#include "filesys/off_t.h"
enum page_type {
PAGE_FILE,
PAGE_EMPTY
enum page_type
{
PAGE_EXECUTABLE,
PAGE_MMAP,
PAGE_SHARED
};
struct page_entry {
struct page_entry
{
enum page_type type; /* Type of Data that should go into the page */
void *upage; /* Start Address of the User Page (Key of hash table). */
void *frame; /* Frame Address where the page is loaded. */
/* Data for swapped pages */
struct lock lock; /* Enforces mutual exclusion in accessing the page
referenced by the entry between its owning process
and any thread performing page eviction. */
/* File Data */
struct file *file; /* Pointer to the file for executables. */
@@ -24,18 +32,38 @@ struct page_entry {
struct hash_elem elem; /* An elem for the hash table. */
};
unsigned page_hash (const struct hash_elem *e, void *aux);
bool page_less (const struct hash_elem *a_, const struct hash_elem *b_,
void *aux);
struct page_entry *page_insert (struct file *file, off_t ofs, void *upage,
uint32_t read_bytes, uint32_t zero_bytes,
bool writable, enum page_type type);
struct page_entry *page_get (void *upage);
bool page_load (struct page_entry *page, bool writable);
struct shared_file_page
{
struct file *file; /* The shared file page's source file, used for indexing
the table. */
void *upage; /* The shared page's upage which is the same across all process
using it. Used for indexing the table. */
void *frame; /* Set to the frame address of the page when it is in memory.
Set to NULL when the page is in swap. */
size_t swap_slot; /* Set to the swap_slot of the shared paged if it is
currently in swap. Should not be used when frame is not
NULL.*/
int ref_count; /* Number of processes that are using this shared page. */
struct hash_elem elem; /* An elem for the hash table. */
};
bool init_pages (struct hash *pages);
bool page_insert_swapped (void *upage, void *kpage, struct list *owners,
struct thread *cur);
struct page_entry *page_insert_file (struct file *file, off_t ofs, void *upage,
uint32_t read_bytes, uint32_t zero_bytes,
bool writable, enum page_type);
struct page_entry *page_get (struct thread *thread, void *upage);
bool page_load_file (struct page_entry *page);
void page_cleanup (struct hash_elem *e, void *aux);
void page_set_swap (struct thread *, void *, size_t);
bool page_in_swap (struct thread *, void *);
size_t page_get_swap (struct thread *, void *);
bool page_in_swap_pte (uint32_t *pte);
size_t page_get_swap (struct thread *owner, void *upage);
size_t page_get_swap_pte (uint32_t *pte);
#endif /* vm/frame.h */
bool page_is_shared_pte (uint32_t *pte);
void shared_file_pages_init (void);
#endif