Compare commits
19 Commits
vm/merged/
...
master
| Author | SHA1 | Date |
|---|---|---|
| | ad4eda5385 | |
| | 8d1b4c4994 | |
| | 07c0219058 | |
| | 5fbabdcec9 | |
| | 5e8bdc68e7 | |
| | d039b59b7c | |
| | 29c0b93711 | |
| | 77fedd6666 | |
| | eba8c1ffa8 | |
| | be696ec528 | |
| | 7611090253 | |
| | 7f058ffc90 | |
| | c1bc70adad | |
| | 22f3b0950f | |
| | f64b92bbfa | |
| | 3d6e30119b | |
| | 4104d2c852 | |
| | d389c15828 | |
| | c68fea5249 | |
```diff
@@ -38,4 +38,3 @@ test_vm:
  extends: .pintos_tests
  variables:
    DIR: vm
    IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-over-stk)
```
```diff
@@ -736,9 +736,7 @@ init_thread (struct thread *t, const char *name, int nice, int priority,
  t->recent_cpu = recent_cpu;
  t->priority = t->base_priority;

#ifdef VM
  lock_init (&t->ptable_lock);
#endif
  lock_init (&t->pages_lock);

  old_level = intr_disable ();
  list_push_back (&all_list, &t->allelem);
```
```diff
@@ -136,9 +136,7 @@ struct thread
    struct list_elem elem;          /* List element. */

    struct hash pages;              /* Table of open user pages. */

    struct lock ptable_lock;        /* Protects access to the process's
                                       page directory and SPT. */
    struct lock pages_lock;         /* Lock for the supplementary page table. */

    /* Memory mapped files for user virtual memory. */
    struct hash mmap_files;         /* List of memory mapped files. */
```
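The supplemental page table added to `struct thread` here is a Pintos `struct hash` keyed on the page's user virtual address. A minimal sketch of the hash callbacks and the `init_pages` helper declared in page.h could look like the following; the callback names `page_hash`/`page_less` are illustrative, not necessarily what this branch uses, and the sketch assumes `struct page_entry` exposes `upage` and `elem` as in the hunks further down.

```c
#include <hash.h>
#include <debug.h>
#include "vm/page.h"

/* Hash a page_entry by its user virtual address (illustrative sketch). */
static unsigned
page_hash (const struct hash_elem *e, void *aux UNUSED)
{
  const struct page_entry *p = hash_entry (e, struct page_entry, elem);
  return hash_bytes (&p->upage, sizeof p->upage);
}

/* Order page_entries by user virtual address. */
static bool
page_less (const struct hash_elem *a, const struct hash_elem *b,
           void *aux UNUSED)
{
  return hash_entry (a, struct page_entry, elem)->upage
         < hash_entry (b, struct page_entry, elem)->upage;
}

/* Initialise a thread's supplemental page table. */
bool
init_pages (struct hash *pages)
{
  return hash_init (pages, page_hash, page_less, NULL);
}
```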
```diff
@@ -2,7 +2,6 @@
#include <inttypes.h>
#include <stdio.h>
#include "stdbool.h"
#include "threads/synch.h"
#include "userprog/gdt.h"
#include "threads/interrupt.h"
#include "threads/thread.h"
@@ -169,9 +168,6 @@ page_fault (struct intr_frame *f)
     So we attempt to grow the stack. If this does not work, we check our SPT to
     see if the page is expected to have data loaded in memory. */
  void *upage = pg_round_down (fault_addr);

  printf ("FATHER, I FAULT AT %p!\n", fault_addr);

  if (not_present && is_user_vaddr (upage) && upage != NULL)
    {
      if (fetch_page (upage, write))
@@ -185,11 +181,11 @@ page_fault (struct intr_frame *f)
  /* If the page fault occurred in kernel mode, then we intentionally indicate
     a fault (for get_user() etc). */
  if (!user)
    {
      f->eip = (void *)f->eax;
      f->eax = 0xffffffff;
      return;
    }
    {
      f->eip = (void *)f->eax;
      f->eax = 0xffffffff;
      return;
    }

  /* To implement virtual memory, delete the rest of the function
```
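The `f->eip = (void *) f->eax; f->eax = 0xffffffff;` dance follows the convention from the Pintos reference manual for reading user memory inside the kernel: the helper below (the standard documentation version, shown here for context) stashes a recovery address in EAX before touching the user byte, so the fault handler above can resume it with a -1 result instead of killing the kernel.

```c
/* Reads a byte at user virtual address UADDR.  Returns the byte value if
   successful, -1 if a segfault occurred.  The page-fault handler cooperates
   by copying EAX (the recovery address) into EIP and setting EAX to
   0xffffffff, which this function then returns as -1. */
static int
get_user (const uint8_t *uaddr)
{
  int result;
  asm ("movl $1f, %0; movzbl %1, %0; 1:"
       : "=&a" (result) : "m" (*uaddr));
  return result;
}
```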
```diff
@@ -254,21 +250,17 @@ grow_stack (void *upage)
bool
fetch_page (void *upage, bool write)
{
  struct thread *t = thread_current ();
  /* Check if the page is in the supplemental page table. That is, it is a page
     that is expected to be in memory. */
  lock_acquire (&t->ptable_lock);
  struct page_entry *page = page_get (t, upage);
  lock_release (&t->ptable_lock);
  struct page_entry *page = page_get (thread_current (), upage);
  if (page == NULL)
    {
      return false;
    }
    return false;

  /* Check if the non-present user page is in the swap partition.
     If so, swap it back into main memory, updating the PTE for
     the faulted virtual address to point to the newly allocated
     frame. */
  struct thread *t = thread_current ();
  if (page_in_swap (t, upage))
    {
      /* NOTE: This code should be refactored and moved into helper functions
@@ -283,6 +275,9 @@ fetch_page (void *upage, bool write)

      bool writeable = pagedir_is_writable (t->pagedir, upage);

      /* TODO: When this returns false we should quit the page fault,
         but currently we continue and check the stack conditions in the
         page fault handler. */
      return pagedir_set_page (t->pagedir, upage, kpage, writeable);
    }

@@ -294,7 +289,7 @@ fetch_page (void *upage, bool write)
  bool success = false;
  switch (page->type) {
    case PAGE_MMAP:
    case PAGE_FILE:
    case PAGE_EXECUTABLE:
    case PAGE_SHARED:
      success = page_load_file (page);
      break;
```
```diff
@@ -371,7 +371,9 @@ process_exit (void)
  hash_destroy (&cur->open_files, fd_cleanup);

  /* Clean up the thread's supplemental page table. */
  lock_acquire (&cur->pages_lock);
  hash_destroy (&cur->pages, page_cleanup);
  lock_release (&cur->pages_lock);

  /* Close the executable file, implicitly allowing it to be written to. */
  if (cur->exec_file != NULL)
@@ -715,7 +717,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,

      /* Add the page metadata to the SPT to be lazy loaded later on */
      if (page_insert_file (file, ofs, upage, page_read_bytes, page_zero_bytes,
                            writable, PAGE_FILE) == NULL)
                            writable, PAGE_EXECUTABLE) == NULL)
        return false;

      /* Advance. */
```
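For context, `page_read_bytes`/`page_zero_bytes` in the lazy-loading hunk are the usual per-page split of the segment, computed at the top of `load_segment`'s loop in stock Pintos. A small sketch of that logic, not this branch's exact lines:

```c
#include <stddef.h>
#include "threads/vaddr.h"   /* PGSIZE */

/* Per-page split used by load_segment: how many bytes of this page come from
   the file and how many are zero-filled (sketch of the stock Pintos logic). */
static void
page_split (size_t remaining_read_bytes, size_t *page_read_bytes,
            size_t *page_zero_bytes)
{
  *page_read_bytes = remaining_read_bytes < PGSIZE ? remaining_read_bytes
                                                   : PGSIZE;
  *page_zero_bytes = PGSIZE - *page_read_bytes;
}
```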
```diff
@@ -460,15 +460,10 @@ syscall_mmap (int fd, void *addr)

  /* Check and ensure that there is enough space in the user virtual memory to
     hold the entire file. */
  lock_acquire (&thread_current ()->ptable_lock);
  for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
    {
      if (page_get (thread_current (), addr + ofs) != NULL)
        {
          lock_release (&thread_current ()->ptable_lock);
          return MMAP_FAILURE;
        }
    }
        return MMAP_FAILURE;

  /* Map the file data into the user virtual memory starting from addr. */
  for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
@@ -477,20 +472,15 @@ syscall_mmap (int fd, void *addr)
      off_t zero_bytes = PGSIZE - read_bytes;

      if (page_insert_file (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
                            PAGE_FILE) == NULL)
        {
          lock_release (&thread_current ()->ptable_lock);
          return MMAP_FAILURE;
        }
                            PAGE_MMAP) == NULL)
        return MMAP_FAILURE;
    }
  lock_release (&thread_current ()->ptable_lock);

  /* Create a new mapping for the file. */
  struct mmap_entry *mmap = mmap_insert (file, addr);
  if (mmap == NULL)
    return MMAP_FAILURE;

  return mmap->mapping;
}
```
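From the user side, this syscall is normally driven through the project-3 wrappers in `lib/user/syscall.h`. A hypothetical usage, with an illustrative file name and page-aligned address chosen by the program:

```c
#include <syscall.h>   /* Pintos user library: open, mmap, munmap */

void
map_example (void)
{
  int fd = open ("data.bin");               /* hypothetical file name */
  if (fd < 0)
    return;

  /* ADDR must be page-aligned and unused; 0x10000000 is only illustrative. */
  mapid_t map = mmap (fd, (void *) 0x10000000);
  if (map == -1)                            /* MMAP_FAILURE on the kernel side */
    return;

  /* ... read or write the mapped bytes directly through the pointer ... */

  munmap (map);                             /* writes dirty pages back */
}
```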
```diff
@@ -6,7 +6,6 @@
#include "page.h"
#include "filesys/file.h"
#include "threads/malloc.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "userprog/syscall.h"
@@ -29,9 +28,6 @@ struct list lru_list;
   victim. Otherwise, the next element in the queue is similarly considered. */
struct list_elem *next_victim = NULL;

/* Synchronisation variables. */
/* Protects access to 'lru_list'. */
struct lock lru_lock;

struct frame_metadata
{
@@ -44,8 +40,8 @@ struct frame_metadata
                                   within 'frame_table', whose key is the
                                   kernel virtual address of the frame. */
  struct list_elem list_elem;   /* Tracks the position of the frame metadata
                                   in either the 'active' or 'inactive' list,
                                   so a victim can be chosen for eviction. */
                                   within 'lru_list', so a victim can be
                                   chosen for eviction. */
};

hash_hash_func frame_metadata_hash;
@@ -67,7 +63,7 @@ frame_init (void)
  hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);

  list_init (&lru_list);
  lock_init (&lru_lock);
  lock_init (&ftable_lock);
}

/* TODO: Consider synchronisation more closely (i.e. just for hash
```
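`frame_metadata_hash` and `frame_metadata_less` are only declared in this hunk; since the struct comment says `frame_table` is keyed on the frame's kernel virtual address, they plausibly look like the sketch below (illustrative, not this branch's code, and assuming `struct frame_metadata` has the `frame` and `hash_elem` fields used elsewhere in the diff).

```c
#include <hash.h>
#include <debug.h>

/* Key frame_table entries on the frame's kernel virtual address (sketch). */
unsigned
frame_metadata_hash (const struct hash_elem *e, void *aux UNUSED)
{
  const struct frame_metadata *m
      = hash_entry (e, struct frame_metadata, hash_elem);
  return hash_bytes (&m->frame, sizeof m->frame);
}

bool
frame_metadata_less (const struct hash_elem *a, const struct hash_elem *b,
                     void *aux UNUSED)
{
  return hash_entry (a, struct frame_metadata, hash_elem)->frame
         < hash_entry (b, struct frame_metadata, hash_elem)->frame;
}
```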
```diff
@@ -83,7 +79,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
  struct frame_metadata *frame_metadata;
  flags |= PAL_USER;

  lock_acquire (&lru_lock);
  lock_acquire (&ftable_lock);
  void *frame = palloc_get_page (flags);

  /* If a frame couldn't be allocated we must be out of main memory. Thus,
@@ -99,7 +95,6 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
      ASSERT (victim != NULL);  /* get_victim () should never return null. */

      /* 2. Handle victim page writing based on its type. */
      lock_acquire (&owner->ptable_lock);
      struct page_entry *victim_page = page_get (thread_current (), victim->upage);
      if (victim_page != NULL && victim_page->type == PAGE_MMAP)
        {
@@ -111,7 +106,6 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
          file_write_at (victim_page->file, victim->upage,
                         victim_page->read_bytes, victim_page->offset);
          lock_release (&filesys_lock);
          lock_release (&owner->ptable_lock);
        }
    }
  else
@@ -128,7 +122,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
      memset (victim->frame, 0, PGSIZE);

      /* 3. Indicate that the new frame's metadata will be stored
         inside the same structure that stored the victim's metadata.
         As both the new frame and the victim frame share the same kernel
         virtual address, the hash map need not be updated, and neither
         the list_elem value as both share the same lru_list position. */
@@ -139,7 +133,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
         we must update the frame table with a new entry, and grow lru_list. */
  else
    {
      /* Must own lru_lock here, as otherwise there is a race condition
      /* Must own ftable_lock here, as otherwise there is a race condition
         with next_victim either being NULL or uninitialized. */
      frame_metadata = malloc (sizeof (struct frame_metadata));
      if (frame_metadata == NULL)
```
```diff
@@ -171,30 +165,36 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
  list_push_back (&frame_metadata->owners, &frame_owner->elem);
  frame_metadata->upage = upage;
  frame_metadata->pinned = false;
  lock_release (&lru_lock);
  lock_release (&ftable_lock);
  return frame_metadata->frame;
}

void
frame_pin (void *frame)
{
  ASSERT (frame != NULL);
  lock_acquire (&ftable_lock);
  struct frame_metadata *frame_metadata = frame_metadata_get (frame);
  if (frame_metadata == NULL)
    PANIC ("Attempted to pin a frame at an unallocated kernel address '%p'\n",
           frame);

  frame_metadata->pinned = true;
  lock_release (&ftable_lock);
}

void
frame_unpin (void *frame)
{
  ASSERT (frame != NULL);
  lock_acquire (&ftable_lock);
  struct frame_metadata *frame_metadata = frame_metadata_get (frame);
  if (frame_metadata == NULL)
    PANIC ("Attempted to unpin a frame at an unallocated kernel address '%p'\n",
           frame);

  frame_metadata->pinned = false;
  lock_release (&ftable_lock);
}

/* Attempt to deallocate a frame for a user process by removing it from the
```
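`frame_pin`/`frame_unpin` exist so the eviction path can skip frames the kernel is actively using. A hypothetical caller pattern (names and context illustrative, not from this branch) is to pin the frame backing a user page for the duration of a kernel-side access:

```c
#include <string.h>
#include "threads/thread.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "vm/frame.h"

/* Copy one user page into BUF without racing against eviction (sketch).
   Returns false if the page is not currently mapped. */
static bool
copy_user_page (void *upage, void *buf)
{
  void *kpage = pagedir_get_page (thread_current ()->pagedir, upage);
  if (kpage == NULL)
    return false;

  frame_pin (kpage);            /* get_victim () now skips this frame */
  memcpy (buf, kpage, PGSIZE);
  frame_unpin (kpage);
  return true;
}
```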
```diff
@@ -210,7 +210,7 @@ frame_free (void *frame)
           frame);

  free_owners (&frame_metadata->owners);
  lock_acquire (&lru_lock);
  lock_acquire (&ftable_lock);
  hash_delete (&frame_table, &frame_metadata->hash_elem);
  list_remove (&frame_metadata->list_elem);

@@ -224,7 +224,7 @@ frame_free (void *frame)
      else
        next_victim = lru_next (next_victim);
    }
  lock_release (&lru_lock);
  lock_release (&ftable_lock);

  free (frame_metadata);
  palloc_free_page (frame);
```
```diff
@@ -241,6 +241,7 @@ frame_owner_insert (void *frame, struct thread *owner)
  struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
  if (frame_owner == NULL)
    return false;

  frame_owner->owner = owner;
  list_push_back (&frame_metadata->owners, &frame_owner->elem);
  return true;
@@ -263,6 +264,7 @@ frame_owner_remove (void *frame, struct thread *owner)
    {
      struct frame_owner *frame_owner
          = list_entry (oe, struct frame_owner, elem);

      oe = list_next (oe);
      if (frame_owner->owner == owner)
        {
```
```diff
@@ -284,13 +286,12 @@ frame_metadata_find (void *frame)
  struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
  if (e == NULL)
    return NULL;

  return hash_entry (e, struct frame_metadata, hash_elem);
}

/* Obtain the next frame that should be evicted following the clock (second
   chance) algorithm, ignoring pinned frames. A pre-condition for calling this
   function is that the calling thread owns lru_lock and that lru_list is
   non-empty. */
/* A pre-condition for calling this function is that the calling thread
   owns ftable_lock and that lru_list is non-empty. */
static struct frame_metadata *
get_victim (void)
{
@@ -300,7 +301,6 @@ get_victim (void)
  while (!found)
    {
      frame_metadata = list_entry (ve, struct frame_metadata, list_elem);

      ve = lru_next (ve);
      struct list_elem *oe;

@@ -315,10 +315,6 @@ get_victim (void)
        {
          struct frame_owner *frame_owner
              = list_entry (oe, struct frame_owner, elem);

          lock_acquire (&frame_owner->owner->ptable_lock);
          /* TODO: Account for death of frame_owner here! */

          uint32_t *pd = frame_owner->owner->pagedir;
          void *upage = frame_metadata->upage;

@@ -327,8 +323,6 @@ get_victim (void)
              found = false;
              pagedir_set_accessed (pd, upage, false);
            }

          lock_release (&frame_owner->owner->ptable_lock);
        }
    }
```
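The comments above describe `get_victim` as the clock (second-chance) algorithm over `lru_list`, skipping pinned frames. Detached from the Pintos structures, the core of that policy is just the following self-contained sketch; it assumes at least one unpinned slot exists, mirroring the function's precondition that `lru_list` is non-empty.

```c
#include <stdbool.h>
#include <stddef.h>

struct slot { bool accessed; bool pinned; };

/* Second-chance eviction: a recently accessed slot gets its bit cleared and
   one more trip around the clock; the first unpinned, not-recently-accessed
   slot becomes the victim.  HAND persists across calls, like next_victim. */
static size_t
clock_pick_victim (struct slot *slots, size_t n, size_t *hand)
{
  for (;;)
    {
      struct slot *s = &slots[*hand];
      size_t idx = *hand;
      *hand = (*hand + 1) % n;      /* advance the clock hand */

      if (s->pinned)
        continue;                   /* never evict pinned frames */
      if (s->accessed)
        s->accessed = false;        /* second chance: clear and move on */
      else
        return idx;                 /* not recently used: evict this one */
    }
}
```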
```diff
@@ -383,7 +377,6 @@ frame_metadata_get (void *frame)

  struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
  if (e == NULL) return NULL;

  return hash_entry (e, struct frame_metadata, hash_elem);
}
```
```diff
@@ -10,7 +10,12 @@ struct frame_owner
  struct list_elem elem;        /* List element for the list of owners. */
};

/* Synchronisation variables. */
/* Protects access to the frame table and its related components. */
struct lock ftable_lock;

void frame_init (void);

void *frame_alloc (enum palloc_flags, void *, struct thread *);
void frame_pin (void *frame);
void frame_unpin (void *frame);
```
```diff
@@ -66,29 +66,28 @@ mmap_unmap (struct mmap_entry *mmap)
  /* Free all the pages associated with the mapping, writing back to the file
     if necessary. */
  off_t length = file_length (mmap->file);
  lock_acquire (&thread_current ()->ptable_lock);
  for (off_t ofs = 0; ofs < length; ofs += PGSIZE)
    {
      void *upage = mmap->upage + ofs;

      /* Get the SPT page entry for this page. */
      struct page_entry *page = page_get (thread_current (), upage);
      if (page == NULL)
        continue;

      /* Write the page back to the file if it is dirty. */
      if (pagedir_is_dirty (thread_current ()->pagedir, upage))
        {
          lock_acquire (&filesys_lock);
          file_write_at (mmap->file, upage, page->read_bytes, ofs);
          lock_release (&filesys_lock);
      void *upage = mmap->upage + ofs;

      /* Get the SPT page entry for this page. */
      struct page_entry *page = page_get (thread_current (), upage);
      if (page == NULL)
        continue;

      /* Write the page back to the file if it is dirty. */
      if (pagedir_is_dirty (thread_current ()->pagedir, upage))
        {
          lock_acquire (&filesys_lock);
          file_write_at (mmap->file, upage, page->read_bytes, ofs);
          lock_release (&filesys_lock);
        }

      /* Remove the page from the supplemental page table. */
      hash_delete (&thread_current ()->pages, &page->elem);
    }

      /* Remove the page from the supplemental page table. */
      hash_delete (&thread_current ()->pages, &page->elem);
    }
  lock_release (&thread_current ()->ptable_lock);

  /* Close the file and free the mmap entry. */
  file_close (mmap->file);
  free (mmap);
}
```
```diff
@@ -8,7 +8,6 @@
#include "threads/palloc.h"
#include "threads/synch.h"
#include "devices/swap.h"
#include "threads/thread.h"
#include "userprog/process.h"
#include "userprog/pagedir.h"
#include "vm/frame.h"
```
```diff
@@ -78,11 +77,6 @@ page_insert_swapped (void *upage, void *kpage, struct list *owners)
  for (e = list_begin (owners); e != list_end (owners); e = list_next (e))
    {
      struct thread *owner = list_entry (e, struct frame_owner, elem)->owner;
      if (!lock_held_by_current_thread (&owner->ptable_lock))
        {
          lock_acquire (&owner->ptable_lock);
        }

      uint32_t *pte = lookup_page (owner->pagedir, upage, false);
      if (exec_file != NULL || page_is_shared_pte (pte))
        {
@@ -90,14 +84,13 @@ page_insert_swapped (void *upage, void *kpage, struct list *owners)
          pagedir_clear_page (owner->pagedir, upage);
          exec_file = owner->exec_file;
          ASSERT (exec_file != NULL);

          lock_release (&owner->ptable_lock);
          continue;
        }
      ASSERT (list_size (owners) == 1);

      /* 1. Initialize swapped page entry. */
      struct page_entry *page = page_get (owner, upage);
      lock_acquire (&owner->pages_lock);
      if (page == NULL)
        {
          page = malloc (sizeof (struct page_entry));
@@ -107,7 +100,6 @@ page_insert_swapped (void *upage, void *kpage, struct list *owners)
          lock_init (&page->lock);
          hash_insert (&owner->pages, &page->elem);
        }
      lock_release (&owner->ptable_lock);

      /* Mark page as 'swapped' and flag the page directory as having
         been modified *before* eviction begins to prevent the owner of the
@@ -122,6 +114,7 @@ page_insert_swapped (void *upage, void *kpage, struct list *owners)
      page_set_swap (owner, pte, swap_slot);

      lock_release (&page->lock);
      lock_release (&owner->pages_lock);
    }
  if (exec_file != NULL)
    {
```
```diff
@@ -141,24 +134,15 @@ page_insert_file (struct file *file, off_t ofs, void *upage,
                  uint32_t read_bytes, uint32_t zero_bytes, bool writable,
                  enum page_type type)
{
  bool ptlock_held =
      lock_held_by_current_thread (&thread_current ()->ptable_lock);

  /* If page exists, just update it. */
  if (!ptlock_held)
    lock_acquire (&thread_current ()->ptable_lock);

  struct page_entry *existing = page_get (thread_current (), upage);

  if (existing != NULL)
    {
      ASSERT (existing->read_bytes == read_bytes);
      ASSERT (existing->zero_bytes == zero_bytes);
      existing->writable = existing->writable || writable;
      lock_release (&thread_current ()->ptable_lock);
      return existing;
    }
  lock_release (&thread_current ()->ptable_lock);

  struct page_entry *page = malloc (sizeof (struct page_entry));
  if (page == NULL)
@@ -173,36 +157,29 @@ page_insert_file (struct file *file, off_t ofs, void *upage,
  page->writable = writable;
  lock_init (&page->lock);

  lock_acquire (&thread_current ()->ptable_lock);
  hash_insert (&thread_current ()->pages, &page->elem);

  if (!ptlock_held)
    lock_release (&thread_current ()->ptable_lock);

  struct thread *t = thread_current ();
  lock_acquire (&t->pages_lock);
  hash_insert (&t->pages, &page->elem);
  lock_release (&t->pages_lock);
  return page;
}

/* Gets a page_entry from the starting address of the page. Returns NULL if no
   such page_entry exists in the hash map. Must only be called on a thread
   whose ptable_lock you own. */
   such page_entry exists in the hash map. */
struct page_entry *
page_get (struct thread *thread, void *upage)
{
  ASSERT (lock_held_by_current_thread (&thread->ptable_lock));

  struct page_entry fake_page_entry;
  fake_page_entry.upage = upage;

  lock_acquire (&thread->pages_lock);
  struct hash_elem *e
      = hash_find (&thread->pages, &fake_page_entry.elem);
  lock_release (&thread->pages_lock);
  if (e == NULL)
    {
      return NULL;
    }

  struct page_entry *pe = hash_entry (e, struct page_entry, elem);

  return pe;
  return hash_entry (e, struct page_entry, elem);
}

bool
```
```diff
@@ -213,12 +190,12 @@ page_load_file (struct page_entry *page)
     panics as this should not happen if eviction is working correctly. */
  struct thread *t = thread_current ();
  bool shareable = !page->writable && file_compare (page->file, t->exec_file);
  shareable = false;
  if (shareable)
    {
      lock_acquire (&shared_file_pages_lock);
      struct shared_file_page *sfp
          = shared_file_page_get (page->file, page->upage);

      if (sfp != NULL)
        {
          /* Frame exists, just install it. */
@@ -229,13 +206,10 @@ page_load_file (struct page_entry *page)
              lock_release (&shared_file_pages_lock);
              return false;
            }
          /* First time adding the shared page, so add thread as owner. */
          if (page->type != PAGE_SHARED)
            {
              frame_owner_insert (sfp->frame, t);
            }
          frame_owner_insert (sfp->frame, t);
        }
      /* Shared page is in swap. Load it. */

      /* Otherwise, shared page is in swap. Load it. */
      else
        {
          void *frame = frame_alloc (PAL_USER, page->upage, t);
@@ -251,6 +225,7 @@ page_load_file (struct page_entry *page)
              return false;
            }
        }

      page_flag_shared (t, page->upage, true);
      if (page->type != PAGE_SHARED)
        {
```
```diff
@@ -348,12 +323,12 @@ page_cleanup (struct hash_elem *e, void *aux UNUSED)
/* Flags the provided page table entry as representing a swapped out page. */
void
page_flag_swap (uint32_t *pte, bool set)
{
  if (set)
    *pte |= (1 << SWAP_FLAG_BIT);
  else
    *pte &= ~(1 << SWAP_FLAG_BIT);
}
{
  if (set)
    *pte |= (1 << SWAP_FLAG_BIT);
  else
    *pte &= ~(1 << SWAP_FLAG_BIT);
}

/* Sets the address bits of the page table entry to the provided swap slot
   value. To be used for later retrieval of the swap slot when page faulting. */
```
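The `SWAP_FLAG_BIT` manipulation, together with the comment about storing the swap slot in the PTE's address bits, amounts to reusing the top 20 bits of a not-present PTE as a slot number. A stand-alone sketch of one such encoding follows; the shift by 12 and the `PTE_ADDR` mask follow `threads/pte.h`, but `page_set_swap`/`page_get_swap` in this branch may differ in detail.

```c
#include <stdint.h>
#include <stddef.h>

#define PTE_ADDR 0xfffff000u   /* address bits of a PTE, as in threads/pte.h */

/* Stash SLOT in the address bits of a not-present PTE (illustrative). */
static void
pte_store_swap_slot (uint32_t *pte, size_t slot)
{
  *pte = (*pte & ~PTE_ADDR) | (((uint32_t) slot << 12) & PTE_ADDR);
}

/* Read the slot back out when handling the fault. */
static size_t
pte_load_swap_slot (uint32_t pte)
{
  return (pte & PTE_ADDR) >> 12;
}
```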
```diff
@@ -7,9 +7,8 @@

enum page_type
{
  PAGE_FILE,
  PAGE_EXECUTABLE,
  PAGE_MMAP,
  PAGE_EMPTY,
  PAGE_SHARED
};
```
```diff
@@ -35,13 +34,18 @@ struct page_entry

struct shared_file_page
{
  struct file *file;
  void *upage;
  void *frame;
  size_t swap_slot;
  int ref_count;
  struct file *file;      /* The shared file page's source file, used for
                             indexing the table. */
  void *upage;            /* The shared page's upage, which is the same across
                             all processes using it. Used for indexing the
                             table. */
  void *frame;            /* Set to the frame address of the page when it is
                             in memory. Set to NULL when the page is in swap. */
  size_t swap_slot;       /* Set to the swap slot of the shared page if it is
                             currently in swap. Should not be used when frame
                             is not NULL. */
  int ref_count;          /* Number of processes that are using this shared
                             page. */

  struct hash_elem elem;
  struct hash_elem elem;  /* An elem for the hash table. */
};

bool init_pages (struct hash *pages);
```
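Since `file` and `upage` are documented above as the indexing fields, the shared-page table's hash and comparison callbacks presumably combine both. One illustrative way to write them (names hypothetical, not taken from this branch):

```c
#include <hash.h>
#include <debug.h>
#include "vm/page.h"

/* Hash a shared_file_page on the (file, upage) pair (illustrative). */
static unsigned
shared_file_page_hash (const struct hash_elem *e, void *aux UNUSED)
{
  const struct shared_file_page *sfp
      = hash_entry (e, struct shared_file_page, elem);
  return hash_bytes (&sfp->file, sizeof sfp->file)
         ^ hash_bytes (&sfp->upage, sizeof sfp->upage);
}

/* Order shared pages first by file, then by user virtual address. */
static bool
shared_file_page_less (const struct hash_elem *a, const struct hash_elem *b,
                       void *aux UNUSED)
{
  const struct shared_file_page *x
      = hash_entry (a, struct shared_file_page, elem);
  const struct shared_file_page *y
      = hash_entry (b, struct shared_file_page, elem);
  return x->file != y->file ? x->file < y->file : x->upage < y->upage;
}
```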