Maintain a reference to the frame allocated in the SPT
@@ -262,7 +262,12 @@ fetch_page (void *upage, bool write)
   bool writeable = pagedir_is_writable (t->pagedir, upage);
   if (pagedir_set_page (t->pagedir, upage, kpage, writeable))
     {
+      struct page_entry *page = page_get(upage);
+      if (page != NULL)
+        page->frame = kpage;
       return true;
     }
 }
 
 /* Check if the page is in the supplemental page table. That is, it is a page
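This hunk is where the new invariant is established: as soon as pagedir_set_page succeeds, the supplemental page table entry for upage remembers the kernel virtual address of the frame backing it. A minimal standalone sketch of that bookkeeping idea (the spt_entry/spt_set_frame names are hypothetical, not code from this commit):

#include <stdio.h>
#include <stddef.h>

/* Trimmed-down SPT entry: only the two fields this commit cares about. */
struct spt_entry
{
  void *upage;   /* user virtual address, the lookup key         */
  void *frame;   /* kernel address of the backing frame, or NULL */
};

/* Record that the page is now backed by 'kpage'; mirrors the update made
   right after the mapping succeeds in the diff above. */
static void
spt_set_frame (struct spt_entry *e, void *kpage)
{
  if (e != NULL)
    e->frame = kpage;
}

int
main (void)
{
  static char fake_frame[4096];
  struct spt_entry e = { .upage = (void *) 0x08048000, .frame = NULL };

  spt_set_frame (&e, fake_frame);   /* after a successful pagedir_set_page () */
  printf ("upage %p is backed by frame %p\n", e.upage, e.frame);
  return 0;
}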
src/vm/frame.c
@@ -2,7 +2,7 @@
 #include <hash.h>
 #include <list.h>
 #include <string.h>
-#include <stdio.h>
 #include "frame.h"
+#include "page.h"
 #include "threads/malloc.h"
@@ -11,151 +11,117 @@
 #include "threads/synch.h"
 #include "devices/swap.h"
 
 /* Hash table that maps every active frame's kernel virtual address
    to its corresponding 'frame_metadata'.*/
+struct frame_entry
+{
+  void *frame;
+  void *upage;
+  struct thread *owner;
+  bool pinned;
+
+  struct hash_elem hash_elem;
+  struct list_elem list_elem;
+};
+
 struct hash frame_table;
 
 /* Linked list used to represent the circular queue in the 'clock'
    algorithm for page eviction. Iterating from the element that is
    currently pointed at by 'next_victim' yields an ordering of the entries
    from oldest to newest (in terms of when they were added or checked
    for having been referenced by a process). */
+struct lock frame_lock;
 struct list lru_list;
+struct list_elem *next_victim;
 
-/* The next element in lru_list to be considered for eviction (oldest added
-   or referenced page in the circular queue). If this page has has an
-   'accessed' bit of 0 when considering eviction, then it will be the next
-   victim. Otherwise, the next element in the queue is similarly considered. */
-struct list_elem *next_victim = NULL;
-
-/* Synchronisation variables. */
-/* Protects access to 'lru_list'. */
-struct lock lru_lock;
-
-struct frame_metadata
-{
-  void *frame;                /* The kernel virtual address holding the frame. */
-  void *upage;                /* The user virtual address pointing to the frame. */
-  struct thread *owner;       /* Pointer to the thread that owns the frame. */
-  bool pinned;
-
-  struct hash_elem hash_elem; /* Tracks the position of the frame metadata
-                                 within 'frame_table', whose key is the
-                                 kernel virtual address of the frame. */
-  struct list_elem list_elem; /* Tracks the position of the frame metadata
-                                 in either the 'active' or 'inactive' list,
-                                 so a victim can be chosen for eviction. */
-};
-
-hash_hash_func frame_metadata_hash;
-hash_less_func frame_metadata_less;
+hash_hash_func frame_hash;
+hash_less_func frame_less;
 
+static struct frame_entry *frame_get (void *frame);
+static struct frame_entry *get_victim (void);
 static struct list_elem *lru_next (struct list_elem *e);
 static struct list_elem *lru_prev (struct list_elem *e);
-static struct frame_metadata *frame_metadata_get (void *frame);
-static struct frame_metadata *get_victim (void);
 /* Initialize the frame system by initializing the frame (hash) table with
    the frame_metadata hashing and comparison functions, as well as initializing
    'lru_list' and its associated synchronisation primitives. */
 void
 frame_init (void)
 {
-  hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
+  hash_init (&frame_table, frame_hash, frame_less, NULL);
+  lock_init (&frame_lock);
   list_init (&lru_list);
-  lock_init (&lru_lock);
 }
 /* TODO: Consider synchronisation more closely (i.e. just for hash
    table). */
 /* Attempt to allocate a frame for a user process, either by direct
    allocation of a user page if there is sufficient RAM, or by
    evicting a currently active page if memory allocated for user
    processes is fulled and storing it in swap. If swap is full in
    the former case, panic the kernel. */
 void *
 frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
 {
-  struct frame_metadata *frame_metadata;
+  lock_acquire (&frame_lock);
+  struct frame_entry *frame_metadata;
   flags |= PAL_USER;
 
-  lock_acquire (&lru_lock);
 
   void *frame = palloc_get_page (flags);
 
   /* If a frame couldn't be allocated we must be out of main memory. Thus,
      obtain a victim page to replace with our page, and swap the victim
      into disk. */
   if (frame == NULL)
     {
       /* 1. Obtain victim. */
       if (next_victim == NULL)
         PANIC ("Couldn't allocate a single page to main memory!\n");
 
-      struct frame_metadata *victim = get_victim ();
+      struct frame_entry *victim = get_victim ();
       ASSERT (victim != NULL); /* get_victim () should never return null. */
 
       /* 2. Swap out victim into disk. */
       /* Mark page as 'not present' and flag the page directory as having
          been modified *before* eviction begins to prevent the owner of the
          victim page from accessing/modifying it mid-eviction. */
       pagedir_clear_page (victim->owner->pagedir, victim->upage);
 
       // TODO: Lock PTE of victim page for victim process.
 
       size_t swap_slot = swap_out (victim->frame);
       page_set_swap (victim->owner, victim->upage, swap_slot);
 
       /* If zero flag is set, zero out the victim page. */
       if (flags & PAL_ZERO)
         memset (victim->frame, 0, PGSIZE);
 
       /* 3. Indicate that the new frame's metadata will be stored
          inside the same structure that stored the victim's metadata.
          As both the new frame and the victim frame share the same kernel
          virtual address, the hash map need not be updated, and neither
          the list_elem value as both share the same lru_list position. */
       frame_metadata = victim;
     }
 
   /* If sufficient main memory allows the frame to be directly allocated,
      we must update the frame table with a new entry, and grow lru_list. */
   else
     {
       /* Must own lru_lock here, as otherwise there is a race condition
          with next_victim either being NULL or uninitialized. */
-      frame_metadata = malloc (sizeof (struct frame_metadata));
+      frame_metadata = malloc (sizeof (struct frame_entry));
       frame_metadata->frame = frame;
 
       /* Newly allocated frames are pushed to the back of the circular queue
          represented by lru_list. Must explicitly handle the case where the
          circular queue is empty (when next_victim == NULL). */
       if (next_victim == NULL)
         {
           list_push_back (&lru_list, &frame_metadata->list_elem);
           next_victim = &frame_metadata->list_elem;
         }
       else
         {
           struct list_elem *lru_tail = lru_prev (next_victim);
           list_insert (lru_tail, &frame_metadata->list_elem);
         }
 
       hash_insert (&frame_table, &frame_metadata->hash_elem);
     }
 
   frame_metadata->upage = upage;
   frame_metadata->owner = owner;
   frame_metadata->pinned = false;
-  lock_release (&lru_lock);
 
-  return frame_metadata->frame;
+  void *frame_addr = frame_metadata->frame;
+  lock_release (&frame_lock);
+  return frame_addr;
 }
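The eviction path above does three things in a fixed order: unmap the victim, copy it to swap, and record the swap slot in the owner's supplemental page table. A hedged sketch of just that step, using only operations that appear in this diff (evict_one itself is a hypothetical helper, and error handling is omitted):

/* Sketch only: the swap-out step of frame_alloc () in isolation. */
static void
evict_one (struct frame_entry *victim)
{
  /* Unmap first, so the owner page-faults instead of touching the
     frame while it is being copied out. */
  pagedir_clear_page (victim->owner->pagedir, victim->upage);

  /* Copy the frame to swap and remember where it went. */
  size_t swap_slot = swap_out (victim->frame);
  page_set_swap (victim->owner, victim->upage, swap_slot);
}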
 void
 frame_pin (void *frame)
 {
-  struct frame_metadata *frame_metadata = frame_metadata_get (frame);
+  struct frame_entry *frame_metadata = frame_get (frame);
   if (frame_metadata == NULL)
     PANIC ("Attempted to pin a frame at an unallocated kernel address '%p'\n",
            frame);
@@ -166,7 +132,7 @@ frame_pin (void *frame)
 void
 frame_unpin (void *frame)
 {
-  struct frame_metadata *frame_metadata = frame_metadata_get (frame);
+  struct frame_entry *frame_metadata = frame_get (frame);
   if (frame_metadata == NULL)
     PANIC ("Attempted to unpin a frame at an unallocated kernel address '%p'\n",
            frame);
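frame_pin and frame_unpin exist so a frame can be excluded from the eviction scan while the kernel is actively using it, for instance while copying file contents into it. A usage sketch, under the assumption that it runs inside Pintos with the usual file and threading headers available (load_into_pinned_frame is a hypothetical helper, not part of this commit):

/* Sketch only: fill a freshly allocated frame from a file while it is
   pinned, so get_victim () cannot pick it mid-copy. */
static bool
load_into_pinned_frame (struct file *file, off_t ofs, void *upage,
                        uint32_t read_bytes)
{
  void *kpage = frame_alloc (PAL_USER, upage, thread_current ());
  if (kpage == NULL)
    return false;

  frame_pin (kpage);                                   /* not a victim candidate */
  file_read_at (file, kpage, read_bytes, ofs);
  memset ((uint8_t *) kpage + read_bytes, 0, PGSIZE - read_bytes);
  frame_unpin (kpage);                                 /* eligible for eviction again */
  return true;
}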
@@ -179,104 +145,111 @@ frame_unpin (void *frame)
    memory & metadata struct. Panics if the frame isn't active in memory. */
 void
 frame_free (void *frame)
 {
-  struct frame_metadata key_metadata;
+  lock_acquire(&frame_lock);
+  struct frame_entry key_metadata;
   key_metadata.frame = frame;
 
   struct hash_elem *e =
       hash_delete (&frame_table, &key_metadata.hash_elem);
-  if (e == NULL) PANIC ("Attempted to free a frame at kernel address %p, "
-                        "but this address is not allocated!\n", frame);
+  if (e == NULL)
+    return;
 
-  struct frame_metadata *frame_metadata =
-      hash_entry (e, struct frame_metadata, hash_elem);
+  struct frame_entry *frame_metadata =
+      hash_entry (e, struct frame_entry, hash_elem);
+
+  struct page_entry *page = page_get (frame_metadata->upage);
+  if (page != NULL)
+    {
+      page->frame = NULL;
+    }
 
-  lock_acquire (&lru_lock);
   list_remove (&frame_metadata->list_elem);
 
   /* If we're freeing the frame marked as the next victim, update
      next_victim to either be the next least recently used page, or NULL
      if no pages are loaded in main memory. */
   if (&frame_metadata->list_elem == next_victim)
     {
       if (list_empty (&lru_list))
         next_victim = NULL;
       else
         next_victim = lru_next (next_victim);
     }
-  lock_release (&lru_lock);
 
   free (frame_metadata);
   palloc_free_page (frame);
+
+  lock_release (&frame_lock);
 }
 /* TODO: Account for page aliases when checking accessed bit. */
 /* A pre-condition for calling this function is that the calling thread
    owns lru_lock and that lru_list is non-empty. */
-static struct frame_metadata *
+static struct frame_entry *
 get_victim (void)
 {
   struct list_elem *e = next_victim;
-  struct frame_metadata *frame_metadata;
+  struct frame_entry *frame_metadata;
   uint32_t *pd;
   void *upage;
   for (;;)
     {
-      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
+      frame_metadata = list_entry (e, struct frame_entry, list_elem);
       pd = frame_metadata->owner->pagedir;
       upage = frame_metadata->upage;
       e = lru_next (e);
 
       /* Skip pinned frames */
       if (frame_metadata->pinned)
         continue;
 
       if (!pagedir_is_accessed (pd, upage))
         break;
 
       pagedir_set_accessed (pd, upage, false);
     }
 
   next_victim = e;
   return frame_metadata;
 }
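get_victim implements the classic second-chance ("clock") policy over lru_list: the hand advances around a circular order, clears accessed bits as it goes, skips pinned frames, and stops at the first unpinned frame whose accessed bit is already clear. The same logic on a plain array, as a self-contained toy (illustrative names only; like the real routine, it assumes at least one unpinned frame exists):

#include <stdbool.h>
#include <stdio.h>

struct toy_frame
{
  bool accessed;
  bool pinned;
};

/* Return the index of the frame the clock hand settles on. */
static int
clock_pick_victim (struct toy_frame frames[], int n, int hand)
{
  for (;;)
    {
      struct toy_frame *f = &frames[hand];
      int cur = hand;
      hand = (hand + 1) % n;      /* advance the clock hand */

      if (f->pinned)
        continue;                 /* never evict a pinned frame */
      if (!f->accessed)
        return cur;               /* second chance already used up */

      f->accessed = false;        /* give the frame a second chance */
    }
}

int
main (void)
{
  struct toy_frame frames[4] = {
    { .accessed = true,  .pinned = false },
    { .accessed = true,  .pinned = true  },
    { .accessed = false, .pinned = false },
    { .accessed = true,  .pinned = false },
  };

  printf ("victim = frame %d\n", clock_pick_victim (frames, 4, 0));
  return 0;
}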
 /* Hash function for frame metadata, used for storing entries in the
    frame table. */
 unsigned
-frame_metadata_hash (const struct hash_elem *e, void *aux UNUSED)
+frame_hash (const struct hash_elem *e, void *aux UNUSED)
 {
-  struct frame_metadata *frame_metadata =
-      hash_entry (e, struct frame_metadata, hash_elem);
+  struct frame_entry *entry =
+      hash_entry (e, struct frame_entry, hash_elem);
 
-  return hash_bytes (&frame_metadata->frame, sizeof (frame_metadata->frame));
+  return hash_bytes (&entry->frame, sizeof (entry->frame));
 }
 
 /* 'less_func' comparison function for frame metadata, used for comparing
    the keys of the frame table. Returns true iff the kernel virtual address
    of the first frame is less than that of the second frame. */
 bool
-frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
-                     void *aux UNUSED)
+frame_less (const struct hash_elem *a_, const struct hash_elem *b_,
+            void *aux UNUSED)
 {
-  struct frame_metadata *a =
-      hash_entry (a_, struct frame_metadata, hash_elem);
-  struct frame_metadata *b =
-      hash_entry (b_, struct frame_metadata, hash_elem);
+  struct frame_entry *a =
+      hash_entry (a_, struct frame_entry, hash_elem);
+  struct frame_entry *b =
+      hash_entry (b_, struct frame_entry, hash_elem);
 
   return a->frame < b->frame;
 }
 
-static struct frame_metadata *
-frame_metadata_get (void *frame)
+static struct frame_entry *
+frame_get (void *frame)
 {
-  struct frame_metadata key_metadata;
-  key_metadata.frame = frame;
+  struct frame_entry fake_frame;
+  fake_frame.frame = frame;
 
-  struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
+  struct hash_elem *e = hash_find (&frame_table, &fake_frame.hash_elem);
   if (e == NULL) return NULL;
 
-  return hash_entry (e, struct frame_metadata, hash_elem);
+  return hash_entry (e, struct frame_entry, hash_elem);
 }
 
 /* Returns the next recently used element after the one provided, which
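frame_get shows the lookup-by-key idiom used throughout this file: a throwaway struct frame_entry on the stack carries only the key field, and hash_find compares it against stored entries via frame_hash and frame_less. A short sketch of the same idiom, assuming Pintos's lib/kernel/hash.h (lookup_example is a hypothetical name):

struct frame_entry *
lookup_example (struct hash *table, void *kernel_addr)
{
  struct frame_entry key;
  key.frame = kernel_addr;        /* only the key field needs to be valid */

  struct hash_elem *e = hash_find (table, &key.hash_elem);
  return e != NULL ? hash_entry (e, struct frame_entry, hash_elem) : NULL;
}

frame_free relies on the same idiom, but with hash_delete so the entry is unlinked in the same step.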
@@ -42,9 +42,10 @@ page_insert (struct file *file, off_t ofs, void *upage, uint32_t read_bytes,
   if (page == NULL)
     return NULL;
 
+  page->upage = upage;
+  page->frame = NULL;
   page->file = file;
   page->offset = ofs;
-  page->upage = upage;
   page->read_bytes = read_bytes;
   page->zero_bytes = zero_bytes;
   page->writable = writable;
@@ -102,6 +103,8 @@ page_load (struct page_entry *page, bool writable)
   /* Zero out the remaining bytes in the frame. */
   memset (frame + page->read_bytes, 0, page->zero_bytes);
 
+  page->frame = frame;
+
   /* Mark the page as loaded successfully. */
   return true;
 }
@@ -111,7 +114,12 @@ page_load (struct page_entry *page, bool writable)
 void
 page_cleanup (struct hash_elem *e, void *aux UNUSED)
 {
-  free (hash_entry (e, struct page_entry, elem));
+  struct page_entry *page = hash_entry (e, struct page_entry, elem);
+
+  if (page->frame != NULL)
+    frame_free (page->frame);
+
+  free (page);
 }
 
 /* Updates the 'owner' thread's page table entry for virtual address 'upage'
@@ -12,6 +12,7 @@ enum page_type {
 struct page_entry {
   enum page_type type;   /* Type of Data that should go into the page */
   void *upage;           /* Start Address of the User Page (Key of hash table). */
+  void *frame;           /* Frame Address where the page is loaded. */
 
   /* File Data */
   struct file *file;     /* Pointer to the file for executables. */
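With the new frame member in struct page_entry, the rest of the VM code can tell whether a page is currently resident: page_insert starts it at NULL, page_load and fetch_page fill it in, frame_free resets it, and page_cleanup releases the frame before freeing the entry. A small hedged sketch of the kind of query this enables (page_is_resident is a hypothetical helper, not part of the commit):

/* Sketch only: a page is resident iff its SPT entry points at a frame. */
static bool
page_is_resident (const struct page_entry *page)
{
  return page != NULL && page->frame != NULL;
}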