#include <debug.h>
#include <hash.h>
#include <list.h>
#include <string.h>

#include "frame.h"
#include "page.h"
#include "threads/malloc.h"
#include "threads/palloc.h"   /* palloc_get_page (), PAL_* flags. */
#include "threads/thread.h"   /* struct thread and its 'pagedir' member. */
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "threads/synch.h"
#include "devices/swap.h"

/* Hash table that maps every active frame's kernel virtual address
   to its corresponding 'frame_metadata'. */
struct hash frame_table;
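
/* Minimal lookup sketch (illustrative only, not part of this file):
   given a kernel virtual address KPAGE previously returned by
   frame_alloc (), its metadata can be found with a stack-allocated key,
   mirroring the deletion pattern used in frame_free () below:

     struct frame_metadata key;
     key.frame = kpage;
     struct hash_elem *e = hash_find (&frame_table, &key.hash_elem);
     struct frame_metadata *m =
         e != NULL ? hash_entry (e, struct frame_metadata, hash_elem) : NULL;
*/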

/* Linked list used to represent the circular queue in the 'clock'
   algorithm for page eviction. Iterating from the element currently
   pointed at by 'next_victim' yields an ordering of the entries from
   oldest to newest (in terms of when they were added or last checked
   for having been referenced by a process). */
struct list lru_list;

/* The next element in lru_list to be considered for eviction (the oldest
   added or referenced page in the circular queue). If this page has an
   'accessed' bit of 0 when eviction is considered, it becomes the next
   victim. Otherwise, the following element in the queue is considered in
   the same way. */
struct list_elem *next_victim = NULL;

/* Synchronisation variables. */
/* Protects access to 'lru_list'. */
struct lock lru_lock;

struct frame_metadata
  {
    void *frame;                /* The kernel virtual address holding the frame. */
    void *upage;                /* The user virtual address mapped to the frame. */
    struct thread *owner;       /* The thread that owns the frame. */
    struct hash_elem hash_elem; /* Tracks the position of the frame metadata
                                   within 'frame_table', whose key is the
                                   kernel virtual address of the frame. */
    struct list_elem list_elem; /* Tracks the position of the frame metadata
                                   within 'lru_list', so that a victim can be
                                   chosen for eviction. */
  };

hash_hash_func frame_metadata_hash;
hash_less_func frame_metadata_less;

static struct list_elem *lru_next (struct list_elem *e);
static struct list_elem *lru_prev (struct list_elem *e);
static struct frame_metadata *get_victim (void);

/* Initialize the frame system: set up the frame (hash) table with the
   frame_metadata hashing and comparison functions, and initialize
   'lru_list' and its associated synchronisation primitives. */
void
frame_init (void)
{
  hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);

  list_init (&lru_list);
  lock_init (&lru_lock);
}
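
/* Note: frame_init () is expected to be called exactly once, early during
   kernel initialization (once the kernel allocators are available and
   before the first call to frame_alloc ()); the exact call site lies
   outside this file. */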

/* TODO: Consider synchronisation more closely (i.e. just for the hash
   table). */
/* Attempt to allocate a frame for a user process, either by directly
   allocating a user page if there is sufficient RAM, or, if memory
   allocated for user processes is full, by evicting a currently active
   page and storing it in swap. If swap is also full in the latter case,
   the kernel panics. */
void *
frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
{
  struct frame_metadata *frame_metadata;
  flags |= PAL_USER;

  lock_acquire (&lru_lock);
  void *frame = palloc_get_page (flags);

  /* If a frame couldn't be allocated we must be out of main memory. Thus,
     obtain a victim page to replace with our page, and swap the victim
     out to disk. */
  if (frame == NULL)
    {
      /* 1. Obtain victim. */
      if (next_victim == NULL)
        PANIC ("Couldn't allocate a single page to main memory!\n");

      struct frame_metadata *victim = get_victim ();
      ASSERT (victim != NULL); /* get_victim () should never return NULL. */

      /* 2. Swap the victim out to disk. */
      /* Mark the page as 'not present' and flag the page directory as
         having been modified *before* eviction begins, to prevent the
         owner of the victim page from accessing or modifying it
         mid-eviction. */
      pagedir_clear_page (victim->owner->pagedir, victim->upage);

      /* TODO: Lock the PTE of the victim page for the victim process. */

      size_t swap_slot = swap_out (victim->frame);
      page_set_swap (victim->owner, victim->upage, swap_slot);

      /* If the zero flag is set, zero out the frame. */
      if (flags & PAL_ZERO)
        memset (victim->frame, 0, PGSIZE);

      /* 3. Reuse the victim's metadata structure for the new frame.
         As the new frame and the victim frame share the same kernel
         virtual address, neither the hash table entry nor the list_elem
         needs updating; both keep the same key and lru_list position. */
      frame_metadata = victim;
    }

  /* If sufficient main memory allows the frame to be directly allocated,
     we must add a new entry to the frame table and grow lru_list. */
  else
    {
      /* Must own lru_lock here, as otherwise there is a race condition
         with next_victim either being NULL or uninitialized. */
      frame_metadata = malloc (sizeof (struct frame_metadata));
      frame_metadata->frame = frame;

      /* Newly allocated frames are pushed to the back of the circular
         queue represented by lru_list, i.e. immediately before
         'next_victim' in circular order. Must explicitly handle the case
         where the circular queue is empty (when next_victim == NULL). */
      if (next_victim == NULL)
        {
          list_push_back (&lru_list, &frame_metadata->list_elem);
          next_victim = &frame_metadata->list_elem;
        }
      else
        list_insert (next_victim, &frame_metadata->list_elem);

      hash_insert (&frame_table, &frame_metadata->hash_elem);
    }

  frame_metadata->upage = upage;
  frame_metadata->owner = owner;
  lock_release (&lru_lock);

  return frame_metadata->frame;
}
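
/* Usage sketch (illustrative only): a page-fault handler might obtain a
   frame for a faulting user page UPAGE roughly as follows, where
   'install_page' stands for a hypothetical helper that maps UPAGE to the
   returned kernel address in the faulting thread's page directory:

     void *kpage = frame_alloc (PAL_ZERO, upage, thread_current ());
     if (!install_page (upage, kpage, true))
       frame_free (kpage);
*/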

/* Attempt to deallocate a frame for a user process by removing it from the
   frame table as well as lru_list, and freeing the underlying page
   memory & metadata struct. Panics if the frame isn't active in memory. */
void
frame_free (void *frame)
{
  struct frame_metadata key_metadata;
  key_metadata.frame = frame;

  struct hash_elem *e =
      hash_delete (&frame_table, &key_metadata.hash_elem);
  if (e == NULL)
    PANIC ("Attempted to free a frame at kernel address %p, "
           "but this address is not allocated!\n", frame);

  struct frame_metadata *frame_metadata =
      hash_entry (e, struct frame_metadata, hash_elem);

  lock_acquire (&lru_lock);

  /* If we're freeing the frame marked as the next victim, update
     next_victim to either be the next least recently used page, or NULL
     if no other pages are loaded in main memory. Do this *before*
     removing the element, while the circular queue is still intact. */
  if (&frame_metadata->list_elem == next_victim)
    {
      if (list_front (&lru_list) == list_back (&lru_list))
        next_victim = NULL;
      else
        next_victim = lru_next (next_victim);
    }
  list_remove (&frame_metadata->list_elem);
  lock_release (&lru_lock);

  free (frame_metadata);
  palloc_free_page (frame);
}
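
/* Usage note (illustrative only): frame_free () expects the kernel virtual
   address previously returned by frame_alloc (), e.g. when a process's
   pages are torn down on exit; passing any other address panics the
   kernel, as implemented above. */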

/* TODO: Account for page aliases when checking the accessed bit. */
/* Choose a victim frame for eviction using the 'clock' (second-chance)
   algorithm. A pre-condition for calling this function is that the
   calling thread owns lru_lock and that lru_list is non-empty. */
static struct frame_metadata *
get_victim (void)
{
  struct list_elem *e = next_victim;
  struct frame_metadata *frame_metadata;
  uint32_t *pd;
  void *upage;
  for (;;)
    {
      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
      pd = frame_metadata->owner->pagedir;
      upage = frame_metadata->upage;
      e = lru_next (e);

      /* A frame whose page has not been accessed since the last sweep is
         chosen as the victim; otherwise its accessed bit is cleared and
         it is given a second chance. */
      if (!pagedir_is_accessed (pd, upage))
        break;

      pagedir_set_accessed (pd, upage, false);
    }

  next_victim = e;
  return frame_metadata;
}
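
/* Illustrative example (not code): with lru_list containing frames
   A -> B -> C, next_victim pointing at A, and accessed bits A=1, B=1,
   C=0, a call to get_victim () clears the accessed bits of A and B,
   returns C as the victim, and leaves next_victim pointing at the
   element after C (wrapping back around to A). */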

/* Hash function for frame metadata, used for storing entries in the
   frame table. */
unsigned
frame_metadata_hash (const struct hash_elem *e, void *aux UNUSED)
{
  struct frame_metadata *frame_metadata =
      hash_entry (e, struct frame_metadata, hash_elem);

  return hash_bytes (&frame_metadata->frame, sizeof (frame_metadata->frame));
}

/* 'less_func' comparison function for frame metadata, used for comparing
   the keys of the frame table. Returns true iff the kernel virtual address
   of the first frame is less than that of the second frame. */
bool
frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
                     void *aux UNUSED)
{
  struct frame_metadata *a =
      hash_entry (a_, struct frame_metadata, hash_elem);
  struct frame_metadata *b =
      hash_entry (b_, struct frame_metadata, hash_elem);

  return a->frame < b->frame;
}

/* Returns the element after the one provided, iterating through lru_list
   as a circular queue (wrapping around to the front at the tail). */
static struct list_elem *
lru_next (struct list_elem *e)
{
  if (!list_empty (&lru_list) && e == list_back (&lru_list))
    return list_front (&lru_list);

  return list_next (e);
}

/* Returns the element before the one provided, iterating through lru_list
   as a circular queue (wrapping around to the back at the head). */
static struct list_elem *
lru_prev (struct list_elem *e)
{
  if (!list_empty (&lru_list) && e == list_front (&lru_list))
    return list_back (&lru_list);

  return list_prev (e);
}