feat: implement proper destruction of pages, including for shared pages

2024-12-05 22:23:50 +00:00
parent dd46200256
commit 9aa9cdb91e
4 changed files with 119 additions and 45 deletions

@@ -35,7 +35,7 @@ struct frame_metadata
{
void *frame; /* The kernel virtual address holding the frame. */
void *upage; /* The user virtual address pointing to the frame. */
struct thread *owner; /* Pointer to the thread that owns the frame. */
struct list owners; /* List of threads that own the frame. */
struct hash_elem hash_elem; /* Tracks the position of the frame metadata
within 'frame_table', whose key is the
kernel virtual address of the frame. */
@@ -92,7 +92,18 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
ASSERT (victim != NULL); /* get_victim () should never return null. */
/* 2. Swap out victim into disk. */
page_insert_swapped (victim->upage, victim->frame, victim->owner);
page_insert_swapped (victim->upage, victim->frame, &victim->owners);
/* Free victim's owners. */
struct list_elem *oe;
for (oe = list_begin (&victim->owners);
oe != list_end (&victim->owners);)
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
oe = list_remove (oe);
free (frame_owner);
}
/* If zero flag is set, zero out the victim page. */
if (flags & PAL_ZERO)
@@ -113,6 +124,8 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
/* Must own lru_lock here, as otherwise there is a race condition
with next_victim either being NULL or uninitialized. */
frame_metadata = malloc (sizeof (struct frame_metadata));
if (frame_metadata == NULL)
PANIC ("Couldn't allocate memory for frame metadata!\n");
frame_metadata->frame = frame;
/* Newly allocated frames are pushed to the back of the circular queue
@@ -132,8 +145,13 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
hash_insert (&frame_table, &frame_metadata->hash_elem);
}
struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
if (frame_owner == NULL)
PANIC ("Couldn't allocate memory for frame owner!\n");
frame_owner->owner = owner;
list_init (&frame_metadata->owners);
list_push_back (&frame_metadata->owners, &frame_owner->elem);
frame_metadata->upage = upage;
frame_metadata->owner = owner;
lock_release (&lru_lock);
return frame_metadata->frame;
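
For context on the owner-list teardown in the eviction path above: Pintos's list_remove () returns the successor of the removed element, so the frame_owner pointer must be captured before the node is unlinked. A minimal sketch of that idiom as a standalone helper (the helper name and the "vm/frame.h" include path are assumptions, not part of this commit):

/* Sketch only: free every frame_owner node in OWNERS.  Capture the entry
   before list_remove () advances the iterator past it. */
#include <list.h>
#include "threads/malloc.h"
#include "vm/frame.h"           /* Assumed location of struct frame_owner. */

static void
owner_list_destroy (struct list *owners)
{
  struct list_elem *oe = list_begin (owners);
  while (oe != list_end (owners))
    {
      struct frame_owner *frame_owner
          = list_entry (oe, struct frame_owner, elem);
      oe = list_remove (oe);    /* Advance to the successor first...  */
      free (frame_owner);       /* ...then release the unlinked node. */
    }
}
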
@@ -181,24 +199,34 @@ frame_free (void *frame)
static struct frame_metadata *
get_victim (void)
{
struct list_elem *e = next_victim;
struct list_elem *ve = next_victim;
struct frame_metadata *frame_metadata;
uint32_t *pd;
void *upage;
for (;;)
bool found = false;
while (!found)
{
frame_metadata = list_entry (e, struct frame_metadata, list_elem);
pd = frame_metadata->owner->pagedir;
upage = frame_metadata->upage;
e = lru_next (e);
frame_metadata = list_entry (ve, struct frame_metadata, list_elem);
ve = lru_next (ve);
struct list_elem *oe;
if (!pagedir_is_accessed (pd, upage))
break;
/* The scan ends once a frame not recently accessed by any owner is found. */
found = true;
for (oe = list_begin (&frame_metadata->owners);
oe != list_end (&frame_metadata->owners); oe = list_next (oe))
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
uint32_t *pd = frame_owner->owner->pagedir;
void *upage = frame_metadata->upage;
if (pagedir_is_accessed (pd, upage))
{
found = false;
pagedir_set_accessed (pd, upage, false);
}
}
}
next_victim = e;
next_victim = ve;
return frame_metadata;
}
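
The clock sweep above treats a frame as a valid victim only when no owner has touched its page since the previous pass, and it clears every accessed bit it finds set so the frame earns a second chance. The inner loop can be restated as a predicate; this is a sketch only (the helper does not exist in the commit, and the "vm/frame.h" path plus the visibility of struct frame_metadata outside frame.c are assumptions):

/* Sketch only: true if any owner referenced FM's page since the last sweep;
   clears each accessed bit so the frame gets a second chance. */
#include <list.h>
#include <stdbool.h>
#include "threads/thread.h"
#include "userprog/pagedir.h"
#include "vm/frame.h"

static bool
frame_recently_used (struct frame_metadata *fm)
{
  bool accessed = false;
  struct list_elem *oe;
  for (oe = list_begin (&fm->owners); oe != list_end (&fm->owners);
       oe = list_next (oe))
    {
      struct frame_owner *fo = list_entry (oe, struct frame_owner, elem);
      if (pagedir_is_accessed (fo->owner->pagedir, fm->upage))
        {
          accessed = true;
          pagedir_set_accessed (fo->owner->pagedir, fm->upage, false);
        }
    }
  return accessed;
}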

@@ -4,6 +4,12 @@
#include "threads/thread.h"
#include "threads/palloc.h"
struct frame_owner
{
struct thread *owner; /* The thread that owns the frame. */
struct list_elem elem; /* List element for the list of owners. */
};
void frame_init (void);
void *frame_alloc (enum palloc_flags, void *, struct thread *);
void frame_free (void *frame);
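
The new struct frame_owner lets a single resident frame be referenced by several threads at once, which is what read-only page sharing requires. Purely for illustration, attaching an additional owner to an already-resident frame could look like the sketch below (a hypothetical helper, not part of this commit; struct frame_metadata is defined in frame.c, so its visibility here is an assumption):

/* Sketch only: register thread T as a co-owner of the frame described by FM,
   e.g. when a second process maps the same read-only file page. */
#include <list.h>
#include <stdbool.h>
#include "threads/malloc.h"
#include "threads/thread.h"
#include "vm/frame.h"           /* The header shown above (path assumed). */

static bool
frame_add_owner (struct frame_metadata *fm, struct thread *t)
{
  struct frame_owner *fo = malloc (sizeof *fo);
  if (fo == NULL)
    return false;               /* Let the caller decide how to handle OOM. */
  fo->owner = t;
  list_push_back (&fm->owners, &fo->elem);
  return true;
}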

@@ -68,14 +68,27 @@ static void page_set_swap (struct thread *owner, uint32_t *pte,
/* Swap out the 'upage' stored at 'kpage' on behalf of its 'owners'. For a
non-shared page, also allocate and insert a new page entry into the owner
thread's SPT representing this swapped out page. */
struct page_entry *
page_insert_swapped (void *upage, void *kpage, struct thread *owner)
bool
page_insert_swapped (void *upage, void *kpage, struct list *owners)
{
struct list_elem *e;
for (e = list_begin (owners); e != list_end (owners); e = list_next (e))
{
struct thread *owner = list_entry (e, struct frame_owner, elem)->owner;
uint32_t *pte = lookup_page (owner->pagedir, upage, false);
if (page_is_shared_pte (pte))
{
ASSERT (list_size (owners) == 1);
pagedir_clear_page (owner->pagedir, upage);
size_t swap_slot = swap_out (kpage);
return true;
}
ASSERT (list_size (owners) == 1);
/* 1. Initialize swapped page entry. */
struct page_entry *page = malloc(sizeof (struct page_entry));
struct page_entry *page = malloc (sizeof (struct page_entry));
if (page == NULL)
return NULL;
return false;
page->upage = upage;
lock_init (&page->lock);
@@ -83,15 +96,14 @@ page_insert_swapped (void *upage, void *kpage, struct thread *owner)
/* Mark page as 'swapped' and flag the page directory as having
been modified *before* eviction begins to prevent the owner of the
victim page from accessing/modifying it mid-eviction. */
/* TODO: We need to stop the process from destroying pagedir mid-eviction,
as this could render the page table entry invalid. */
uint32_t *pte = lookup_page (owner->pagedir, upage, false);
/* TODO: We need to stop the process from destroying pagedir
mid-eviction, as this could render the page table entry invalid. */
ASSERT (pte != NULL);
page_flag_swap (pte, true);
lock_acquire (&page->lock);
struct hash_elem *elem = hash_insert (&owner->pages, &page->elem);
ASSERT(elem == NULL);
ASSERT (elem == NULL);
pagedir_clear_page (owner->pagedir, upage);
@@ -99,7 +111,8 @@ page_insert_swapped (void *upage, void *kpage, struct thread *owner)
page_set_swap (owner, pte, swap_slot);
lock_release (&page->lock);
return page;
}
return true;
}
/* Allocate and insert a new page entry into the user process thread's
@@ -190,8 +203,9 @@ page_load_file (struct page_entry *page)
}
page_flag_shared (t, page->upage, true);
}
lock_release (&shared_file_pages_lock);
sfp->ref_count++;
page->type = PAGE_SHARED;
lock_release (&shared_file_pages_lock);
return true;
}
}
@@ -242,8 +256,8 @@ page_load_file (struct page_entry *page)
sfp->ref_count = 1;
hash_insert (&shared_file_pages, &sfp->elem);
page_flag_shared (t, page->upage, true);
lock_release (&shared_file_pages_lock);
page->type = PAGE_SHARED;
lock_release (&shared_file_pages_lock);
}
/* Mark the page as loaded successfully. */
@@ -255,7 +269,26 @@ page_load_file (struct page_entry *page)
void
page_cleanup (struct hash_elem *e, void *aux UNUSED)
{
free (hash_entry (e, struct page_entry, elem));
struct page_entry *page = hash_entry (e, struct page_entry, elem);
if (page->type == PAGE_SHARED)
{
lock_acquire (&shared_file_pages_lock);
struct shared_file_page *sfp
= shared_file_page_get (page->file, page->upage);
ASSERT (sfp != NULL);
sfp->ref_count--;
if (sfp->ref_count == 0)
{
hash_delete (&shared_file_pages, &sfp->elem);
if (sfp->frame != NULL)
frame_free (sfp->frame);
else
swap_drop (sfp->swap_slot);
free (sfp);
}
lock_release (&shared_file_pages_lock);
}
free (page);
}
/* Flags the provided page table entry as representing a swapped out page. */
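
Because page_cleanup () above matches Pintos's hash_action_func signature, the natural way to drive it is hash_destroy (), which visits every supplemental page table entry exactly once and lets the last process holding a shared page release its frame or swap slot. A sketch of such a call site (not part of this diff; the function name and the "vm/page.h" path are assumptions):

/* Sketch only: tear down a thread's supplemental page table at exit. */
#include <hash.h>
#include "threads/thread.h"
#include "vm/page.h"            /* Assumed to declare page_cleanup (). */

static void
spt_destroy (struct thread *t)
{
  /* hash_destroy () calls page_cleanup () once per entry; shared entries
     drop their reference count, and only the final owner frees the
     backing frame or swap slot. */
  hash_destroy (&t->pages, page_cleanup);
}
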
@@ -381,5 +414,13 @@ shared_file_page_less (const struct hash_elem *a_, const struct hash_elem *b_,
static struct shared_file_page *
shared_file_page_get (struct file *file, void *upage)
{
struct shared_file_page fake_sfp;
fake_sfp.file = file;
fake_sfp.upage = upage;
struct hash_elem *e = hash_find (&shared_file_pages, &fake_sfp.elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct shared_file_page, elem);
}
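
The lookup above works because hash_find () only reads the key fields that the table's hash and comparison callbacks consult, so a partially initialized shared_file_page on the stack is enough to serve as a search key. For illustration, a hash callback consistent with keying shared pages by (file, upage) might look like this (the commit's actual hash function is not shown in this diff, and the "vm/page.h" path is an assumption):

/* Sketch only: hash a shared_file_page by its file pointer and user page.
   The real callback in page.c may differ. */
#include <hash.h>
#include <debug.h>
#include "vm/page.h"            /* Assumed to define struct shared_file_page. */

static unsigned
shared_file_page_hash_sketch (const struct hash_elem *e, void *aux UNUSED)
{
  const struct shared_file_page *sfp
      = hash_entry (e, struct shared_file_page, elem);
  return hash_bytes (&sfp->file, sizeof sfp->file)
         ^ hash_bytes (&sfp->upage, sizeof sfp->upage);
}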

@@ -45,8 +45,7 @@ struct shared_file_page
};
bool init_pages (struct hash *pages);
struct page_entry *page_insert_swapped (void *upage, void* kpage,
struct thread *owner);
bool page_insert_swapped (void *upage, void *kpage, struct list *owners);
struct page_entry *page_insert_file (struct file *file, off_t ofs, void *upage,
uint32_t read_bytes, uint32_t zero_bytes,
bool writable, enum page_type);