feat: implement proper destruction of pages, including for shared pages

2024-12-05 22:23:50 +00:00
parent dd46200256
commit 9aa9cdb91e
4 changed files with 119 additions and 45 deletions


@@ -35,7 +35,7 @@ struct frame_metadata
   {
     void *frame;                /* The kernel virtual address holding the frame. */
     void *upage;                /* The user virtual address pointing to the frame. */
-    struct thread *owner;       /* Pointer to the thread that owns the frame. */
+    struct list owners;         /* List of threads that own the frame. */
     struct hash_elem hash_elem; /* Tracks the position of the frame metadata
                                    within 'frame_table', whose key is the
                                    kernel virtual address of the frame. */
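
Note: the 'struct frame_owner' node that the rest of this change manipulates is presumably defined in one of the other changed files; a minimal sketch consistent with how it is used below:

struct frame_owner
  {
    struct thread *owner;  /* A thread that maps this frame. */
    struct list_elem elem; /* Element in frame_metadata's 'owners' list. */
  };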
@@ -92,7 +92,18 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
       ASSERT (victim != NULL); /* get_victim () should never return null. */
 
       /* 2. Swap out victim into disk. */
-      page_insert_swapped (victim->upage, victim->frame, victim->owner);
+      page_insert_swapped (victim->upage, victim->frame, &victim->owners);
+
+      /* Free victim's owners. */
+      struct list_elem *oe;
+      for (oe = list_begin (&victim->owners);
+           oe != list_end (&victim->owners);)
+        {
+          struct frame_owner *frame_owner
+            = list_entry (oe, struct frame_owner, elem);
+          oe = list_remove (oe);
+          free (frame_owner);
+        }
 
       /* If zero flag is set, zero out the victim page. */
       if (flags & PAL_ZERO)
@@ -113,6 +124,8 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
       /* Must own lru_lock here, as otherwise there is a race condition
          with next_victim either being NULL or uninitialized. */
       frame_metadata = malloc (sizeof (struct frame_metadata));
+      if (frame_metadata == NULL)
+        PANIC ("Couldn't allocate memory for frame metadata!\n");
       frame_metadata->frame = frame;
 
       /* Newly allocated frames are pushed to the back of the circular queue
@@ -132,8 +145,13 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
       hash_insert (&frame_table, &frame_metadata->hash_elem);
     }
 
+  struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
+  if (frame_owner == NULL)
+    PANIC ("Couldn't allocate memory for frame owner!\n");
+  frame_owner->owner = owner;
+  list_init (&frame_metadata->owners);
+  list_push_back (&frame_metadata->owners, &frame_owner->elem);
   frame_metadata->upage = upage;
-  frame_metadata->owner = owner;
   lock_release (&lru_lock);
 
   return frame_metadata->frame;
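
For context, a typical call site would now look roughly like this (a sketch assuming Pintos' palloc flags and thread API; 'upage' stands for the faulting user page rounded down to a page boundary):

  void *kpage = frame_alloc (PAL_USER | PAL_ZERO, upage, thread_current ());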
@@ -181,24 +199,34 @@ frame_free (void *frame)
 static struct frame_metadata *
 get_victim (void)
 {
-  struct list_elem *e = next_victim;
+  struct list_elem *ve = next_victim;
   struct frame_metadata *frame_metadata;
-  uint32_t *pd;
-  void *upage;
 
-  for (;;)
+  bool found = false;
+  while (!found)
     {
-      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
-      pd = frame_metadata->owner->pagedir;
-      upage = frame_metadata->upage;
-      e = lru_next (e);
+      frame_metadata = list_entry (ve, struct frame_metadata, list_elem);
+      ve = lru_next (ve);
+      struct list_elem *oe;
 
-      if (!pagedir_is_accessed (pd, upage))
-        break;
+      /* Returns once a frame that was not accessed by any owner is found. */
+      found = true;
+      for (oe = list_begin (&frame_metadata->owners);
+           oe != list_end (&frame_metadata->owners); oe = list_next (oe))
+        {
+          struct frame_owner *frame_owner
+            = list_entry (oe, struct frame_owner, elem);
+          uint32_t *pd = frame_owner->owner->pagedir;
+          void *upage = frame_metadata->upage;
 
-      pagedir_set_accessed (pd, upage, false);
+          if (pagedir_is_accessed (pd, upage))
+            {
+              found = false;
+              pagedir_set_accessed (pd, upage, false);
+            }
+        }
     }
 
-  next_victim = e;
+  next_victim = ve;
   return frame_metadata;
 }
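
Sharing itself is not shown in this file; a hypothetical helper along the following lines (not part of this commit) illustrates how a second thread could be registered as an owner of an already-mapped frame, mirroring the allocation path above:

/* Hypothetical: register an additional owner for an existing frame,
   e.g. when two processes map the same read-only page.  Assumes the
   caller holds lru_lock. */
static void
frame_add_owner (struct frame_metadata *frame_metadata, struct thread *owner)
{
  struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
  if (frame_owner == NULL)
    PANIC ("Couldn't allocate memory for frame owner!\n");
  frame_owner->owner = owner;
  list_push_back (&frame_metadata->owners, &frame_owner->elem);
}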