Compare commits

..

1 Commit

Author SHA1 Message Date
EDiasAlberto
7965c007c8 feat: add general SPT lock for operations across threads 2024-12-06 06:03:22 +00:00
11 changed files with 82 additions and 89 deletions

View File

@@ -38,3 +38,4 @@ test_vm:
extends: .pintos_tests
variables:
DIR: vm
IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-over-stk)

View File

@@ -736,7 +736,9 @@ init_thread (struct thread *t, const char *name, int nice, int priority,
t->recent_cpu = recent_cpu;
t->priority = t->base_priority;
lock_init (&t->pages_lock);
#ifdef VM
lock_init (&t->spt_lock);
#endif
old_level = intr_disable ();
list_push_back (&all_list, &t->allelem);

View File

@@ -136,7 +136,6 @@ struct thread
struct list_elem elem; /* List element. */
struct hash pages; /* Table of open user pages. */
struct lock pages_lock; /* Lock for the supplementary page table. */
/* Memory mapped files for user virtual memory. */
struct hash mmap_files; /* List of memory mapped files. */
@@ -150,6 +149,8 @@ struct thread
struct hash open_files; /* Hash Table of FD -> Struct File. */
#endif
struct lock spt_lock;
void *curr_esp;
/* Owned by thread.c. */

View File

@@ -181,11 +181,11 @@ page_fault (struct intr_frame *f)
/* If the page fault occurred in kernel mode, then we intentionally indicate
a fault (for get_user() etc). */
if (!user)
{
f->eip = (void *)f->eax;
f->eax = 0xffffffff;
return;
}
{
f->eip = (void *)f->eax;
f->eax = 0xffffffff;
return;
}
/* To implement virtual memory, delete the rest of the function
@@ -289,7 +289,7 @@ fetch_page (void *upage, bool write)
bool success = false;
switch (page->type) {
case PAGE_MMAP:
case PAGE_EXECUTABLE:
case PAGE_FILE:
case PAGE_SHARED:
success = page_load_file (page);
break;

View File

@@ -371,9 +371,7 @@ process_exit (void)
hash_destroy (&cur->open_files, fd_cleanup);
/* Clean up the thread's supplemental page table. */
lock_acquire (&cur->pages_lock);
hash_destroy (&cur->pages, page_cleanup);
lock_release (&cur->pages_lock);
/* Close the executable file, implicitly allowing it to be written to. */
if (cur->exec_file != NULL)
@@ -717,7 +715,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
/* Add the page metadata to the SPT to be lazy loaded later on */
if (page_insert_file (file, ofs, upage, page_read_bytes, page_zero_bytes,
writable, PAGE_EXECUTABLE) == NULL)
writable, PAGE_FILE) == NULL)
return false;
/* Advance. */

View File

@@ -461,9 +461,10 @@ syscall_mmap (int fd, void *addr)
/* Check and ensure that there is enough space in the user virtual memory to
hold the entire file. */
for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
{
if (page_get (thread_current (), addr + ofs) != NULL)
return MMAP_FAILURE;
}
/* Map the file data into the user virtual memory starting from addr. */
for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
@@ -472,7 +473,7 @@ syscall_mmap (int fd, void *addr)
off_t zero_bytes = PGSIZE - read_bytes;
if (page_insert_file (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
PAGE_MMAP) == NULL)
PAGE_FILE) == NULL)
return MMAP_FAILURE;
}
@@ -481,6 +482,7 @@ syscall_mmap (int fd, void *addr)
if (mmap == NULL)
return MMAP_FAILURE;
return mmap->mapping;
}

View File

@@ -28,6 +28,9 @@ struct list lru_list;
victim. Otherwise, the next element in the queue is similarly considered. */
struct list_elem *next_victim = NULL;
/* Synchronisation variables. */
/* Protects access to 'lru_list'. */
struct lock lru_lock;
struct frame_metadata
{
@@ -40,8 +43,8 @@ struct frame_metadata
within 'frame_table', whose key is the
kernel virtual address of the frame. */
struct list_elem list_elem; /* Tracks the position of the frame metadata
within 'lru_list', so a victim can be
chosen for eviction. */
in either the 'active' or 'inactive' list,
so a victim can be chosen for eviction. */
};
hash_hash_func frame_metadata_hash;
@@ -63,7 +66,7 @@ frame_init (void)
hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
list_init (&lru_list);
lock_init (&ftable_lock);
lock_init (&lru_lock);
}
/* TODO: Consider synchronisation more closely (i.e. just for hash
@@ -79,7 +82,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
struct frame_metadata *frame_metadata;
flags |= PAL_USER;
lock_acquire (&ftable_lock);
lock_acquire (&lru_lock);
void *frame = palloc_get_page (flags);
/* If a frame couldn't be allocated we must be out of main memory. Thus,
@@ -122,7 +125,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
memset (victim->frame, 0, PGSIZE);
/* 3. Indicate that the new frame's metadata will be stored
inside the same structure that stored the victim's metadata.frame.c
inside the same structure that stored the victim's metadata.
As both the new frame and the victim frame share the same kernel
virtual address, the hash map need not be updated, and neither
the list_elem value as both share the same lru_list position. */
@@ -133,7 +136,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
we must update the frame table with a new entry, and grow lru_list. */
else
{
/* Must own ftable_lock here, as otherwise there is a race condition
/* Must own lru_lock here, as otherwise there is a race condition
with next_victim either being NULL or uninitialized. */
frame_metadata = malloc (sizeof (struct frame_metadata));
if (frame_metadata == NULL)
@@ -165,36 +168,30 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
list_push_back (&frame_metadata->owners, &frame_owner->elem);
frame_metadata->upage = upage;
frame_metadata->pinned = false;
lock_release (&ftable_lock);
lock_release (&lru_lock);
return frame_metadata->frame;
}
/* Pins the frame at kernel virtual address FRAME so the eviction
   algorithm will skip it (its metadata's 'pinned' flag is set true).
   Panics if FRAME has no entry in the frame table, i.e. it was never
   allocated via frame_alloc.
   NOTE(review): this is rendered from a diff with +/- markers stripped;
   the ftable_lock acquire/release below may be lines this commit
   removed — confirm against the post-commit frame.c. */
void
frame_pin (void *frame)
{
ASSERT (frame != NULL);
/* Serialise with other frame-table readers/writers. */
lock_acquire (&ftable_lock);
struct frame_metadata *frame_metadata = frame_metadata_get (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to pin a frame at an unallocated kernel address '%p'\n",
frame);
/* Mark the frame non-evictable. */
frame_metadata->pinned = true;
lock_release (&ftable_lock);
}
/* Unpins the frame at kernel virtual address FRAME, making it eligible
   for eviction again (its metadata's 'pinned' flag is cleared).
   Panics if FRAME has no entry in the frame table.
   NOTE(review): rendered from a diff with +/- markers stripped; the
   ftable_lock acquire/release may be lines this commit removed —
   confirm against the post-commit frame.c. */
void
frame_unpin (void *frame)
{
ASSERT (frame != NULL);
/* Serialise with other frame-table readers/writers. */
lock_acquire (&ftable_lock);
struct frame_metadata *frame_metadata = frame_metadata_get (frame);
if (frame_metadata == NULL)
PANIC ("Attempted to unpin a frame at an unallocated kernel address '%p'\n",
frame);
/* Allow the frame to be chosen as an eviction victim again. */
frame_metadata->pinned = false;
lock_release (&ftable_lock);
}
/* Attempt to deallocate a frame for a user process by removing it from the
@@ -210,7 +207,7 @@ frame_free (void *frame)
frame);
free_owners (&frame_metadata->owners);
lock_acquire (&ftable_lock);
lock_acquire (&lru_lock);
hash_delete (&frame_table, &frame_metadata->hash_elem);
list_remove (&frame_metadata->list_elem);
@@ -224,7 +221,7 @@ frame_free (void *frame)
else
next_victim = lru_next (next_victim);
}
lock_release (&ftable_lock);
lock_release (&lru_lock);
free (frame_metadata);
palloc_free_page (frame);
@@ -241,7 +238,6 @@ frame_owner_insert (void *frame, struct thread *owner)
struct frame_owner *frame_owner = malloc (sizeof (struct frame_owner));
if (frame_owner == NULL)
return false;
frame_owner->owner = owner;
list_push_back (&frame_metadata->owners, &frame_owner->elem);
return true;
@@ -264,7 +260,6 @@ frame_owner_remove (void *frame, struct thread *owner)
{
struct frame_owner *frame_owner
= list_entry (oe, struct frame_owner, elem);
oe = list_next (oe);
if (frame_owner->owner == owner)
{
@@ -286,12 +281,12 @@ frame_metadata_find (void *frame)
struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
if (e == NULL)
return NULL;
return hash_entry (e, struct frame_metadata, hash_elem);
}
/* TODO: Account for page aliases when checking accessed bit. */
/* A pre-condition for calling this function is that the calling thread
owns ftable_lock and that lru_list is non-empty. */
owns lru_lock and that lru_list is non-empty. */
static struct frame_metadata *
get_victim (void)
{
@@ -377,6 +372,7 @@ frame_metadata_get (void *frame)
struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
if (e == NULL) return NULL;
return hash_entry (e, struct frame_metadata, hash_elem);
}

View File

@@ -10,12 +10,7 @@ struct frame_owner
struct list_elem elem; /* List element for the list of owners. */
};
/* Synchronisation variables. */
/* Protects access to the frame table and its related components. */
struct lock ftable_lock;
void frame_init (void);
void *frame_alloc (enum palloc_flags, void *, struct thread *);
void frame_pin (void *frame);
void frame_unpin (void *frame);

View File

@@ -67,27 +67,26 @@ mmap_unmap (struct mmap_entry *mmap)
if necessary. */
off_t length = file_length (mmap->file);
for (off_t ofs = 0; ofs < length; ofs += PGSIZE)
{
void *upage = mmap->upage + ofs;
/* Get the SPT page entry for this page. */
struct page_entry *page = page_get(thread_current (), upage);
if (page == NULL)
continue;
/* Write the page back to the file if it is dirty. */
if (pagedir_is_dirty (thread_current ()->pagedir, upage))
{
void *upage = mmap->upage + ofs;
/* Get the SPT page entry for this page. */
struct page_entry *page = page_get(thread_current (), upage);
if (page == NULL)
continue;
/* Write the page back to the file if it is dirty. */
if (pagedir_is_dirty (thread_current ()->pagedir, upage))
{
lock_acquire (&filesys_lock);
file_write_at (mmap->file, upage, page->read_bytes, ofs);
lock_release (&filesys_lock);
}
/* Remove the page from the supplemental page table. */
hash_delete (&thread_current ()->pages, &page->elem);
lock_acquire (&filesys_lock);
file_write_at (mmap->file, upage, page->read_bytes, ofs);
lock_release (&filesys_lock);
}
/* Close the file and free the mmap entry. */
/* Remove the page from the supplemental page table. */
hash_delete (&thread_current ()->pages, &page->elem);
}
file_close (mmap->file);
free (mmap);
}

View File

@@ -90,7 +90,6 @@ page_insert_swapped (void *upage, void *kpage, struct list *owners)
/* 1. Initialize swapped page entry. */
struct page_entry *page = page_get (owner, upage);
lock_acquire (&owner->pages_lock);
if (page == NULL)
{
page = malloc (sizeof (struct page_entry));
@@ -114,7 +113,6 @@ page_insert_swapped (void *upage, void *kpage, struct list *owners)
page_set_swap (owner, pte, swap_slot);
lock_release (&page->lock);
lock_release (&owner->pages_lock);
}
if (exec_file != NULL)
{
@@ -135,7 +133,8 @@ page_insert_file (struct file *file, off_t ofs, void *upage,
enum page_type type)
{
/* If page exists, just update it. */
struct page_entry *existing = page_get (thread_current (), upage);
struct thread *t = thread_current ();
struct page_entry *existing = page_get (t, upage);
if (existing != NULL)
{
ASSERT (existing->read_bytes == read_bytes);
@@ -143,7 +142,7 @@ page_insert_file (struct file *file, off_t ofs, void *upage,
existing->writable = existing->writable || writable;
return existing;
}
struct page_entry *page = malloc(sizeof (struct page_entry));
if (page == NULL)
return NULL;
@@ -157,10 +156,7 @@ page_insert_file (struct file *file, off_t ofs, void *upage,
page->writable = writable;
lock_init (&page->lock);
struct thread *t = thread_current ();
lock_acquire (&t->pages_lock);
hash_insert (&t->pages, &page->elem);
lock_release (&t->pages_lock);
hash_insert (&thread_current ()->pages, &page->elem);
return page;
}
@@ -169,13 +165,13 @@ page_insert_file (struct file *file, off_t ofs, void *upage,
struct page_entry *
page_get (struct thread *thread, void *upage)
{
lock_acquire (&thread->spt_lock);
struct page_entry fake_page_entry;
fake_page_entry.upage = upage;
lock_acquire (&thread->pages_lock);
struct hash_elem *e
= hash_find (&thread->pages, &fake_page_entry.elem);
lock_release (&thread->pages_lock);
lock_release (&thread->spt_lock);
if (e == NULL)
return NULL;
@@ -195,7 +191,6 @@ page_load_file (struct page_entry *page)
lock_acquire (&shared_file_pages_lock);
struct shared_file_page *sfp
= shared_file_page_get (page->file, page->upage);
if (sfp != NULL)
{
/* Frame exists, just install it. */
@@ -206,10 +201,13 @@ page_load_file (struct page_entry *page)
lock_release (&shared_file_pages_lock);
return false;
}
frame_owner_insert (sfp->frame, t);
/* First time adding the shared page, so add thread as owner. */
if (page->type != PAGE_SHARED)
{
frame_owner_insert (sfp->frame, t);
}
}
/* Otherwise, shared page is in swap. Load it. */
/* Shared page is in swap. Load it. */
else
{
void *frame = frame_alloc (PAL_USER, page->upage, t);
@@ -225,7 +223,6 @@ page_load_file (struct page_entry *page)
return false;
}
}
page_flag_shared (t, page->upage, true);
if (page->type != PAGE_SHARED)
{
@@ -296,6 +293,7 @@ page_load_file (struct page_entry *page)
void
page_cleanup (struct hash_elem *e, void *aux UNUSED)
{
lock_acquire (&thread_current ()->spt_lock);
struct page_entry *page = hash_entry (e, struct page_entry, elem);
if (page->type == PAGE_SHARED)
{
@@ -318,17 +316,18 @@ page_cleanup (struct hash_elem *e, void *aux UNUSED)
lock_release (&shared_file_pages_lock);
}
free (page);
lock_release (&thread_current ()->spt_lock);
}
/* Flags the provided page table entry as representing a swapped out page. */
void
page_flag_swap (uint32_t *pte, bool set)
{
if (set)
*pte |= (1 << SWAP_FLAG_BIT);
else
*pte &= ~(1 << SWAP_FLAG_BIT);
}
{
if (set)
*pte |= (1 << SWAP_FLAG_BIT);
else
*pte &= ~(1 << SWAP_FLAG_BIT);
}
/* Sets the address bits of the page table entry to the provided swap slot
value. To be used for later retrieval of the swap slot when page faulting. */
@@ -349,7 +348,9 @@ page_set_swap (struct thread *owner, uint32_t *pte, size_t swap_slot)
bool
page_in_swap (struct thread *owner, void *upage)
{
lock_acquire (&owner->spt_lock);
uint32_t *pte = lookup_page (owner->pagedir, upage, false);
lock_release (&owner->spt_lock);
return page_in_swap_pte (pte);
}
@@ -366,6 +367,7 @@ page_in_swap_pte (uint32_t *pte)
size_t
page_get_swap (struct thread *owner, void *upage)
{
lock_acquire (&owner->spt_lock);
uint32_t *pte = lookup_page (owner->pagedir, upage, false);
ASSERT (pte != NULL);
@@ -373,6 +375,7 @@ page_get_swap (struct thread *owner, void *upage)
/* Masks the address bits and returns truncated value. */
page_flag_swap (pte, false);
lock_release (&owner->spt_lock);
return ((*pte & PTE_ADDR) >> ADDR_START_BIT);
}

View File

@@ -7,8 +7,9 @@
enum page_type
{
PAGE_EXECUTABLE,
PAGE_FILE,
PAGE_MMAP,
PAGE_EMPTY,
PAGE_SHARED
};
@@ -34,18 +35,13 @@ struct page_entry
struct shared_file_page
{
struct file *file; /* The shared file page's source file, used for indexing
the table. */
void *upage; /* The shared page's upage which is the same across all process
using it. Used for indexing the table. */
void *frame; /* Set to the frame address of the page when it is in memory.
Set to NULL when the page is in swap. */
size_t swap_slot; /* Set to the swap_slot of the shared paged if it is
currently in swap. Should not be used when frame is not
NULL.*/
int ref_count; /* Number of processes that are using this shared page. */
struct file *file;
void *upage;
void *frame;
size_t swap_slot;
int ref_count;
struct hash_elem elem; /* AN elem for the hash table. */
struct hash_elem elem;
};
bool init_pages (struct hash *pages);