diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a291160..2f9cb09 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -38,4 +38,3 @@ test_vm:
   extends: .pintos_tests
   variables:
     DIR: vm
-    IGNORE: (tests/vm/pt-grow-stack|tests/vm/pt-grow-pusha|tests/vm/pt-big-stk-obj|tests/vm/pt-overflowstk|tests/vm/pt-write-code2|tests/vm/pt-grow-stk-sc|tests/vm/page-linear|tests/vm/page-parallel|tests/vm/page-merge-seq|tests/vm/page-merge-par|tests/vm/page-merge-stk|tests/vm/page-merge-mm|tests/vm/mmap-over-stk)
diff --git a/src/userprog/exception.c b/src/userprog/exception.c
index db07db9..84de825 100644
--- a/src/userprog/exception.c
+++ b/src/userprog/exception.c
@@ -289,7 +289,7 @@ fetch_page (void *upage, bool write)
   bool success = false;
   switch (page->type) {
     case PAGE_MMAP:
-    case PAGE_FILE:
+    case PAGE_EXECUTABLE:
     case PAGE_SHARED:
       success = page_load_file (page);
       break;
diff --git a/src/userprog/process.c b/src/userprog/process.c
index ca99aee..9024540 100644
--- a/src/userprog/process.c
+++ b/src/userprog/process.c
@@ -715,7 +715,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
 
       /* Add the page metadata to the SPT to be lazy loaded later on */
       if (page_insert_file (file, ofs, upage, page_read_bytes, page_zero_bytes,
-                            writable, PAGE_FILE) == NULL)
+                            writable, PAGE_EXECUTABLE) == NULL)
         return false;
 
       /* Advance. */
diff --git a/src/userprog/syscall.c b/src/userprog/syscall.c
index 8275d51..20e66fc 100644
--- a/src/userprog/syscall.c
+++ b/src/userprog/syscall.c
@@ -461,10 +461,9 @@ syscall_mmap (int fd, void *addr)
 
   /* Check and ensure that there is enough space in the user virtual
      memory to hold the entire file. */
   for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
-    {
     if (page_get (thread_current (), addr + ofs) != NULL)
       return MMAP_FAILURE;
-    }
+
   /* Map the file data into the user virtual memory starting from addr. */
   for (off_t ofs = 0; ofs < file_size; ofs += PGSIZE)
@@ -473,7 +472,7 @@ syscall_mmap (int fd, void *addr)
       off_t zero_bytes = PGSIZE - read_bytes;
 
       if (page_insert_file (file, ofs, addr + ofs, read_bytes, zero_bytes, true,
-                            PAGE_FILE) == NULL)
+                            PAGE_MMAP) == NULL)
         return MMAP_FAILURE;
     }
 
@@ -482,7 +481,6 @@ syscall_mmap (int fd, void *addr)
 
   if (mmap == NULL)
     return MMAP_FAILURE;
-
   return mmap->mapping;
 }
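
The two syscall_mmap loops above only insert per-page metadata into the SPT; no file data is read at mmap time, and the final page of the mapping is zero-padded. The standalone sketch below illustrates that per-page split. It is not part of the patch: the PGSIZE value and the read_bytes formula are assumptions inferred from the visible "zero_bytes = PGSIZE - read_bytes" context line.

/* Standalone sketch (not from the patch): per-page read/zero split for mmap. */
#include <stdio.h>

#define PGSIZE 4096   /* Assumed Pintos page size. */

static void
show_mmap_slices (long file_size)
{
  for (long ofs = 0; ofs < file_size; ofs += PGSIZE)
    {
      /* Assumed formula: full pages are read entirely; the last page reads
         whatever remains of the file and zero-fills the rest. */
      long read_bytes = file_size - ofs < PGSIZE ? file_size - ofs : PGSIZE;
      long zero_bytes = PGSIZE - read_bytes;
      printf ("ofs=%ld read=%ld zero=%ld\n", ofs, read_bytes, zero_bytes);
    }
}

int
main (void)
{
  show_mmap_slices (10000);   /* Two full pages plus one partial, zero-padded page. */
  return 0;
}
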
diff --git a/src/vm/frame.c b/src/vm/frame.c
index 4730558..1a6c9a1 100644
--- a/src/vm/frame.c
+++ b/src/vm/frame.c
@@ -28,9 +28,6 @@ struct list lru_list;
    victim. Otherwise, the next element in the queue is similarly considered. */
 struct list_elem *next_victim = NULL;
 
-/* Synchronisation variables. */
-/* Protects access to 'lru_list'. */
-struct lock lru_lock;
 
 struct frame_metadata
 {
@@ -43,8 +40,8 @@ struct frame_metadata
                                      within 'frame_table', whose key is the
                                      kernel virtual address of the frame. */
   struct list_elem list_elem;     /* Tracks the position of the frame metadata
-                                     in either the 'active' or 'inactive' list,
-                                     so a victim can be chosen for eviction. */
+                                     within 'lru_list', so a victim can be
+                                     chosen for eviction. */
 };
 
 hash_hash_func frame_metadata_hash;
@@ -66,7 +63,7 @@ frame_init (void)
   hash_init (&frame_table, frame_metadata_hash,
              frame_metadata_less, NULL);
   list_init (&lru_list);
-  lock_init (&lru_lock);
+  lock_init (&ftable_lock);
 }
 
 /* TODO: Consider synchronisation more closely (i.e. just for hash
@@ -82,7 +79,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
   struct frame_metadata *frame_metadata;
   flags |= PAL_USER;
 
-  lock_acquire (&lru_lock);
+  lock_acquire (&ftable_lock);
   void *frame = palloc_get_page (flags);
 
   /* If a frame couldn't be allocated we must be out of main memory. Thus,
@@ -125,7 +122,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
       memset (victim->frame, 0, PGSIZE);
 
       /* 3. Indicate that the new frame's metadata will be stored
-         inside the same structure that stored the victim's metadata.
+         inside the same structure that stored the victim's metadata.
          As both the new frame and the victim frame share the same kernel
         virtual address, the hash map need not be updated, and neither the
         list_elem value as both share the same lru_list position. */
@@ -136,7 +133,7 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
      we must update the frame table with a new entry, and grow lru_list. */
   else
     {
-      /* Must own lru_lock here, as otherwise there is a race condition
+      /* Must own ftable_lock here, as otherwise there is a race condition
         with next_victim either being NULL or uninitialized. */
      frame_metadata = malloc (sizeof (struct frame_metadata));
      if (frame_metadata == NULL)
@@ -168,30 +165,36 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
   list_push_back (&frame_metadata->owners, &frame_owner->elem);
   frame_metadata->upage = upage;
   frame_metadata->pinned = false;
-  lock_release (&lru_lock);
+  lock_release (&ftable_lock);
 
   return frame_metadata->frame;
 }
 
 void
 frame_pin (void *frame)
 {
+  ASSERT (frame != NULL);
+  lock_acquire (&ftable_lock);
+
   struct frame_metadata *frame_metadata = frame_metadata_get (frame);
   if (frame_metadata == NULL)
     PANIC ("Attempted to pin a frame at an unallocated kernel address '%p'\n",
            frame);
 
   frame_metadata->pinned = true;
+  lock_release (&ftable_lock);
 }
-
+
 void
 frame_unpin (void *frame)
 {
+  ASSERT (frame != NULL);
+  lock_acquire (&ftable_lock);
   struct frame_metadata *frame_metadata = frame_metadata_get (frame);
   if (frame_metadata == NULL)
     PANIC ("Attempted to unpin a frame at an unallocated kernel address '%p'\n",
            frame);
 
   frame_metadata->pinned = false;
+  lock_release (&ftable_lock);
 }
 
 /* Attempt to deallocate a frame for a user process by removing it from the
@@ -207,7 +210,7 @@ frame_free (void *frame)
            frame);
 
   free_owners (&frame_metadata->owners);
-  lock_acquire (&lru_lock);
+  lock_acquire (&ftable_lock);
   hash_delete (&frame_table, &frame_metadata->hash_elem);
   list_remove (&frame_metadata->list_elem);
 
@@ -221,7 +224,7 @@ frame_free (void *frame)
       else
         next_victim = lru_next (next_victim);
     }
-  lock_release (&lru_lock);
+  lock_release (&ftable_lock);
 
   free (frame_metadata);
   palloc_free_page (frame);
@@ -284,9 +287,8 @@ frame_metadata_find (void *frame)
   return hash_entry (e, struct frame_metadata, hash_elem);
 }
 
-/* TODO: Account for page aliases when checking accessed bit. */
 /* A pre-condition for calling this function is that the calling thread
-   owns lru_lock and that lru_list is non-empty.
+   owns ftable_lock and that lru_list is non-empty.
 */
 static struct frame_metadata *
 get_victim (void)
 {
@@ -372,7 +374,6 @@ frame_metadata_get (void *frame)
   struct hash_elem *e = hash_find (&frame_table, &key_metadata.hash_elem);
   if (e == NULL)
     return NULL;
-
   return hash_entry (e, struct frame_metadata, hash_elem);
 }
 
diff --git a/src/vm/frame.h b/src/vm/frame.h
index 76a801a..62a1ec6 100644
--- a/src/vm/frame.h
+++ b/src/vm/frame.h
@@ -10,7 +10,12 @@ struct frame_owner
   struct list_elem elem; /* List element for the list of owners. */
 };
 
+/* Synchronisation variables. */
+/* Protects access to the frame table and its related components. */
+struct lock ftable_lock;
+
 void frame_init (void);
+
 void *frame_alloc (enum palloc_flags, void *, struct thread *);
 void frame_pin (void *frame);
 void frame_unpin (void *frame);
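
With the rename from lru_lock to ftable_lock, frame_pin and frame_unpin now hold the lock across both the frame_metadata_get lookup and the pinned-flag update, so a concurrent frame_free cannot delete the metadata between the two steps. The standalone sketch below mirrors that shape outside Pintos; it is illustrative only, with a pthread mutex standing in for the Pintos lock and a one-entry lookup standing in for frame_metadata_get.

/* Standalone sketch (not from the patch): lock held across lookup and update. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct meta
{
  void *frame;   /* Kernel virtual address of the frame (stand-in). */
  bool pinned;   /* Set while the frame must not be evicted. */
};

static int dummy_frame;                              /* Pretend frame for the example. */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct meta example = { &dummy_frame, false };

/* Stand-in for frame_metadata_get: returns the entry or NULL. */
static struct meta *
lookup (void *frame)
{
  return frame == example.frame ? &example : NULL;
}

/* Mirrors the new frame_pin/frame_unpin shape. */
static void
set_pinned (void *frame, bool value)
{
  pthread_mutex_lock (&table_lock);     /* Like lock_acquire (&ftable_lock). */
  struct meta *m = lookup (frame);
  if (m != NULL)
    m->pinned = value;                  /* Entry cannot be freed while the lock is held. */
  pthread_mutex_unlock (&table_lock);   /* Like lock_release (&ftable_lock). */
}

int
main (void)
{
  set_pinned (&dummy_frame, true);
  printf ("pinned = %d\n", (int) example.pinned);
  set_pinned (&dummy_frame, false);
  return 0;
}
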
diff --git a/src/vm/page.c b/src/vm/page.c
index fb94fa0..8228687 100644
--- a/src/vm/page.c
+++ b/src/vm/page.c
@@ -183,6 +183,7 @@ page_load_file (struct page_entry *page)
      panics as this should not happen if eviction is working correctly. */
   struct thread *t = thread_current ();
   bool shareable = !page->writable && file_compare (page->file, t->exec_file);
+  shareable = false;
   if (shareable)
     {
       lock_acquire (&shared_file_pages_lock);
@@ -198,11 +199,7 @@ page_load_file (struct page_entry *page)
               lock_release (&shared_file_pages_lock);
               return false;
             }
-          /* First time adding the shared page, so add thread as owner. */
-          if (page->type != PAGE_SHARED)
-            {
-              frame_owner_insert (sfp->frame, t);
-            }
+          frame_owner_insert (sfp->frame, t);
         }
       /* Shared page is in swap. Load it. */
       else
diff --git a/src/vm/page.h b/src/vm/page.h
index 7b45d9d..994da72 100644
--- a/src/vm/page.h
+++ b/src/vm/page.h
@@ -7,9 +7,8 @@
 
 enum page_type
 {
-  PAGE_FILE,
+  PAGE_EXECUTABLE,
   PAGE_MMAP,
-  PAGE_EMPTY,
   PAGE_SHARED
 };
 
@@ -35,13 +34,18 @@ struct page_entry
 
 struct shared_file_page
 {
-  struct file *file;
-  void *upage;
-  void *frame;
-  size_t swap_slot;
-  int ref_count;
+  struct file *file;     /* The shared file page's source file, used for indexing
+                            the table. */
+  void *upage;           /* The shared page's upage, which is the same across all
+                            processes using it. Used for indexing the table. */
+  void *frame;           /* Set to the frame address of the page when it is in memory.
+                            Set to NULL when the page is in swap. */
+  size_t swap_slot;      /* Set to the swap slot of the shared page if it is
+                            currently in swap. Should not be used when frame is not
+                            NULL. */
+  int ref_count;         /* Number of processes that are using this shared page. */
 
-  struct hash_elem elem;
+  struct hash_elem elem; /* An elem for the hash table. */
 };
 
 bool init_pages (struct hash *pages);
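
The new shared_file_page comments pin down two invariants: frame is authoritative while the page is resident and swap_slot only matters once frame is NULL, and ref_count counts how many processes still map the page. The trimmed, standalone sketch below restates those invariants; the struct and helper names are illustrative, not the real page.h definitions.

/* Standalone sketch (not from the patch): shared-page residency and refcounting. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Trimmed stand-in for shared_file_page. */
struct shared_page_sim
{
  void *frame;       /* Non-NULL while the shared page is resident in memory. */
  size_t swap_slot;  /* Only meaningful once frame has been set to NULL. */
  int ref_count;     /* Number of processes currently mapping the page. */
};

static bool
resident (const struct shared_page_sim *p)
{
  return p->frame != NULL;
}

/* Drop one reference; the caller may free the entry when this returns true. */
static bool
unref (struct shared_page_sim *p)
{
  return --p->ref_count == 0;
}

int
main (void)
{
  static int dummy_frame;
  struct shared_page_sim p = { &dummy_frame, 0, 2 };

  printf ("resident: %d\n", (int) resident (&p));
  printf ("can free: %d\n", (int) unref (&p));  /* Still mapped by one process. */
  printf ("can free: %d\n", (int) unref (&p));  /* Last reference dropped. */
  return 0;
}
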