fix: only use lazy loading if VM flag is enabled

Author: Themis Demetriades
Date:   2024-12-04 21:33:21 +00:00
Parent: 1e236a5c47
Commit: 723055f485

3 changed files with 69 additions and 4 deletions
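The pattern is the same in all three files below: code and data that only exist for the lazy-loading (VM) build are wrapped in #ifdef VM ... #endif, and the old eager-loading behaviour is kept under #else / #ifndef VM. The stand-alone toy program below is only meant to illustrate that compile-time switch; its names (load_segment_demo, lazy_page, the fake in-memory backing "file") are hypothetical stand-ins, not the Pintos code itself.

/* Build with lazy loading:   gcc -DVM demo.c -o demo
   Build with eager loading:  gcc demo.c -o demo      */
#include <stdio.h>
#include <string.h>

#define PGSIZE 16                         /* tiny "page" for the demo */

static const char backing_file[] =
  "The quick brown fox jumps over the lazy dog.";
static char memory[2][PGSIZE];            /* pretend user pages */

#ifdef VM
/* Lazy: only record where each page's bytes live; fetch them later. */
struct lazy_page { size_t ofs; size_t read_bytes; };
static struct lazy_page page_table[2];

static void load_segment_demo (size_t pages)
{
  for (size_t i = 0; i < pages; i++)
    page_table[i] = (struct lazy_page) { i * PGSIZE, PGSIZE };
  printf ("recorded %zu lazy pages; no data copied yet\n", pages);
}
#else
/* Eager: copy every page's bytes up front. */
static void load_segment_demo (size_t pages)
{
  for (size_t i = 0; i < pages; i++)
    memcpy (memory[i], backing_file + i * PGSIZE, PGSIZE);
  printf ("copied %zu pages eagerly\n", pages);
}
#endif

int main (void)
{
  load_segment_demo (2);
  return 0;
}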


@@ -259,14 +259,19 @@ thread_create (const char *name, int priority,
       return TID_ERROR;
     }
 
+#define USERPROG
 #ifdef USERPROG
   /* Initialize the thread's file descriptor table. */
   t->fd_counter = MINIMUM_USER_FD;
-  if (!hash_init (&t->open_files, fd_hash, fd_less, NULL)
-      || !hash_init (&t->child_results, process_result_hash,
-                     process_result_less, t)
-      || !hash_init (&t->pages, page_hash, page_less, NULL))
+  bool success = hash_init (&t->open_files, fd_hash, fd_less, NULL);
+  success = success && hash_init (&t->child_results, process_result_hash,
+                                  process_result_less, t);
+#ifdef VM
+  success = success && hash_init (&t->pages, page_hash, page_less, NULL);
+#endif
+  if (!success)
     {
       palloc_free_page (t);
       free (t->result);
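One detail of the thread_create change above is worth noting: because && short-circuits, once success becomes false the remaining hash_init calls are skipped, so a later table is never initialised after an earlier one has already failed. A minimal stand-alone illustration, where init_a and init_b are hypothetical stand-ins for the hash_init calls:

#include <stdbool.h>
#include <stdio.h>

static bool init_a (void) { puts ("init_a ran"); return false; }
static bool init_b (void) { puts ("init_b ran"); return true; }

int main (void)
{
  bool success = init_a ();
  /* init_a failed, so && short-circuits and init_b never runs. */
  success = success && init_b ();
  printf ("success = %d\n", success);   /* prints: success = 0 */
  return 0;
}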


@@ -215,6 +215,7 @@ page_fault (struct intr_frame *f)
   kill (f);
 }
 
+#ifdef VM
 bool
 try_fetch_page (void *upage, bool write)
 {
@@ -244,3 +245,5 @@ try_fetch_page (void *upage, bool write)
   return success;
 }
+#endif
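Since try_fetch_page is now compiled only when VM is defined, any code that calls it, such as the fault handler above, presumably has to be guarded the same way so that a non-VM build still compiles and links. The toy below only shows that shape, under assumed names (try_fetch_page_demo, fault_demo); it is not the real exception.c logic.

#include <stdbool.h>
#include <stdio.h>

#ifdef VM
/* Only exists in VM builds; pretends to bring the page in. */
static bool try_fetch_page_demo (void *upage, bool write)
{
  printf ("lazily fetching %p (write=%d)\n", upage, (int) write);
  return true;
}
#endif

static void fault_demo (void *addr, bool write)
{
#ifdef VM
  if (try_fetch_page_demo (addr, write))
    return;                    /* fault satisfied by lazy loading */
#endif
  printf ("no lazy loading: %p is treated as a fatal fault\n", addr);
}

int main (void)
{
  int x = 0;
  fault_demo (&x, true);
  return 0;
}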


@@ -365,7 +365,9 @@ process_exit (void)
   /* Clean up all open files */
   hash_destroy (&cur->open_files, fd_cleanup);
+#ifdef VM
   hash_destroy (&cur->pages, page_cleanup);
+#endif
 
   /* Close the executable file, implicitly allowing it to be written to. */
   if (cur->exec_file != NULL)
@@ -623,6 +625,9 @@ load (const char *file_name, void (**eip) (void), void **esp)
 done:
   /* We arrive here whether the load is successful or not. */
+#ifndef VM
+  file_close (file);
+#endif
   lock_release (&filesys_lock);
   return success;
 }
@@ -696,6 +701,7 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
   ASSERT (pg_ofs (upage) == 0);
   ASSERT (ofs % PGSIZE == 0);
 
+#ifdef VM
   while (read_bytes > 0 || zero_bytes > 0)
     {
       /* Calculate how to fill this page.
@@ -716,6 +722,57 @@ load_segment (struct file *file, off_t ofs, uint8_t *upage,
       upage += PGSIZE;
     }
   return true;
+#else
+  file_seek (file, ofs);
+  while (read_bytes > 0 || zero_bytes > 0)
+    {
+      /* Calculate how to fill this page.
+         We will read PAGE_READ_BYTES bytes from FILE
+         and zero the final PAGE_ZERO_BYTES bytes. */
+      size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE;
+      size_t page_zero_bytes = PGSIZE - page_read_bytes;
+
+      /* Check if virtual page already allocated */
+      struct thread *t = thread_current ();
+      uint8_t *kpage = pagedir_get_page (t->pagedir, upage);
+      if (kpage == NULL){
+        /* Get a new page of memory. */
+        kpage = get_usr_kpage (0, upage);
+        if (kpage == NULL){
+          return false;
+        }
+
+        /* Add the page to the process's address space. */
+        if (!install_page (upage, kpage, writable))
+          {
+            free_usr_kpage (kpage);
+            return false;
+          }
+      } else {
+        /* Check if writable flag for the page should be updated */
+        if(writable && !pagedir_is_writable(t->pagedir, upage)){
+          pagedir_set_writable(t->pagedir, upage, writable);
+        }
+      }
+
+      /* Load data into the page. */
+      if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes){
+        return false;
+      }
+      memset (kpage + page_read_bytes, 0, page_zero_bytes);
+
+      /* Advance. */
+      read_bytes -= page_read_bytes;
+      zero_bytes -= page_zero_bytes;
+      upage += PGSIZE;
+    }
+  return true;
+#endif
 }
 
 /* Create a minimal stack by mapping a zeroed page at the top of