Merge remote-tracking branch 'origin/vm/page-swap-synch' into vm/virtual-memory/saleh
# Conflicts:
#	.gitlab-ci.yml
#	src/Makefile.build
#	src/threads/thread.c
#	src/userprog/exception.c
#	src/userprog/process.c
#	src/vm/frame.c
#	src/vm/page.c
#	src/vm/page.h
#	src/vm/stackgrowth.c
#	src/vm/stackgrowth.h
@@ -66,6 +66,7 @@ vm_SRC += vm/frame.c      # Frame table manager.
vm_SRC += vm/page.c       # Page table manager.
vm_SRC += vm/mmap.c       # Memory-mapped files.
vm_SRC += devices/swap.c  # Swap block manager.
#vm_SRC = vm/file.c       # Some other file.

# Filesystem code.
filesys_SRC = filesys/filesys.c  # Filesystem core.

@@ -149,6 +149,10 @@ struct thread
    struct hash open_files;         /* Hash Table of FD -> Struct File. */
#endif

#ifdef VM
    struct hash pages;              /* Table of open user pages. */
#endif

    void *curr_esp;

    /* Owned by thread.c. */

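The new 'pages' table needs to be initialised with the SPT hash and comparator from vm/page.c before the process touches user memory. A sketch of that call is below; neither the initialisation site nor the comparator's name appears in this diff, so 'page_less' and the variable 't' (the thread being set up) are placeholders.

#ifdef VM
  /* Sketch only: initialise the thread's supplemental page table.
     page_hash is the SPT hash function from vm/page.c; 'page_less'
     stands in for the SPT comparator, whose real name is not shown
     in this diff. */
  hash_init (&t->pages, page_hash, page_less, NULL);
#endif
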
@@ -1,7 +1,7 @@
# -*- makefile -*-

kernel.bin: DEFINES = -DUSERPROG -DFILESYS -DVM
KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys vm
kernel.bin: DEFINES = -DUSERPROG -DFILESYS
KERNEL_SUBDIRS = threads devices lib lib/kernel userprog filesys
TEST_SUBDIRS = tests/userprog tests/userprog/no-vm tests/filesys/base
GRADING_FILE = $(SRCDIR)/tests/userprog/Grading
SIMULATOR = --qemu

@@ -1,6 +1,7 @@
#include "userprog/exception.h"
#include <inttypes.h>
#include <stdio.h>
#include "stdbool.h"
#include "userprog/gdt.h"
#include "userprog/pagedir.h"
#include "userprog/process.h"
@@ -8,11 +9,13 @@
#include "threads/palloc.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#include "vm/frame.h"
#include "vm/page.h"
#include "devices/swap.h"
#include "userprog/pagedir.h"

#define MAX_STACK_SIZE (8 * 1024 * 1024)  // 8 MB maximum user stack size.
#define MAX_STACK_OFFSET 32               // Faults up to 32 bytes below the stack
                                          // pointer (ESP) count as stack accesses
                                          // (PUSHA pushes 32 bytes at once).

/* Number of page faults processed. */
static long long page_fault_cnt;

@@ -164,9 +167,24 @@ page_fault (struct intr_frame *f)
     be just that the stack needs to grow or that the page needs to be lazily
     loaded.  So we first check whether the page was swapped out, then attempt
     to grow the stack, and otherwise check our SPT to see if the page is
     expected to have data loaded in memory. */
  struct thread *t = thread_current ();
  void *upage = pg_round_down (fault_addr);
  if (not_present && is_user_vaddr (upage) && upage != NULL)
    {
      /* Check if the non-present user page is in the swap partition.
         If so, swap it back into main memory, updating the PTE for
         the faulted virtual address to point to the newly allocated
         frame. */
      if (page_in_swap (t, fault_addr))
        {
          size_t swap_slot = page_get_swap (t, fault_addr);
          void *kpage = frame_alloc (0, upage, t);
          swap_in (kpage, swap_slot);

          bool writeable = pagedir_is_writable (t->pagedir, upage);
          if (pagedir_set_page (t->pagedir, upage, kpage, writeable))
            return;
        }

      if (is_valid_stack_access (fault_addr, esp))
        if (grow_stack (upage))
          return;

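is_valid_stack_access and grow_stack come from src/vm/stackgrowth.c, which this diff does not show. The sketch below is one plausible reading of the check implied by MAX_STACK_SIZE and MAX_STACK_OFFSET, not the actual implementation in stackgrowth.c.

/* Sketch only (not part of the diff): a faulting address counts as a
   stack access if it lies within the 8 MB stack region below PHYS_BASE
   and is no more than MAX_STACK_OFFSET bytes below the user stack
   pointer saved at the time of the fault. */
static bool
is_valid_stack_access_sketch (const void *fault_addr, const void *esp)
{
  if (!is_user_vaddr (fault_addr))
    return false;

  /* Inside the maximum stack region below PHYS_BASE. */
  if ((uint8_t *) fault_addr < (uint8_t *) PHYS_BASE - MAX_STACK_SIZE)
    return false;

  /* At or above ESP, or at most MAX_STACK_OFFSET bytes below it
     (PUSHA may fault up to 32 bytes below ESP). */
  return (uint8_t *) fault_addr >= (uint8_t *) esp - MAX_STACK_OFFSET;
}
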
@@ -7,7 +7,6 @@
#include "threads/palloc.h"

static uint32_t *active_pd (void);
static void invalidate_pagedir (uint32_t *);

/* Creates a new page directory that has mappings for kernel
   virtual addresses, but none for user virtual addresses.
@@ -53,7 +52,7 @@ pagedir_destroy (uint32_t *pd)
   on CREATE.  If CREATE is true, then a new page table is
   created and a pointer into it is returned.  Otherwise, a null
   pointer is returned. */
static uint32_t *
uint32_t *
lookup_page (uint32_t *pd, const void *vaddr, bool create)
{
  uint32_t *pt, *pde;
@@ -278,7 +277,7 @@ active_pd (void)
   This function invalidates the TLB if PD is the active page
   directory.  (If PD is not active then its entries are not in
   the TLB, so there is no need to invalidate anything.) */
static void
void
invalidate_pagedir (uint32_t *pd)
{
  if (active_pd () == pd)

@@ -6,6 +6,7 @@

uint32_t *pagedir_create (void);
void pagedir_destroy (uint32_t *pd);
uint32_t *lookup_page (uint32_t *pd, const void *vaddr, bool create);
bool pagedir_set_page (uint32_t *pd, void *upage, void *kpage, bool rw);
void *pagedir_get_page (uint32_t *pd, const void *upage);
void pagedir_clear_page (uint32_t *pd, void *upage);
@@ -16,5 +17,6 @@ void pagedir_set_accessed (uint32_t *pd, const void *upage, bool accessed);
bool pagedir_is_writable (uint32_t *pd, const void *upage);
void pagedir_set_writable (uint32_t *pd, const void *upage, bool writable);
void pagedir_activate (uint32_t *pd);
void invalidate_pagedir (uint32_t *pd);

#endif /* userprog/pagedir.h */

@@ -369,6 +369,8 @@ process_exit (void)

  /* Clean up all open files */
  hash_destroy (&cur->open_files, fd_cleanup);

  /* Clean up the thread's supplemental page table. */
  hash_destroy (&cur->pages, page_cleanup);

  /* Close the executable file, implicitly allowing it to be written to. */
@@ -627,6 +629,9 @@ load (const char *file_name, void (**eip) (void), void **esp)

done:
  /* We arrive here whether the load is successful or not. */
#ifndef VM
  file_close (file);
#endif
  lock_release (&filesys_lock);
  return success;
}
@@ -758,6 +763,7 @@ get_usr_kpage (enum palloc_flags flags, void *upage)
    return NULL;
  else
    page = frame_alloc (flags, upage, t);
  pagedir_set_accessed (t->pagedir, upage, true);
#else
  page = palloc_get_page (flags | PAL_USER);
#endif

@@ -616,4 +616,4 @@ put_user (uint8_t *udst, uint8_t byte)
       : "=&a"(error_code), "=m"(*udst)
       : "q"(byte));
  return error_code != -1;
}
}

src/vm/frame.c | 181
@@ -7,6 +7,7 @@
#include "page.h"
#include "threads/malloc.h"
#include "threads/vaddr.h"
#include "userprog/pagedir.h"
#include "threads/synch.h"
#include "devices/swap.h"

@@ -14,20 +15,22 @@
   to its corresponding 'frame_metadata'. */
struct hash frame_table;

/* Linked list of frame_metadata whose pages are predicted to currently
   be in the working set of a process.  They are not considered for
   eviction, but are considered for demotion to the 'inactive' list. */
struct list active_list;
/* Linked list used to represent the circular queue in the 'clock'
   algorithm for page eviction.  Iterating from the element that is
   currently pointed at by 'next_victim' yields an ordering of the entries
   from oldest to newest (in terms of when they were added or last checked
   for having been referenced by a process). */
struct list lru_list;

/* Linked list of frame_metadata whose pages are predicted to leave the
   working set of their processes soon, so are considered for eviction.
   Pages are considered for eviction from the tail end, and are initially
   demoted to 'inactive' at the head. */
struct list inactive_list;
/* The next element in lru_list to be considered for eviction (the oldest
   added or referenced page in the circular queue).  If this page has an
   'accessed' bit of 0 when considered for eviction, it will be the next
   victim.  Otherwise, the next element in the queue is similarly considered. */
struct list_elem *next_victim = NULL;

/* Synchronisation variables. */
/* Protects access to the 'inactive' list. */
struct lock inactive_lock;
/* Protects access to 'lru_list'. */
struct lock lru_lock;

struct frame_metadata
{
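The hunk cuts off at the opening brace of struct frame_metadata, so its body is not shown. From the fields referenced elsewhere in this file, its shape is roughly as follows; the ordering and the comments are assumptions.

struct frame_metadata
  {
    void *frame;                 /* Kernel virtual address of the frame. */
    void *upage;                 /* User virtual page occupying the frame. */
    struct thread *owner;        /* Thread whose page table maps 'upage'. */
    struct list_elem list_elem;  /* Position in lru_list (the clock queue). */
    struct hash_elem hash_elem;  /* Entry in frame_table, keyed by 'frame'. */
  };
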
@@ -45,22 +48,24 @@ struct frame_metadata
hash_hash_func frame_metadata_hash;
hash_less_func frame_metadata_less;

static struct list_elem *lru_next (struct list_elem *e);
static struct list_elem *lru_prev (struct list_elem *e);
static struct frame_metadata *get_victim (void);

/* Initialize the frame system by initializing the frame (hash) table with
   the frame_metadata hashing and comparison functions, as well as initializing
   the active & inactive lists.  Also initializes the system's synchronisation
   primitives. */
   'lru_list' and its associated synchronisation primitives. */
void
frame_init (void)
{
  hash_init (&frame_table, frame_metadata_hash, frame_metadata_less, NULL);
  list_init (&active_list);
  list_init (&inactive_list);

  lock_init (&inactive_lock);
  list_init (&lru_list);
  lock_init (&lru_lock);
}

/* TODO: Consider synchronisation more closely (i.e. just for hash
   table). */
/* Attempt to allocate a frame for a user process, either by direct
   allocation of a user page if there is sufficient RAM, or by
   evicting a currently active page if memory allocated for user
@@ -69,7 +74,10 @@ frame_init (void)
void *
frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
{
  struct frame_metadata *frame_metadata;
  flags |= PAL_USER;

  lock_acquire (&lru_lock);
  void *frame = palloc_get_page (flags);

  /* If a frame couldn't be allocated we must be out of main memory.  Thus,
@@ -77,11 +85,20 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
     into disk. */
  if (frame == NULL)
    {
      /* TODO: Deal with race condition wherein a page may be evicted in one
         thread while it's in the middle of being evicted in another. */
      /* 1. Obtain victim. */
      if (next_victim == NULL)
        PANIC ("Couldn't allocate a single page to main memory!\n");

      struct frame_metadata *victim = get_victim ();
      if (victim == NULL)
        return NULL;
      ASSERT (victim != NULL);  /* get_victim () should never return null. */

      /* 2. Swap out victim into disk. */
      /* Mark page as 'not present' and flag the page directory as having
         been modified *before* eviction begins to prevent the owner of the
         victim page from accessing/modifying it mid-eviction. */
      pagedir_clear_page (victim->owner->pagedir, victim->upage);

      // TODO: Lock PTE of victim page for victim process.

      size_t swap_slot = swap_out (victim->frame);
      page_set_swap (victim->owner, victim->upage, swap_slot);
@@ -90,30 +107,50 @@ frame_alloc (enum palloc_flags flags, void *upage, struct thread *owner)
      if (flags & PAL_ZERO)
        memset (victim->frame, 0, PGSIZE);

      frame = victim->frame;
      /* 3. Indicate that the new frame's metadata will be stored
         inside the same structure that stored the victim's metadata.
         As both the new frame and the victim frame share the same kernel
         virtual address, the hash map need not be updated, and neither
         need the list_elem value, as both share the same lru_list position. */
      frame_metadata = victim;
    }

  /* If sufficient main memory allows the frame to be directly allocated,
     we must update the frame table with a new entry, and grow lru_list. */
  else
    {
      /* Must own lru_lock here, as otherwise there is a race condition
         with next_victim either being NULL or uninitialized. */
      frame_metadata = malloc (sizeof (struct frame_metadata));
      frame_metadata->frame = frame;

      /* Newly allocated frames are pushed to the back of the circular queue
         represented by lru_list.  Must explicitly handle the case where the
         circular queue is empty (when next_victim == NULL). */
      if (next_victim == NULL)
        {
          list_push_back (&lru_list, &frame_metadata->list_elem);
          next_victim = &frame_metadata->list_elem;
        }
      else
        {
          struct list_elem *lru_tail = lru_prev (next_victim);
          list_insert (lru_tail, &frame_metadata->list_elem);
        }

      hash_insert (&frame_table, &frame_metadata->hash_elem);
    }

  struct frame_metadata *frame_metadata =
      malloc (sizeof (struct frame_metadata));
  frame_metadata->frame = frame;
  frame_metadata->upage = upage;
  frame_metadata->owner = owner;
  lock_release (&lru_lock);

  /* Newly faulted pages begin at the head of the inactive list. */
  lock_acquire (&inactive_lock);
  list_push_front (&inactive_list, &frame_metadata->list_elem);
  lock_release (&inactive_lock);

  /* Finally, insert frame metadata within the frame table, with the key as its
     allocated kernel address. */
  hash_replace (&frame_table, &frame_metadata->hash_elem);

  return frame;
  return frame_metadata->frame;
}

/* Attempt to deallocate a frame for a user process by removing it from the
   frame table as well as active/inactive list, and freeing the underlying
   page memory.  Panics if the frame isn't active in memory. */
   frame table as well as lru_list, and freeing the underlying page
   memory & metadata struct.  Panics if the frame isn't active in memory. */
void
frame_free (void *frame)
{
@@ -122,33 +159,56 @@ frame_free (void *frame)

  struct hash_elem *e =
      hash_delete (&frame_table, &key_metadata.hash_elem);
  if (e == NULL) PANIC ("Attempted to free a frame without a corresponding "
                        "kernel address!\n");
  if (e == NULL) PANIC ("Attempted to free a frame at kernel address %p, "
                        "but this address is not allocated!\n", frame);

  struct frame_metadata *frame_metadata =
      hash_entry (e, struct frame_metadata, hash_elem);

  lock_acquire (&lru_lock);
  list_remove (&frame_metadata->list_elem);

  /* If we're freeing the frame marked as the next victim, update
     next_victim to either be the next least recently used page, or NULL
     if no pages are loaded in main memory. */
  if (&frame_metadata->list_elem == next_victim)
    {
      if (list_empty (&lru_list))
        next_victim = NULL;
      else
        next_victim = lru_next (next_victim);
    }
  lock_release (&lru_lock);

  free (frame_metadata);
  palloc_free_page (frame);
}

/* Obtain a pointer to the metadata of the frame we should evict next. */
/* TODO: Account for page aliases when checking accessed bit. */
/* A pre-condition for calling this function is that the calling thread
   owns lru_lock and that lru_list is non-empty. */
static struct frame_metadata *
get_victim (void)
{
  lock_acquire (&inactive_lock);
  if (list_empty (&inactive_list))
  struct list_elem *e = next_victim;
  struct frame_metadata *frame_metadata;
  uint32_t *pd;
  void *upage;
  for (;;)
    {
      return NULL;
    }
  else
    {
      struct list_elem *victim_elem = list_pop_back (&inactive_list);
      lock_release (&inactive_lock);

      return list_entry (victim_elem, struct frame_metadata, list_elem);
      frame_metadata = list_entry (e, struct frame_metadata, list_elem);
      pd = frame_metadata->owner->pagedir;
      upage = frame_metadata->upage;
      e = lru_next (e);

      if (!pagedir_is_accessed (pd, upage))
        break;

      pagedir_set_accessed (pd, upage, false);
    }

  next_victim = e;
  return frame_metadata;
}

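get_victim above implements the second-chance ('clock') policy over lru_list. The following standalone program illustrates the same policy on a plain array, independent of the Pintos list and pagedir types; every name in it is hypothetical and it only demonstrates the sweep, not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

#define NFRAMES 4

/* Toy clock sweep: starting from *HAND, give each referenced frame a
   second chance by clearing its bit; evict the first frame whose bit
   is already clear.  Mirrors the accessed-bit loop in get_victim. */
static int
clock_pick_victim (bool accessed[NFRAMES], int *hand)
{
  for (;;)
    {
      int i = *hand;
      *hand = (*hand + 1) % NFRAMES;   /* Advance the clock hand (lru_next). */
      if (!accessed[i])
        return i;                      /* Not referenced since the last sweep. */
      accessed[i] = false;             /* Second chance: clear and move on. */
    }
}

int
main (void)
{
  bool accessed[NFRAMES] = { true, true, false, true };
  int hand = 0;
  /* Frames 0 and 1 get second chances; frame 2 is picked as the victim. */
  printf ("victim: frame %d\n", clock_pick_victim (accessed, &hand));
  return 0;
}
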
/* Hash function for frame metadata, used for storing entries in the
@@ -177,3 +237,26 @@ frame_metadata_less (const struct hash_elem *a_, const struct hash_elem *b_,
  return a->frame < b->frame;
}

/* Returns the next recently used element after the one provided, which
   is achieved by iterating through lru_list like a circular queue
   (wrapping around the list at the tail). */
static struct list_elem *
lru_next (struct list_elem *e)
{
  if (!list_empty (&lru_list) && e == list_back (&lru_list))
    return list_front (&lru_list);

  return list_next (e);
}

/* Returns the previous recently used element before the one provided, which
   is achieved by iterating through lru_list like a circular queue
   (wrapping around the list at the head). */
static struct list_elem *
lru_prev (struct list_elem *e)
{
  if (!list_empty (&lru_list) && e == list_front (&lru_list))
    return list_back (&lru_list);

  return list_prev (e);
}

@@ -2,18 +2,23 @@
#include <string.h>
#include <stdio.h>
#include "filesys/file.h"
#include "threads/pte.h"
#include "threads/malloc.h"
#include "threads/palloc.h"
#include "userprog/process.h"
#include "userprog/pagedir.h"
#include "vm/frame.h"

/* Bit 9 of a PTE is ignored by the hardware (available to the OS), so it is
   used here to flag a page as resident in swap. */
#define SWAP_FLAG_BIT 9
/* The swap slot is stored in the PTE's address bits (bits 12 and up). */
#define ADDR_START_BIT 12

/* Hashing function needed for the SPT table.  Returns a hash for an entry,
   based on its upage. */
unsigned
page_hash (const struct hash_elem *e, UNUSED void *aux)
{
  struct page_entry *page = hash_entry (e, struct page_entry, elem);
  return hash_ptr(page->upage);
  return hash_ptr (page->upage);
}

/* Comparator function for the SPT table.  Compares two entries based on their
@@ -71,7 +76,9 @@ page_load (struct page_entry *page, bool writable)
  /* Allocate a frame for the page.  If a frame allocation fails, then
     frame_alloc should try to evict a page.  If it is still NULL, the OS
     panics as this should not happen if eviction is working correctly. */
  void *frame = frame_alloc (PAL_USER, page->upage, thread_current ());
  struct thread *t = thread_current ();
  void *frame = frame_alloc (PAL_USER, page->upage, t);
  pagedir_set_accessed (t->pagedir, page->upage, true);
  if (frame == NULL)
    PANIC ("Could not allocate a frame to load page into memory.");

@@ -106,3 +113,45 @@ page_cleanup (struct hash_elem *e, void *aux UNUSED)
{
  free (hash_entry (e, struct page_entry, elem));
}

/* Updates the 'owner' thread's page table entry for virtual address 'upage'
   to flag the page as being stored in swap, and stores the specified swap slot
   value in the entry's address bits for later retrieval from disk. */
void
page_set_swap (struct thread *owner, void *upage, size_t swap_slot)
{
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);

  /* Store the provided swap slot in the address bits of the page table
     entry, truncating excess bits. */
  *pte |= (1 << SWAP_FLAG_BIT);
  uint32_t swap_slot_bits = (swap_slot << ADDR_START_BIT) & PTE_ADDR;
  *pte = (*pte & PTE_FLAGS) | swap_slot_bits;

  invalidate_pagedir (owner->pagedir);
}

/* Returns true iff the page with user address 'upage' owned by 'owner'
   is flagged to be in the swap disk via the owner's page table. */
bool
page_in_swap (struct thread *owner, void *upage)
{
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);
  return pte != NULL
         && (*pte & (1 << SWAP_FLAG_BIT)) != 0;
}

/* Given that the page with user address 'upage' owned by 'owner' is flagged
   to be in the swap disk via the owner's page table, returns its stored
   swap slot.  Otherwise panics the kernel. */
size_t
page_get_swap (struct thread *owner, void *upage)
{
  uint32_t *pte = lookup_page (owner->pagedir, upage, false);

  ASSERT (pte != NULL);
  ASSERT ((*pte & PTE_P) == 0);

  /* Masks the address bits and returns the truncated value. */
  return ((*pte & PTE_ADDR) >> ADDR_START_BIT);
}

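The three helpers above pack a swap slot into the unused bits of a not-present PTE. The following standalone check mirrors that arithmetic with plain uint32_t values; the PTE_FLAGS/PTE_ADDR masks are written out as literals matching their usual Pintos definitions, and none of this is part of the diff.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWAP_FLAG_BIT  9            /* OS-available PTE bit used as the 'in swap' flag. */
#define ADDR_START_BIT 12           /* Address field starts at bit 12. */
#define PTE_FLAGS      0x00000fffu  /* Low 12 bits: flag bits. */
#define PTE_ADDR       0xfffff000u  /* High 20 bits: frame address / swap slot. */

int
main (void)
{
  uint32_t pte = 0x6;               /* Writable/user flag bits; present bit clear. */
  uint32_t swap_slot = 42;

  /* Encode, mirroring page_set_swap. */
  pte |= (1u << SWAP_FLAG_BIT);
  pte = (pte & PTE_FLAGS) | ((swap_slot << ADDR_START_BIT) & PTE_ADDR);

  /* Decode, mirroring page_in_swap / page_get_swap. */
  assert ((pte & (1u << SWAP_FLAG_BIT)) != 0);
  assert (((pte & PTE_ADDR) >> ADDR_START_BIT) == swap_slot);

  printf ("pte = 0x%08x, slot = %u\n", pte, (pte & PTE_ADDR) >> ADDR_START_BIT);
  return 0;
}
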
@@ -32,7 +32,9 @@ struct page_entry *page_insert (struct file *file, off_t ofs, void *upage,
struct page_entry *page_get (void *upage);
bool page_load (struct page_entry *page, bool writable);
void page_cleanup (struct hash_elem *e, void *aux);

void page_set_swap (struct thread *, void *, size_t);
bool page_in_swap (struct thread *, void *);
size_t page_get_swap (struct thread *, void *);

#endif /* vm/frame.h */