provided code

LabTS
2024-10-01 23:37:39 +01:00
commit 8724a2641e
697 changed files with 74252 additions and 0 deletions

src/threads/.gitignore

@@ -0,0 +1,3 @@
build
bochsrc.txt
bochsout.txt

src/threads/Make.vars

@@ -0,0 +1,7 @@
# -*- makefile -*-
kernel.bin: DEFINES = -DTHREADS
KERNEL_SUBDIRS = threads devices lib lib/kernel $(TEST_SUBDIRS)
TEST_SUBDIRS = tests/threads tests/devices
GRADING_FILE = $(SRCDIR)/tests/threads/Grading
SIMULATOR = --qemu

src/threads/Makefile

@@ -0,0 +1 @@
include ../Makefile.kernel

src/threads/flags.h

@@ -0,0 +1,8 @@
#ifndef THREADS_FLAGS_H
#define THREADS_FLAGS_H
/* EFLAGS Register. */
#define FLAG_MBS 0x00000002 /* Must be set. */
#define FLAG_IF 0x00000200 /* Interrupt Flag. */
#endif /* threads/flags.h */

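The only consumer of these masks in this commit is intr_get_level() in threads/interrupt.c, which tests a saved EFLAGS value against FLAG_IF. A minimal host-runnable sketch of that test (the sample EFLAGS value is hypothetical):

/* Standalone sketch; not part of the kernel tree. */
#include <stdint.h>
#include <stdio.h>

#define FLAG_MBS 0x00000002  /* Must be set. */
#define FLAG_IF  0x00000200  /* Interrupt Flag. */

int
main (void)
{
  uint32_t eflags = FLAG_MBS | FLAG_IF;   /* 0x202: interrupts enabled. */
  printf ("interrupts %s\n", (eflags & FLAG_IF) != 0 ? "on" : "off");
  return 0;
}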
src/threads/init.c

@@ -0,0 +1,437 @@
#include "threads/init.h"
#include <console.h>
#include <debug.h>
#include <inttypes.h>
#include <limits.h>
#include <random.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "devices/kbd.h"
#include "devices/input.h"
#include "devices/serial.h"
#include "devices/shutdown.h"
#include "devices/timer.h"
#include "devices/vga.h"
#include "devices/rtc.h"
#include "threads/interrupt.h"
#include "threads/io.h"
#include "threads/loader.h"
#include "threads/malloc.h"
#include "threads/palloc.h"
#include "threads/pte.h"
#include "threads/thread.h"
#ifdef USERPROG
#include "userprog/process.h"
#include "userprog/exception.h"
#include "userprog/gdt.h"
#include "userprog/syscall.h"
#include "userprog/tss.h"
#else
#include "tests/threads/tests.h"
#endif
#ifdef VM
#include "devices/swap.h"
#endif
#ifdef FILESYS
#include "devices/block.h"
#include "devices/ide.h"
#include "filesys/filesys.h"
#include "filesys/fsutil.h"
#endif
/* Page directory with kernel mappings only. */
uint32_t *init_page_dir;
#ifdef FILESYS
/* -f: Format the file system? */
static bool format_filesys;
/* -filesys, -scratch, -swap: Names of block devices to use,
overriding the defaults. */
static const char *filesys_bdev_name;
static const char *scratch_bdev_name;
#ifdef VM
static const char *swap_bdev_name;
#endif
#endif /* FILESYS */
/* -ul: Maximum number of pages to put into palloc's user pool. */
static size_t user_page_limit = SIZE_MAX;
static void bss_init (void);
static void paging_init (void);
static char **read_command_line (void);
static char **parse_options (char **argv);
static void run_actions (char **argv);
static void usage (void);
#ifdef FILESYS
static void locate_block_devices (void);
static void locate_block_device (enum block_type, const char *name);
#endif
int main (void) NO_RETURN;
/* PintOS main program. */
int
main (void)
{
char **argv;
/* Clear BSS. */
bss_init ();
/* Break command line into arguments and parse options. */
argv = read_command_line ();
argv = parse_options (argv);
/* Initialize ourselves as a thread so we can use locks,
then enable console locking. */
thread_init ();
console_init ();
/* Greet user. */
printf ("PintOS booting with %'"PRIu32" kB RAM...\n",
init_ram_pages * PGSIZE / 1024);
/* Initialize memory system. */
palloc_init (user_page_limit);
malloc_init ();
paging_init ();
/* Segmentation. */
#ifdef USERPROG
tss_init ();
gdt_init ();
#endif
/* Initialize interrupt handlers. */
intr_init ();
timer_init ();
kbd_init ();
input_init ();
#ifdef USERPROG
exception_init ();
syscall_init ();
#endif
/* Start thread scheduler and enable interrupts. */
thread_start ();
serial_init_queue ();
timer_calibrate ();
#ifdef FILESYS
/* Initialize file system. */
ide_init ();
locate_block_devices ();
filesys_init (format_filesys);
#endif
#ifdef VM
/* Initialize the swap disk. */
swap_init ();
#endif
printf ("Boot complete.\n");
/* Run actions specified on kernel command line. */
run_actions (argv);
/* Finish up. */
shutdown ();
thread_exit ();
}
/* Clear the "BSS", a segment that should be initialized to
zeros. It isn't actually stored on disk or zeroed by the
kernel loader, so we have to zero it ourselves.
The start and end of the BSS segment are recorded by the
linker as _start_bss and _end_bss. See kernel.lds. */
static void
bss_init (void)
{
extern char _start_bss, _end_bss;
memset (&_start_bss, 0, &_end_bss - &_start_bss);
}
/* Populates the base page directory and page table with the
kernel virtual mapping, and then sets up the CPU to use the
new page directory. Points init_page_dir to the page
directory it creates. */
static void
paging_init (void)
{
uint32_t *pd, *pt;
size_t page;
extern char _start, _end_kernel_text;
pd = init_page_dir = palloc_get_page (PAL_ASSERT | PAL_ZERO);
pt = NULL;
for (page = 0; page < init_ram_pages; page++)
{
uintptr_t paddr = page * PGSIZE;
char *vaddr = ptov (paddr);
size_t pde_idx = pd_no (vaddr);
size_t pte_idx = pt_no (vaddr);
bool in_kernel_text = &_start <= vaddr && vaddr < &_end_kernel_text;
if (pd[pde_idx] == 0)
{
pt = palloc_get_page (PAL_ASSERT | PAL_ZERO);
pd[pde_idx] = pde_create (pt);
}
pt[pte_idx] = pte_create_kernel (vaddr, !in_kernel_text);
}
/* Store the physical address of the page directory into CR3
aka PDBR (page directory base register). This activates our
new page tables immediately. See [IA32-v2a] "MOV--Move
to/from Control Registers" and [IA32-v3a] 3.7.5 "Base Address
of the Page Directory". */
asm volatile ("movl %0, %%cr3" : : "r" (vtop (init_page_dir)));
}
/* Breaks the kernel command line into words and returns them as
an argv-like array. */
static char **
read_command_line (void)
{
static char *argv[LOADER_ARGS_LEN / 2 + 1];
char *p, *end;
int argc;
int i;
argc = *(uint32_t *) ptov (LOADER_ARG_CNT);
p = ptov (LOADER_ARGS);
end = p + LOADER_ARGS_LEN;
for (i = 0; i < argc; i++)
{
if (p >= end)
PANIC ("command line arguments overflow");
argv[i] = p;
p += strnlen (p, end - p) + 1;
}
argv[argc] = NULL;
/* Print kernel command line. */
printf ("Kernel command line:");
for (i = 0; i < argc; i++)
if (strchr (argv[i], ' ') == NULL)
printf (" %s", argv[i]);
else
printf (" '%s'", argv[i]);
printf ("\n");
return argv;
}
/* Parses options in ARGV[]
and returns the first non-option argument. */
static char **
parse_options (char **argv)
{
for (; *argv != NULL && **argv == '-'; argv++)
{
char *save_ptr;
char *name = strtok_r (*argv, "=", &save_ptr);
char *value = strtok_r (NULL, "", &save_ptr);
if (!strcmp (name, "-h"))
usage ();
else if (!strcmp (name, "-q"))
shutdown_configure (SHUTDOWN_POWER_OFF);
else if (!strcmp (name, "-r"))
shutdown_configure (SHUTDOWN_REBOOT);
#ifdef FILESYS
else if (!strcmp (name, "-f"))
format_filesys = true;
else if (!strcmp (name, "-filesys"))
filesys_bdev_name = value;
else if (!strcmp (name, "-scratch"))
scratch_bdev_name = value;
#ifdef VM
else if (!strcmp (name, "-swap"))
swap_bdev_name = value;
#endif
#endif
else if (!strcmp (name, "-rs"))
random_init (atoi (value));
else if (!strcmp (name, "-mlfqs"))
thread_mlfqs = true;
#ifdef USERPROG
else if (!strcmp (name, "-ul"))
user_page_limit = atoi (value);
#endif
else
PANIC ("unknown option `%s' (use -h for help)", name);
}
/* Initialize the random number generator based on the system
time. This has no effect if an "-rs" option was specified.
When running under Bochs, this is not enough by itself to
get a good seed value, because the pintos script sets the
initial time to a predictable value, not to the local time,
for reproducibility. To fix this, give the "-r" option to
the pintos script to request real-time execution. */
random_init (rtc_get_time ());
return argv;
}
/* Runs the task specified in ARGV[1]. */
static void
run_task (char **argv)
{
const char *task = argv[1];
printf ("Executing '%s':\n", task);
#ifdef USERPROG
process_wait (process_execute (task));
#else
run_test (task);
#endif
printf ("Execution of '%s' complete.\n", task);
}
/* Executes all of the actions specified in ARGV[]
up to the null pointer sentinel. */
static void
run_actions (char **argv)
{
/* An action. */
struct action
{
char *name; /* Action name. */
int argc; /* # of args, including action name. */
void (*function) (char **argv); /* Function to execute action. */
};
/* Table of supported actions. */
static const struct action actions[] =
{
{"run", 2, run_task},
#ifdef FILESYS
{"ls", 1, fsutil_ls},
{"cat", 2, fsutil_cat},
{"rm", 2, fsutil_rm},
{"extract", 1, fsutil_extract},
{"append", 2, fsutil_append},
#endif
{NULL, 0, NULL},
};
while (*argv != NULL)
{
const struct action *a;
int i;
/* Find action name. */
for (a = actions; ; a++)
if (a->name == NULL)
PANIC ("unknown action `%s' (use -h for help)", *argv);
else if (!strcmp (*argv, a->name))
break;
/* Check for required arguments. */
for (i = 1; i < a->argc; i++)
if (argv[i] == NULL)
PANIC ("action `%s' requires %d argument(s)", *argv, a->argc - 1);
/* Invoke action and advance. */
a->function (argv);
argv += a->argc;
}
}
/* Prints a kernel command line help message and powers off the
machine. */
static void
usage (void)
{
printf ("\nCommand line syntax: [OPTION...] [ACTION...]\n"
"Options must precede actions.\n"
"Actions are executed in the order specified.\n"
"\nAvailable actions:\n"
#ifdef USERPROG
" run 'PROG [ARG...]' Run PROG and wait for it to complete.\n"
#else
" run TEST Run TEST.\n"
#endif
#ifdef FILESYS
" ls List files in the root directory.\n"
" cat FILE Print FILE to the console.\n"
" rm FILE Delete FILE.\n"
"Use these actions indirectly via `pintos' -g and -p options:\n"
" extract Untar from scratch device into file system.\n"
" append FILE Append FILE to tar file on scratch device.\n"
#endif
"\nOptions:\n"
" -h Print this help message and power off.\n"
" -q Power off VM after actions or on panic.\n"
" -r Reboot after actions.\n"
#ifdef FILESYS
" -f Format file system device during startup.\n"
" -filesys=BDEV Use BDEV for file system instead of default.\n"
" -scratch=BDEV Use BDEV for scratch instead of default.\n"
#ifdef VM
" -swap=BDEV Use BDEV for swap instead of default.\n"
#endif
#endif
" -rs=SEED Set random number seed to SEED.\n"
" -mlfqs Use multi-level feedback queue scheduler.\n"
#ifdef USERPROG
" -ul=COUNT Limit user memory to COUNT pages.\n"
#endif
);
shutdown_power_off ();
}
#ifdef FILESYS
/* Figure out what block devices to cast in the various PintOS roles. */
static void
locate_block_devices (void)
{
locate_block_device (BLOCK_FILESYS, filesys_bdev_name);
locate_block_device (BLOCK_SCRATCH, scratch_bdev_name);
#ifdef VM
locate_block_device (BLOCK_SWAP, swap_bdev_name);
#endif
}
/* Figures out what block device to use for the given ROLE: the
block device with the given NAME, if NAME is non-null,
otherwise the first block device in probe order of type
ROLE. */
static void
locate_block_device (enum block_type role, const char *name)
{
struct block *block = NULL;
if (name != NULL)
{
block = block_get_by_name (name);
if (block == NULL)
PANIC ("No such block device \"%s\"", name);
}
else
{
for (block = block_first (); block != NULL; block = block_next (block))
if (block_type (block) == role)
break;
}
if (block != NULL)
{
printf ("%s: using %s\n", block_type_name (role), block_name (block));
block_set_role (role, block);
}
}
#endif

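run_actions() above is a small table-driven interpreter: it looks each command-line word up in a {name, argc, function} table, panics on an unknown name, and advances past the action plus its arguments. A host-runnable sketch of the same pattern, with a made-up action table:

#include <stdio.h>
#include <string.h>

struct action
  {
    const char *name;                 /* Action name. */
    int argc;                         /* # of args, including action name. */
    void (*function) (char **argv);   /* Handler. */
  };

static void
say_hello (char **argv)
{
  printf ("hello, %s\n", argv[1]);
}

int
main (void)
{
  static const struct action actions[] =
    {
      {"hello", 2, say_hello},
      {NULL, 0, NULL},
    };
  char *argv[] = {"hello", "world", NULL};
  char **p = argv;

  while (*p != NULL)
    {
      const struct action *a;

      for (a = actions; a->name != NULL; a++)
        if (!strcmp (*p, a->name))
          break;
      if (a->name == NULL)
        {
          printf ("unknown action `%s'\n", *p);
          return 1;
        }
      a->function (p);
      p += a->argc;     /* Skip the action name and its arguments. */
    }
  return 0;
}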
src/threads/init.h

@@ -0,0 +1,12 @@
#ifndef THREADS_INIT_H
#define THREADS_INIT_H
#include <debug.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
/* Page directory with kernel mappings only. */
extern uint32_t *init_page_dir;
#endif /* threads/init.h */

src/threads/interrupt.c

@@ -0,0 +1,438 @@
#include "threads/interrupt.h"
#include <debug.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include "threads/flags.h"
#include "threads/intr-stubs.h"
#include "threads/io.h"
#include "threads/thread.h"
#include "threads/vaddr.h"
#include "devices/timer.h"
/* Programmable Interrupt Controller (PIC) registers.
A PC has two PICs, called the master and slave PICs, with the
slave attached ("cascaded") to the master IRQ line 2. */
#define PIC0_CTRL 0x20 /* Master PIC control register address. */
#define PIC0_DATA 0x21 /* Master PIC data register address. */
#define PIC1_CTRL 0xa0 /* Slave PIC control register address. */
#define PIC1_DATA 0xa1 /* Slave PIC data register address. */
/* Number of x86 interrupts. */
#define INTR_CNT 256
/* The Interrupt Descriptor Table (IDT). The format is fixed by
the CPU. See [IA32-v3a] sections 5.10 "Interrupt Descriptor
Table (IDT)", 5.11 "IDT Descriptors", 5.12.1.2 "Flag Usage By
Exception- or Interrupt-Handler Procedure". */
static uint64_t idt[INTR_CNT];
/* Interrupt handler functions for each interrupt. */
static intr_handler_func *intr_handlers[INTR_CNT];
/* Names for each interrupt, for debugging purposes. */
static const char *intr_names[INTR_CNT];
/* Number of unexpected interrupts for each vector. An
unexpected interrupt is one that has no registered handler. */
static unsigned int unexpected_cnt[INTR_CNT];
/* External interrupts are those generated by devices outside the
CPU, such as the timer. External interrupts run with
interrupts turned off, so they never nest, nor are they ever
pre-empted. Handlers for external interrupts also may not
sleep, although they may invoke intr_yield_on_return() to
request that a new process be scheduled just before the
interrupt returns. */
static bool in_external_intr; /* Are we processing an external interrupt? */
static bool yield_on_return; /* Should we yield on interrupt return? */
/* Programmable Interrupt Controller helpers. */
static void pic_init (void);
static void pic_end_of_interrupt (int irq);
/* Interrupt Descriptor Table helpers. */
static uint64_t make_intr_gate (void (*) (void), int dpl);
static uint64_t make_trap_gate (void (*) (void), int dpl);
static inline uint64_t make_idtr_operand (uint16_t limit, void *base);
/* Interrupt handlers. */
void intr_handler (struct intr_frame *args);
static void unexpected_interrupt (const struct intr_frame *);
/* Returns the current interrupt status. */
enum intr_level
intr_get_level (void)
{
uint32_t flags;
/* Push the flags register on the processor stack, then pop the
value off the stack into `flags'. See [IA32-v2b] "PUSHF"
and "POP" and [IA32-v3a] 5.8.1 "Masking Maskable Hardware
Interrupts". */
asm volatile ("pushfl; popl %0" : "=g" (flags));
return flags & FLAG_IF ? INTR_ON : INTR_OFF;
}
/* Enables or disables interrupts as specified by LEVEL and
returns the previous interrupt status. */
enum intr_level
intr_set_level (enum intr_level level)
{
return level == INTR_ON ? intr_enable () : intr_disable ();
}
/* Enables interrupts and returns the previous interrupt status. */
enum intr_level
intr_enable (void)
{
enum intr_level old_level = intr_get_level ();
ASSERT (!intr_context ());
/* Enable interrupts by setting the interrupt flag.
See [IA32-v2b] "STI" and [IA32-v3a] 5.8.1 "Masking Maskable
Hardware Interrupts". */
asm volatile ("sti");
return old_level;
}
/* Disables interrupts and returns the previous interrupt status. */
enum intr_level
intr_disable (void)
{
enum intr_level old_level = intr_get_level ();
/* Disable interrupts by clearing the interrupt flag.
See [IA32-v2b] "CLI" and [IA32-v3a] 5.8.1 "Masking Maskable
Hardware Interrupts". */
asm volatile ("cli" : : : "memory");
return old_level;
}
/* Initializes the interrupt system. */
void
intr_init (void)
{
uint64_t idtr_operand;
int i;
/* Initialize interrupt controller. */
pic_init ();
/* Initialize IDT. */
for (i = 0; i < INTR_CNT; i++)
idt[i] = make_intr_gate (intr_stubs[i], 0);
/* Load IDT register.
See [IA32-v2a] "LIDT" and [IA32-v3a] 5.10 "Interrupt
Descriptor Table (IDT)". */
idtr_operand = make_idtr_operand (sizeof idt - 1, idt);
asm volatile ("lidt %0" : : "m" (idtr_operand));
/* Initialize intr_names. */
for (i = 0; i < INTR_CNT; i++)
intr_names[i] = "unknown";
intr_names[0] = "#DE Divide Error";
intr_names[1] = "#DB Debug Exception";
intr_names[2] = "NMI Interrupt";
intr_names[3] = "#BP Breakpoint Exception";
intr_names[4] = "#OF Overflow Exception";
intr_names[5] = "#BR BOUND Range Exceeded Exception";
intr_names[6] = "#UD Invalid Opcode Exception";
intr_names[7] = "#NM Device Not Available Exception";
intr_names[8] = "#DF Double Fault Exception";
intr_names[9] = "Coprocessor Segment Overrun";
intr_names[10] = "#TS Invalid TSS Exception";
intr_names[11] = "#NP Segment Not Present";
intr_names[12] = "#SS Stack Fault Exception";
intr_names[13] = "#GP General Protection Exception";
intr_names[14] = "#PF Page-Fault Exception";
intr_names[16] = "#MF x87 FPU Floating-Point Error";
intr_names[17] = "#AC Alignment Check Exception";
intr_names[18] = "#MC Machine-Check Exception";
intr_names[19] = "#XF SIMD Floating-Point Exception";
}
/* Registers interrupt VEC_NO to invoke HANDLER with descriptor
privilege level DPL. Names the interrupt NAME for debugging
purposes. The interrupt handler will be invoked with
interrupt status set to LEVEL. */
static void
register_handler (uint8_t vec_no, int dpl, enum intr_level level,
intr_handler_func *handler, const char *name)
{
ASSERT (intr_handlers[vec_no] == NULL);
if (level == INTR_ON)
idt[vec_no] = make_trap_gate (intr_stubs[vec_no], dpl);
else
idt[vec_no] = make_intr_gate (intr_stubs[vec_no], dpl);
intr_handlers[vec_no] = handler;
intr_names[vec_no] = name;
}
/* Registers external interrupt VEC_NO to invoke HANDLER, which
is named NAME for debugging purposes. The handler will
execute with interrupts disabled. */
void
intr_register_ext (uint8_t vec_no, intr_handler_func *handler,
const char *name)
{
ASSERT (vec_no >= 0x20 && vec_no <= 0x2f);
register_handler (vec_no, 0, INTR_OFF, handler, name);
}
/* Registers internal interrupt VEC_NO to invoke HANDLER, which
is named NAME for debugging purposes. The interrupt handler
will be invoked with interrupt status LEVEL.
The handler will have descriptor privilege level DPL, meaning
that it can be invoked intentionally when the processor is in
the DPL or lower-numbered ring. In practice, DPL==3 allows
user mode to invoke the interrupts and DPL==0 prevents such
invocation. Faults and exceptions that occur in user mode
still cause interrupts with DPL==0 to be invoked. See
[IA32-v3a] sections 4.5 "Privilege Levels" and 4.8.1.1
"Accessing Nonconforming Code Segments" for further
discussion. */
void
intr_register_int (uint8_t vec_no, int dpl, enum intr_level level,
intr_handler_func *handler, const char *name)
{
ASSERT (vec_no < 0x20 || vec_no > 0x2f);
register_handler (vec_no, dpl, level, handler, name);
}
/* Returns true during processing of an external interrupt
and false at all other times. */
bool
intr_context (void)
{
return in_external_intr;
}
/* During processing of an external interrupt, directs the
interrupt handler to yield to a new process just before
returning from the interrupt. May not be called at any other
time. */
void
intr_yield_on_return (void)
{
ASSERT (intr_context ());
yield_on_return = true;
}
/* 8259A Programmable Interrupt Controller. */
/* Initializes the PICs. Refer to [8259A] for details.
By default, interrupts 0...15 delivered by the PICs will go to
interrupt vectors 0...15. Those vectors are also used for CPU
traps and exceptions, so we reprogram the PICs so that
interrupts 0...15 are delivered to interrupt vectors 32...47
(0x20...0x2f) instead. */
static void
pic_init (void)
{
/* Mask all interrupts on both PICs. */
outb (PIC0_DATA, 0xff);
outb (PIC1_DATA, 0xff);
/* Initialize master. */
outb (PIC0_CTRL, 0x11); /* ICW1: cascade mode, edge triggered, expect ICW4. */
outb (PIC0_DATA, 0x20); /* ICW2: line IR0...7 -> irq 0x20...0x27. */
outb (PIC0_DATA, 0x04); /* ICW3: slave PIC on line IR2. */
outb (PIC0_DATA, 0x01); /* ICW4: 8086 mode, normal EOI, non-buffered. */
/* Initialize slave. */
outb (PIC1_CTRL, 0x11); /* ICW1: cascade mode, edge triggered, expect ICW4. */
outb (PIC1_DATA, 0x28); /* ICW2: line IR0...7 -> irq 0x28...0x2f. */
outb (PIC1_DATA, 0x02); /* ICW3: slave ID is 2. */
outb (PIC1_DATA, 0x01); /* ICW4: 8086 mode, normal EOI, non-buffered. */
/* Unmask all interrupts. */
outb (PIC0_DATA, 0x00);
outb (PIC1_DATA, 0x00);
}
/* Sends an end-of-interrupt signal to the PIC for the given IRQ.
If we don't acknowledge the IRQ, it will never be delivered to
us again, so this is important. */
static void
pic_end_of_interrupt (int irq)
{
ASSERT (irq >= 0x20 && irq < 0x30);
/* Acknowledge master PIC. */
outb (0x20, 0x20);
/* Acknowledge slave PIC if this is a slave interrupt. */
if (irq >= 0x28)
outb (0xa0, 0x20);
}
/* Creates a gate that invokes FUNCTION.
The gate has descriptor privilege level DPL, meaning that it
can be invoked intentionally when the processor is in the DPL
or lower-numbered ring. In practice, DPL==3 allows user mode
to call into the gate and DPL==0 prevents such calls. Faults
and exceptions that occur in user mode still cause gates with
DPL==0 to be invoked. See [IA32-v3a] sections 4.5 "Privilege
Levels" and 4.8.1.1 "Accessing Nonconforming Code Segments"
for further discussion.
TYPE must be either 14 (for an interrupt gate) or 15 (for a
trap gate). The difference is that entering an interrupt gate
disables interrupts, but entering a trap gate does not. See
[IA32-v3a] section 5.12.1.2 "Flag Usage By Exception- or
Interrupt-Handler Procedure" for discussion. */
static uint64_t
make_gate (void (*function) (void), int dpl, int type)
{
uint32_t e0, e1;
ASSERT (function != NULL);
ASSERT (dpl >= 0 && dpl <= 3);
ASSERT (type >= 0 && type <= 15);
e0 = (((uint32_t) function & 0xffff) /* Offset 15:0. */
| (SEL_KCSEG << 16)); /* Target code segment. */
e1 = (((uint32_t) function & 0xffff0000) /* Offset 31:16. */
| (1 << 15) /* Present. */
| ((uint32_t) dpl << 13) /* Descriptor privilege level. */
| (0 << 12) /* System. */
| ((uint32_t) type << 8)); /* Gate type. */
return e0 | ((uint64_t) e1 << 32);
}
/* Creates an interrupt gate that invokes FUNCTION with the given
DPL. */
static uint64_t
make_intr_gate (void (*function) (void), int dpl)
{
return make_gate (function, dpl, 14);
}
/* Creates a trap gate that invokes FUNCTION with the given
DPL. */
static uint64_t
make_trap_gate (void (*function) (void), int dpl)
{
return make_gate (function, dpl, 15);
}
/* Returns a descriptor that yields the given LIMIT and BASE when
used as an operand for the LIDT instruction. */
static inline uint64_t
make_idtr_operand (uint16_t limit, void *base)
{
return limit | ((uint64_t) (uint32_t) base << 16);
}
/* Interrupt handlers. */
/* Handler for all interrupts, faults, and exceptions. This
function is called by the assembly language interrupt stubs in
intr-stubs.S. FRAME describes the interrupt and the
interrupted thread's registers. */
void
intr_handler (struct intr_frame *frame)
{
bool external;
intr_handler_func *handler;
/* External interrupts are special.
We only handle one at a time (so interrupts must be off)
and they need to be acknowledged on the PIC (see below).
An external interrupt handler cannot sleep. */
external = frame->vec_no >= 0x20 && frame->vec_no < 0x30;
if (external)
{
ASSERT (intr_get_level () == INTR_OFF);
ASSERT (!intr_context ());
in_external_intr = true;
yield_on_return = false;
}
/* Invoke the interrupt's handler. */
handler = intr_handlers[frame->vec_no];
if (handler != NULL)
handler (frame);
else if (frame->vec_no == 0x27 || frame->vec_no == 0x2f)
{
/* There is no handler, but this interrupt can trigger
spuriously due to a hardware fault or hardware race
condition. Ignore it. */
}
else
unexpected_interrupt (frame);
/* Complete the processing of an external interrupt. */
if (external)
{
ASSERT (intr_get_level () == INTR_OFF);
ASSERT (intr_context ());
in_external_intr = false;
pic_end_of_interrupt (frame->vec_no);
if (yield_on_return)
thread_yield ();
}
}
/* Handles an unexpected interrupt with interrupt frame F. An
unexpected interrupt is one that has no registered handler. */
static void
unexpected_interrupt (const struct intr_frame *f)
{
/* Count the number so far. */
unsigned int n = ++unexpected_cnt[f->vec_no];
/* If the number is a power of 2, print a message. This rate
limiting means that we get information about an uncommon
unexpected interrupt the first time and fairly often after
that, but one that occurs many times will not overwhelm the
console. */
if ((n & (n - 1)) == 0)
printf ("Unexpected interrupt %#04x (%s)\n",
f->vec_no, intr_names[f->vec_no]);
}
/* Dumps interrupt frame F to the console, for debugging. */
void
intr_dump_frame (const struct intr_frame *f)
{
uint32_t cr2;
/* Store current value of CR2 into `cr2'.
CR2 is the linear address of the last page fault.
See [IA32-v2a] "MOV--Move to/from Control Registers" and
[IA32-v3a] 5.14 "Interrupt 14--Page Fault Exception
(#PF)". */
asm ("movl %%cr2, %0" : "=r" (cr2));
printf ("Interrupt %#04x (%s) at eip=%p\n",
f->vec_no, intr_names[f->vec_no], f->eip);
printf (" cr2=%08"PRIx32" error=%08"PRIx32"\n", cr2, f->error_code);
printf (" eax=%08"PRIx32" ebx=%08"PRIx32" ecx=%08"PRIx32" edx=%08"PRIx32"\n",
f->eax, f->ebx, f->ecx, f->edx);
printf (" esi=%08"PRIx32" edi=%08"PRIx32" esp=%08"PRIx32" ebp=%08"PRIx32"\n",
f->esi, f->edi, (uint32_t) f->esp, f->ebp);
printf (" cs=%04"PRIx16" ds=%04"PRIx16" es=%04"PRIx16" ss=%04"PRIx16"\n",
f->cs, f->ds, f->es, f->ss);
}
/* Returns the name of interrupt VEC. */
const char *
intr_name (uint8_t vec)
{
return intr_names[vec];
}

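unexpected_interrupt() above rate-limits its console output with the classic test (n & (n - 1)) == 0, which is true exactly when n is zero or a power of two, so a noisy vector is reported on occurrences 1, 2, 4, 8, and so on. A host-runnable sketch:

#include <stdio.h>

int
main (void)
{
  unsigned int n;

  for (n = 1; n <= 1000; n++)
    if ((n & (n - 1)) == 0)
      printf ("occurrence %u would be reported\n", n);  /* 1, 2, 4, ..., 512. */
  return 0;
}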
src/threads/interrupt.h

@@ -0,0 +1,70 @@
#ifndef THREADS_INTERRUPT_H
#define THREADS_INTERRUPT_H
#include <stdbool.h>
#include <stdint.h>
/* Interrupts on or off? */
enum intr_level
{
INTR_OFF, /* Interrupts disabled. */
INTR_ON /* Interrupts enabled. */
};
enum intr_level intr_get_level (void);
enum intr_level intr_set_level (enum intr_level);
enum intr_level intr_enable (void);
enum intr_level intr_disable (void);
/* Interrupt stack frame. */
struct intr_frame
{
/* Pushed by intr_entry in intr-stubs.S.
These are the interrupted task's saved registers. */
uint32_t edi; /* Saved EDI. */
uint32_t esi; /* Saved ESI. */
uint32_t ebp; /* Saved EBP. */
uint32_t esp_dummy; /* Not used. */
uint32_t ebx; /* Saved EBX. */
uint32_t edx; /* Saved EDX. */
uint32_t ecx; /* Saved ECX. */
uint32_t eax; /* Saved EAX. */
uint16_t gs, :16; /* Saved GS segment register. */
uint16_t fs, :16; /* Saved FS segment register. */
uint16_t es, :16; /* Saved ES segment register. */
uint16_t ds, :16; /* Saved DS segment register. */
/* Pushed by intrNN_stub in intr-stubs.S. */
uint32_t vec_no; /* Interrupt vector number. */
/* Sometimes pushed by the CPU,
otherwise for consistency pushed as 0 by intrNN_stub.
The CPU puts it just under `eip', but we move it here. */
uint32_t error_code; /* Error code. */
/* Pushed by intrNN_stub in intr-stubs.S.
This frame pointer eases interpretation of backtraces. */
void *frame_pointer; /* Saved EBP (frame pointer). */
/* Pushed by the CPU.
These are the interrupted task's saved registers. */
void (*eip) (void); /* Next instruction to execute. */
uint16_t cs, :16; /* Code segment for eip. */
uint32_t eflags; /* Saved CPU flags. */
void *esp; /* Saved stack pointer. */
uint16_t ss, :16; /* Data segment for esp. */
};
typedef void intr_handler_func (struct intr_frame *);
void intr_init (void);
void intr_register_ext (uint8_t vec, intr_handler_func *, const char *name);
void intr_register_int (uint8_t vec, int dpl, enum intr_level,
intr_handler_func *, const char *name);
bool intr_context (void);
void intr_yield_on_return (void);
void intr_dump_frame (const struct intr_frame *);
const char *intr_name (uint8_t vec);
#endif /* threads/interrupt.h */

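A sketch of how a device driver hooks an external interrupt through this API. It only compiles inside the kernel tree, and vector 0x26 is an assumption of an otherwise unclaimed PIC vector in 0x20...0x2f (register_handler() asserts the slot is free); the handler and init function names are made up:

#include "threads/interrupt.h"

static void
demo_handler (struct intr_frame *f)
{
  (void) f;   /* Unused in this sketch. */
  /* Runs with interrupts off and must not sleep; it may call
     intr_yield_on_return () to request a reschedule on exit. */
}

void
demo_init (void)
{
  intr_register_ext (0x26, demo_handler, "demo device");
}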
src/threads/intr-stubs.S

@@ -0,0 +1,203 @@
#include "threads/loader.h"
.text
/* Main interrupt entry point.
An internal or external interrupt starts in one of the
intrNN_stub routines, which push the `struct intr_frame'
frame_pointer, error_code, and vec_no members on the stack,
then jump here.
We save the rest of the `struct intr_frame' members to the
stack, set up some registers as needed by the kernel, and then
call intr_handler(), which actually handles the interrupt.
We "fall through" to intr_exit to return from the interrupt.
*/
.func intr_entry
intr_entry:
/* Save caller's registers. */
pushl %ds
pushl %es
pushl %fs
pushl %gs
pushal
/* Set up kernel environment. */
cld /* String instructions go upward. */
mov $SEL_KDSEG, %eax /* Initialize segment registers. */
mov %eax, %ds
mov %eax, %es
leal 56(%esp), %ebp /* Set up frame pointer. */
/* Call interrupt handler. */
pushl %esp
.globl intr_handler
call intr_handler
addl $4, %esp
.endfunc
/* Interrupt exit.
Restores the caller's registers, discards extra data on the
stack, and returns to the caller.
This is a separate function because it is called directly when
we launch a new user process (see start_process() in
userprog/process.c). */
.globl intr_exit
.func intr_exit
intr_exit:
/* Restore caller's registers. */
popal
popl %gs
popl %fs
popl %es
popl %ds
/* Discard `struct intr_frame' vec_no, error_code,
frame_pointer members. */
addl $12, %esp
/* Return to caller. */
iret
.endfunc
/* Interrupt stubs.
This defines 256 fragments of code, named `intr00_stub'
through `intrff_stub', each of which is used as the entry
point for the corresponding interrupt vector. It also puts
the address of each of these functions in the correct spot in
`intr_stubs', an array of function pointers.
Most of the stubs do this:
1. Push %ebp on the stack (frame_pointer in `struct intr_frame').
2. Push 0 on the stack (error_code).
3. Push the interrupt number on the stack (vec_no).
The CPU pushes an extra "error code" on the stack for a few
interrupts. Because we want %ebp to be where the error code
is, we follow a different path:
1. Push a duplicate copy of the error code on the stack.
2. Replace the original copy of the error code by %ebp.
3. Push the interrupt number on the stack. */
.data
.globl intr_stubs
intr_stubs:
/* This implements steps 1 and 2, described above, in the common
case where we just push a 0 error code. */
#define zero \
pushl %ebp; \
pushl $0
/* This implements steps 1 and 2, described above, in the case
where the CPU already pushed an error code. */
#define REAL \
pushl (%esp); \
movl %ebp, 4(%esp)
/* Emits a stub for interrupt vector NUMBER.
TYPE is `zero', for the case where we push a 0 error code,
or `REAL', if the CPU pushes an error code for us. */
#define STUB(NUMBER, TYPE) \
.text; \
.func intr##NUMBER##_stub; \
intr##NUMBER##_stub: \
TYPE; \
push $0x##NUMBER; \
jmp intr_entry; \
.endfunc; \
\
.data; \
.long intr##NUMBER##_stub;
/* All the stubs. */
STUB(00, zero) STUB(01, zero) STUB(02, zero) STUB(03, zero)
STUB(04, zero) STUB(05, zero) STUB(06, zero) STUB(07, zero)
STUB(08, REAL) STUB(09, zero) STUB(0a, REAL) STUB(0b, REAL)
STUB(0c, zero) STUB(0d, REAL) STUB(0e, REAL) STUB(0f, zero)
STUB(10, zero) STUB(11, REAL) STUB(12, zero) STUB(13, zero)
STUB(14, zero) STUB(15, zero) STUB(16, zero) STUB(17, zero)
STUB(18, REAL) STUB(19, zero) STUB(1a, REAL) STUB(1b, REAL)
STUB(1c, zero) STUB(1d, REAL) STUB(1e, REAL) STUB(1f, zero)
STUB(20, zero) STUB(21, zero) STUB(22, zero) STUB(23, zero)
STUB(24, zero) STUB(25, zero) STUB(26, zero) STUB(27, zero)
STUB(28, zero) STUB(29, zero) STUB(2a, zero) STUB(2b, zero)
STUB(2c, zero) STUB(2d, zero) STUB(2e, zero) STUB(2f, zero)
STUB(30, zero) STUB(31, zero) STUB(32, zero) STUB(33, zero)
STUB(34, zero) STUB(35, zero) STUB(36, zero) STUB(37, zero)
STUB(38, zero) STUB(39, zero) STUB(3a, zero) STUB(3b, zero)
STUB(3c, zero) STUB(3d, zero) STUB(3e, zero) STUB(3f, zero)
STUB(40, zero) STUB(41, zero) STUB(42, zero) STUB(43, zero)
STUB(44, zero) STUB(45, zero) STUB(46, zero) STUB(47, zero)
STUB(48, zero) STUB(49, zero) STUB(4a, zero) STUB(4b, zero)
STUB(4c, zero) STUB(4d, zero) STUB(4e, zero) STUB(4f, zero)
STUB(50, zero) STUB(51, zero) STUB(52, zero) STUB(53, zero)
STUB(54, zero) STUB(55, zero) STUB(56, zero) STUB(57, zero)
STUB(58, zero) STUB(59, zero) STUB(5a, zero) STUB(5b, zero)
STUB(5c, zero) STUB(5d, zero) STUB(5e, zero) STUB(5f, zero)
STUB(60, zero) STUB(61, zero) STUB(62, zero) STUB(63, zero)
STUB(64, zero) STUB(65, zero) STUB(66, zero) STUB(67, zero)
STUB(68, zero) STUB(69, zero) STUB(6a, zero) STUB(6b, zero)
STUB(6c, zero) STUB(6d, zero) STUB(6e, zero) STUB(6f, zero)
STUB(70, zero) STUB(71, zero) STUB(72, zero) STUB(73, zero)
STUB(74, zero) STUB(75, zero) STUB(76, zero) STUB(77, zero)
STUB(78, zero) STUB(79, zero) STUB(7a, zero) STUB(7b, zero)
STUB(7c, zero) STUB(7d, zero) STUB(7e, zero) STUB(7f, zero)
STUB(80, zero) STUB(81, zero) STUB(82, zero) STUB(83, zero)
STUB(84, zero) STUB(85, zero) STUB(86, zero) STUB(87, zero)
STUB(88, zero) STUB(89, zero) STUB(8a, zero) STUB(8b, zero)
STUB(8c, zero) STUB(8d, zero) STUB(8e, zero) STUB(8f, zero)
STUB(90, zero) STUB(91, zero) STUB(92, zero) STUB(93, zero)
STUB(94, zero) STUB(95, zero) STUB(96, zero) STUB(97, zero)
STUB(98, zero) STUB(99, zero) STUB(9a, zero) STUB(9b, zero)
STUB(9c, zero) STUB(9d, zero) STUB(9e, zero) STUB(9f, zero)
STUB(a0, zero) STUB(a1, zero) STUB(a2, zero) STUB(a3, zero)
STUB(a4, zero) STUB(a5, zero) STUB(a6, zero) STUB(a7, zero)
STUB(a8, zero) STUB(a9, zero) STUB(aa, zero) STUB(ab, zero)
STUB(ac, zero) STUB(ad, zero) STUB(ae, zero) STUB(af, zero)
STUB(b0, zero) STUB(b1, zero) STUB(b2, zero) STUB(b3, zero)
STUB(b4, zero) STUB(b5, zero) STUB(b6, zero) STUB(b7, zero)
STUB(b8, zero) STUB(b9, zero) STUB(ba, zero) STUB(bb, zero)
STUB(bc, zero) STUB(bd, zero) STUB(be, zero) STUB(bf, zero)
STUB(c0, zero) STUB(c1, zero) STUB(c2, zero) STUB(c3, zero)
STUB(c4, zero) STUB(c5, zero) STUB(c6, zero) STUB(c7, zero)
STUB(c8, zero) STUB(c9, zero) STUB(ca, zero) STUB(cb, zero)
STUB(cc, zero) STUB(cd, zero) STUB(ce, zero) STUB(cf, zero)
STUB(d0, zero) STUB(d1, zero) STUB(d2, zero) STUB(d3, zero)
STUB(d4, zero) STUB(d5, zero) STUB(d6, zero) STUB(d7, zero)
STUB(d8, zero) STUB(d9, zero) STUB(da, zero) STUB(db, zero)
STUB(dc, zero) STUB(dd, zero) STUB(de, zero) STUB(df, zero)
STUB(e0, zero) STUB(e1, zero) STUB(e2, zero) STUB(e3, zero)
STUB(e4, zero) STUB(e5, zero) STUB(e6, zero) STUB(e7, zero)
STUB(e8, zero) STUB(e9, zero) STUB(ea, zero) STUB(eb, zero)
STUB(ec, zero) STUB(ed, zero) STUB(ee, zero) STUB(ef, zero)
STUB(f0, zero) STUB(f1, zero) STUB(f2, zero) STUB(f3, zero)
STUB(f4, zero) STUB(f5, zero) STUB(f6, zero) STUB(f7, zero)
STUB(f8, zero) STUB(f9, zero) STUB(fa, zero) STUB(fb, zero)
STUB(fc, zero) STUB(fd, zero) STUB(fe, zero) STUB(ff, zero)

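The STUB macro leans on preprocessor token pasting: ## splices the two-digit hex vector number into both the symbol name (intr##NUMBER##_stub) and a hex literal (0x##NUMBER). The same trick, shown host-runnable in C:

#include <stdio.h>

#define STUB(NUMBER)             \
  static int                     \
  intr##NUMBER##_stub (void)     \
  {                              \
    return 0x##NUMBER;           \
  }

STUB (0d)
STUB (2f)

int
main (void)
{
  /* Prints 0xd 0x2f. */
  printf ("%#x %#x\n", intr0d_stub (), intr2f_stub ());
  return 0;
}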
src/threads/intr-stubs.h

@@ -0,0 +1,19 @@
#ifndef THREADS_INTR_STUBS_H
#define THREADS_INTR_STUBS_H
/* Interrupt stubs.
These are little snippets of code in intr-stubs.S, one for
each of the 256 possible x86 interrupts. Each one does a
little bit of stack manipulation, then jumps to intr_entry().
See intr-stubs.S for more information.
This array points to each of the interrupt stub entry points
so that intr_init() can easily find them. */
typedef void intr_stub_func (void);
extern intr_stub_func *intr_stubs[256];
/* Interrupt return path. */
void intr_exit (void);
#endif /* threads/intr-stubs.h */

src/threads/io.h

@@ -0,0 +1,115 @@
#ifndef THREADS_IO_H
#define THREADS_IO_H
#include <stddef.h>
#include <stdint.h>
/* Reads and returns a byte from PORT. */
static inline uint8_t
inb (uint16_t port)
{
/* See [IA32-v2a] "IN". */
uint8_t data;
asm volatile ("inb %w1, %b0" : "=a" (data) : "Nd" (port));
return data;
}
/* Reads CNT bytes from PORT, one after another, and stores them
into the buffer starting at ADDR. */
static inline void
insb (uint16_t port, void *addr, size_t cnt)
{
/* See [IA32-v2a] "INS". */
asm volatile ("rep insb" : "+D" (addr), "+c" (cnt) : "d" (port) : "memory");
}
/* Reads and returns 16 bits from PORT. */
static inline uint16_t
inw (uint16_t port)
{
uint16_t data;
/* See [IA32-v2a] "IN". */
asm volatile ("inw %w1, %w0" : "=a" (data) : "Nd" (port));
return data;
}
/* Reads CNT 16-bit (halfword) units from PORT, one after
another, and stores them into the buffer starting at ADDR. */
static inline void
insw (uint16_t port, void *addr, size_t cnt)
{
/* See [IA32-v2a] "INS". */
asm volatile ("rep insw" : "+D" (addr), "+c" (cnt) : "d" (port) : "memory");
}
/* Reads and returns 32 bits from PORT. */
static inline uint32_t
inl (uint16_t port)
{
/* See [IA32-v2a] "IN". */
uint32_t data;
asm volatile ("inl %w1, %0" : "=a" (data) : "Nd" (port));
return data;
}
/* Reads CNT 32-bit (word) units from PORT, one after another,
and stores them into the buffer starting at ADDR. */
static inline void
insl (uint16_t port, void *addr, size_t cnt)
{
/* See [IA32-v2a] "INS". */
asm volatile ("rep insl" : "+D" (addr), "+c" (cnt) : "d" (port) : "memory");
}
/* Writes byte DATA to PORT. */
static inline void
outb (uint16_t port, uint8_t data)
{
/* See [IA32-v2b] "OUT". */
asm volatile ("outb %b0, %w1" : : "a" (data), "Nd" (port));
}
/* Writes to PORT each byte of data in the CNT-byte buffer
starting at ADDR. */
static inline void
outsb (uint16_t port, const void *addr, size_t cnt)
{
/* See [IA32-v2b] "OUTS". */
asm volatile ("rep outsb" : "+S" (addr), "+c" (cnt) : "d" (port));
}
/* Writes the 16-bit DATA to PORT. */
static inline void
outw (uint16_t port, uint16_t data)
{
/* See [IA32-v2b] "OUT". */
asm volatile ("outw %w0, %w1" : : "a" (data), "Nd" (port));
}
/* Writes to PORT each 16-bit unit (halfword) of data in the
CNT-halfword buffer starting at ADDR. */
static inline void
outsw (uint16_t port, const void *addr, size_t cnt)
{
/* See [IA32-v2b] "OUTS". */
asm volatile ("rep outsw" : "+S" (addr), "+c" (cnt) : "d" (port));
}
/* Writes the 32-bit DATA to PORT. */
static inline void
outl (uint16_t port, uint32_t data)
{
/* See [IA32-v2b] "OUT". */
asm volatile ("outl %0, %w1" : : "a" (data), "Nd" (port));
}
/* Writes to PORT each 32-bit unit (word) of data in the CNT-word
buffer starting at ADDR. */
static inline void
outsl (uint16_t port, const void *addr, size_t cnt)
{
/* See [IA32-v2b] "OUTS". */
asm volatile ("rep outsl" : "+S" (addr), "+c" (cnt) : "d" (port));
}
#endif /* threads/io.h */

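A sketch of these helpers in use: poll a 16550 UART at the conventional COM1 base 0x3f8 until the line status register reports the transmit holding register empty, then write one byte. This is roughly what devices/serial.c does in polling mode; the function name is made up, and the code only compiles inside the kernel tree:

#include <stdint.h>
#include "threads/io.h"

#define COM1_BASE 0x3f8
#define COM1_LSR  (COM1_BASE + 5)   /* Line status register. */
#define LSR_THRE  0x20              /* Transmit holding register empty. */

static void
poll_putc (uint8_t byte)
{
  while ((inb (COM1_LSR) & LSR_THRE) == 0)
    continue;                       /* Busy-wait for the UART. */
  outb (COM1_BASE, byte);
}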
src/threads/kernel.lds.S

@@ -0,0 +1,30 @@
#include "threads/loader.h"
OUTPUT_FORMAT("elf32-i386")
OUTPUT_ARCH("i386")
ENTRY(start) /* Kernel starts at "start" symbol. */
SECTIONS
{
/* Specify the kernel base address. */
_start = LOADER_PHYS_BASE + LOADER_KERN_BASE;
/* Make room for the ELF headers. */
. = _start + SIZEOF_HEADERS;
/* Kernel starts with code, followed by read-only data and writable data. */
.text : { *(.start) *(.text) } = 0x90
.rodata : { *(.rodata) *(.rodata.*)
. = ALIGN(0x1000);
_end_kernel_text = .; }
.data : { *(.data) *(.data.*)
_signature = .; LONG(0xaa55aa55) }
/* BSS (zero-initialized data) is after everything else. */
_start_bss = .;
.bss : { *(.bss) }
_end_bss = .;
_end = .;
ASSERT (_end - _start <= 512K, "Kernel image is too big.")
}

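A host-runnable check of the base-address arithmetic above, using the constants from threads/loader.h: the kernel is linked at LOADER_PHYS_BASE + LOADER_KERN_BASE.

#include <stdio.h>

#define LOADER_PHYS_BASE 0xc0000000u   /* 3 GB. */
#define LOADER_KERN_BASE 0x20000u      /* 128 kB. */

int
main (void)
{
  printf ("_start = %#x\n", LOADER_PHYS_BASE + LOADER_KERN_BASE);  /* 0xc0020000. */
  return 0;
}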
src/threads/loader.S

@@ -0,0 +1,263 @@
#include "threads/loader.h"
#### Kernel loader.
#### This code should be stored in the first sector of a hard disk.
#### When the BIOS runs, it loads this code at physical address
#### 0x7c00-0x7e00 (512 bytes) and jumps to the beginning of it,
#### in real mode. The loader loads the kernel into memory and jumps
#### to its entry point, which is the start function in start.S.
####
#### The BIOS passes in the drive that the loader was read from as
#### DL, with floppy drives numbered 0x00, 0x01, ... and hard drives
#### numbered 0x80, 0x81, ... We want to support booting a kernel on
#### a different drive from the loader, so we don't take advantage of
#### this.
# Runs in real mode, which is a 16-bit segment.
.code16
# Set up segment registers.
# Set stack to grow downward from 60 kB (after boot, the kernel
# continues to use this stack for its initial thread).
sub %ax, %ax
mov %ax, %ds
mov %ax, %ss
mov $0xf000, %esp
# Configure serial port so we can report progress without connected VGA.
# See [IntrList] for details.
sub %dx, %dx # Serial port 0.
mov $0xe3, %al # 9600 bps, N-8-1.
# AH is already 0 (Initialize Port).
int $0x14 # Destroys AX.
call puts
.string "PiLo"
#### Read the partition table on each system hard disk and scan for a
#### partition of type 0x20, which is the type that we use for a
#### PintOS kernel.
####
#### Read [Partitions] for a description of the partition table format
#### that we parse.
####
#### We print out status messages to show the disk and partition being
#### scanned, e.g. hda1234 as we scan four partitions on the first
#### hard disk.
mov $0x80, %dl # Hard disk 0.
read_mbr:
sub %ebx, %ebx # Sector 0.
mov $0x2000, %ax # Use 0x20000 for buffer.
mov %ax, %es
call read_sector
jc no_such_drive
# Print hd[a-z].
call puts
.string " hd"
mov %dl, %al
add $'a' - 0x80, %al
call putc
# Check for MBR signature--if not present, it's not a
# partitioned hard disk.
cmpw $0xaa55, %es:510
jne next_drive
mov $446, %si # Offset of partition table entry 1.
mov $'1', %al
check_partition:
# Is it an unused partition?
cmpl $0, %es:(%si)
je next_partition
# Print [1-4].
call putc
# Is it a PintOS kernel partition?
cmpb $0x20, %es:4(%si)
jne next_partition
# Is it a bootable partition?
cmpb $0x80, %es:(%si)
je load_kernel
next_partition:
# No match for this partition, go on to the next one.
add $16, %si # Offset to next partition table entry.
inc %al
cmp $510, %si
jb check_partition
next_drive:
# No match on this drive, go on to the next one.
inc %dl
jnc read_mbr
no_such_drive:
no_boot_partition:
# Didn't find a PintOS kernel partition anywhere, give up.
call puts
.string "\rNot found\r"
# Notify BIOS that boot failed. See [IntrList].
int $0x18
#### We found a kernel. The kernel's drive is in DL. The partition
#### table entry for the kernel's partition is at ES:SI. Our job now
#### is to read the kernel from disk and jump to its start address.
load_kernel:
call puts
.string "\rLoading"
# Figure out number of sectors to read. A PintOS kernel is
# just an ELF format object, which doesn't have an
# easy-to-read field to identify its own size (see [ELF1]).
# But we limit PintOS kernels to 512 kB for other reasons, so
# it's easy enough to just read the entire contents of the
# partition or 512 kB from disk, whichever is smaller.
mov %es:12(%si), %ecx # ECX = number of sectors
cmp $1024, %ecx # Cap size at 512 kB
jbe 1f
mov $1024, %cx
1:
mov %es:8(%si), %ebx # EBX = first sector
mov $0x2000, %ax # Start load address: 0x20000
next_sector:
# Read one sector into memory.
mov %ax, %es # ES:0000 -> load address
call read_sector
jc read_failed
# Print '.' as progress indicator once every 16 sectors == 8 kB.
test $15, %bl
jnz 1f
call puts
.string "."
1:
# Advance memory pointer and disk sector.
add $0x20, %ax
inc %bx
loop next_sector
call puts
.string "\r"
#### Transfer control to the kernel that we loaded. We read the start
#### address out of the ELF header (see [ELF1]) and convert it from a
#### 32-bit linear address into a 16:16 segment:offset address for
#### real mode, then jump to the converted address. The 80x86 doesn't
#### have an instruction to jump to an absolute segment:offset kept in
#### registers, so in fact we store the address in a temporary memory
#### location, then jump indirectly through that location. To save 4
#### bytes in the loader, we reuse 4 bytes of the loader's code for
#### this temporary pointer.
mov $0x2000, %ax
mov %ax, %es
mov %es:0x18, %dx
mov %dx, start
movw $0x2000, start + 2
ljmp *start
read_failed:
start:
# Disk sector read failed.
call puts
1: .string "\rBad read\r"
# Notify BIOS that boot failed. See [IntrList].
int $0x18
#### Print string subroutine. To save space in the loader, this
#### subroutine takes its null-terminated string argument from the
#### code stream just after the call, and then returns to the byte
#### just after the terminating null. This subroutine preserves all
#### general-purpose registers.
puts: xchg %si, %ss:(%esp)
push %ax
next_char:
mov %cs:(%si), %al
inc %si
test %al, %al
jz 1f
call putc
jmp next_char
1: pop %ax
xchg %si, %ss:(%esp)
ret
#### Character output subroutine. Prints the character in AL to the
#### VGA display and serial port 0, using BIOS services (see
#### [IntrList]). Preserves all general-purpose registers.
####
#### If called upon to output a carriage return, this subroutine
#### automatically supplies the following line feed.
putc: pusha
1: sub %bh, %bh # Page 0.
mov $0x0e, %ah # Teletype output service.
int $0x10
mov $0x01, %ah # Serial port output service.
sub %dx, %dx # Serial port 0.
2: int $0x14 # Destroys AH.
test $0x80, %ah # Output timed out?
jz 3f
movw $0x9090, 2b # Turn "int $0x14" above into NOPs.
3:
cmp $'\r', %al
jne popa_ret
mov $'\n', %al
jmp 1b
#### Sector read subroutine. Takes a drive number in DL (0x80 = hard
#### disk 0, 0x81 = hard disk 1, ...) and a sector number in EBX, and
#### reads the specified sector into memory at ES:0000. Returns with
#### carry set on error, clear otherwise. Preserves all
#### general-purpose registers.
read_sector:
pusha
sub %ax, %ax
push %ax # LBA sector number [48:63]
push %ax # LBA sector number [32:47]
push %ebx # LBA sector number [0:31]
push %es # Buffer segment
push %ax # Buffer offset (always 0)
push $1 # Number of sectors to read
push $16 # Packet size
mov $0x42, %ah # Extended read
mov %sp, %si # DS:SI -> packet
int $0x13 # Error code in CF
popa # Pop 16 bytes, preserve flags
popa_ret:
popa
ret # Error code still in CF
#### Command-line arguments and their count.
#### This is written by the `pintos' utility and read by the kernel.
#### The loader itself does not do anything with the command line.
.org LOADER_ARG_CNT - LOADER_BASE
.fill LOADER_ARG_CNT_LEN, 1, 0
.org LOADER_ARGS - LOADER_BASE
.fill LOADER_ARGS_LEN, 1, 0
#### Partition table.
.org LOADER_PARTS - LOADER_BASE
.fill LOADER_PARTS_LEN, 1, 0
#### Boot-sector signature for BIOS inspection.
.org LOADER_SIG - LOADER_BASE
.word 0xaa55

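The jump sequence above converts the 32-bit load address into a real-mode 16:16 segment:offset pair; since a real-mode linear address is segment * 16 + offset, 0x20000 becomes 0x2000:0x0000. A host-runnable sketch of the general conversion for a paragraph-aligned address below 1 MB:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t linear = 0x20000;         /* Kernel load address. */
  uint16_t seg = linear >> 4;        /* 0x2000. */
  uint16_t off = linear & 0xf;       /* 0x0000. */

  printf ("linear %#" PRIx32 " -> %#" PRIx16 ":%#" PRIx16 "\n",
          linear, seg, off);
  return 0;
}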
src/threads/loader.h

@@ -0,0 +1,40 @@
#ifndef THREADS_LOADER_H
#define THREADS_LOADER_H
/* Constants fixed by the PC BIOS. */
#define LOADER_BASE 0x7c00 /* Physical address of loader's base. */
#define LOADER_END 0x7e00 /* Physical address of end of loader. */
/* Physical address of kernel base. */
#define LOADER_KERN_BASE 0x20000 /* 128 kB. */
/* Kernel virtual address at which all physical memory is mapped.
Must be aligned on a 4 MB boundary. */
#define LOADER_PHYS_BASE 0xc0000000 /* 3 GB. */
/* Important loader physical addresses. */
#define LOADER_SIG (LOADER_END - LOADER_SIG_LEN) /* 0xaa55 BIOS signature. */
#define LOADER_PARTS (LOADER_SIG - LOADER_PARTS_LEN) /* Partition table. */
#define LOADER_ARGS (LOADER_PARTS - LOADER_ARGS_LEN) /* Command-line args. */
#define LOADER_ARG_CNT (LOADER_ARGS - LOADER_ARG_CNT_LEN) /* Number of args. */
/* Sizes of loader data structures. */
#define LOADER_SIG_LEN 2
#define LOADER_PARTS_LEN 64
#define LOADER_ARGS_LEN 128
#define LOADER_ARG_CNT_LEN 4
/* GDT selectors defined by loader.
More selectors are defined by userprog/gdt.h. */
#define SEL_NULL 0x00 /* Null selector. */
#define SEL_KCSEG 0x08 /* Kernel code selector. */
#define SEL_KDSEG 0x10 /* Kernel data selector. */
#ifndef __ASSEMBLER__
#include <stdint.h>
/* Amount of physical memory, in 4 kB pages. */
extern uint32_t init_ram_pages;
#endif
#endif /* threads/loader.h */

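The loader data structures are packed downward from the boot-sector signature, so the macros above resolve to fixed physical addresses. A host-runnable sketch that evaluates them (expected: sig 0x7dfe, parts 0x7dbe, args 0x7d3e, arg_cnt 0x7d3a):

#include <stdio.h>

#define LOADER_END         0x7e00
#define LOADER_SIG_LEN     2
#define LOADER_PARTS_LEN   64
#define LOADER_ARGS_LEN    128
#define LOADER_ARG_CNT_LEN 4

#define LOADER_SIG     (LOADER_END - LOADER_SIG_LEN)
#define LOADER_PARTS   (LOADER_SIG - LOADER_PARTS_LEN)
#define LOADER_ARGS    (LOADER_PARTS - LOADER_ARGS_LEN)
#define LOADER_ARG_CNT (LOADER_ARGS - LOADER_ARG_CNT_LEN)

int
main (void)
{
  printf ("sig %#x parts %#x args %#x arg_cnt %#x\n",
          LOADER_SIG, LOADER_PARTS, LOADER_ARGS, LOADER_ARG_CNT);
  return 0;
}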
src/threads/malloc.c

@@ -0,0 +1,294 @@
#include "threads/malloc.h"
#include <debug.h>
#include <list.h>
#include <round.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "threads/palloc.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
/* A simple implementation of malloc().
The size of each request, in bytes, is rounded up to a power
of 2 and assigned to the "descriptor" that manages blocks of
that size. The descriptor keeps a list of free blocks. If
the free list is nonempty, one of its blocks is used to
satisfy the request.
Otherwise, a new page of memory, called an "arena", is
obtained from the page allocator (if none is available,
malloc() returns a null pointer). The new arena is divided
into blocks, all of which are added to the descriptor's free
list. Then we return one of the new blocks.
When we free a block, we add it to its descriptor's free list.
But if the arena that the block was in now has no in-use
blocks, we remove all of the arena's blocks from the free list
and give the arena back to the page allocator.
We can't handle blocks bigger than 2 kB using this scheme,
because they're too big to fit in a single page with a
descriptor. We handle those by allocating contiguous pages
with the page allocator and sticking the allocation size at
the beginning of the allocated block's arena header. */
/* Descriptor. */
struct desc
{
size_t block_size; /* Size of each element in bytes. */
size_t blocks_per_arena; /* Number of blocks in an arena. */
struct list free_list; /* List of free blocks. */
struct lock lock; /* Lock. */
};
/* Magic number for detecting arena corruption. */
#define ARENA_MAGIC 0x9a548eed
/* Arena. */
struct arena
{
unsigned magic; /* Always set to ARENA_MAGIC. */
struct desc *desc; /* Owning descriptor, null for big block. */
size_t free_cnt; /* Free blocks; pages in big block. */
};
/* Free block. */
struct block
{
struct list_elem free_elem; /* Free list element. */
};
/* Our set of descriptors. */
static struct desc descs[10]; /* Descriptors. */
static size_t desc_cnt; /* Number of descriptors. */
static struct arena *block_to_arena (struct block *);
static struct block *arena_to_block (struct arena *, size_t idx);
/* Initializes the malloc() descriptors. */
void
malloc_init (void)
{
size_t block_size;
for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
{
struct desc *d = &descs[desc_cnt++];
ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
d->block_size = block_size;
d->blocks_per_arena = (PGSIZE - sizeof (struct arena)) / block_size;
list_init (&d->free_list);
lock_init (&d->lock);
}
}
/* Obtains and returns a new block of at least SIZE bytes.
Returns a null pointer if memory is not available. */
void *
malloc (size_t size)
{
struct desc *d;
struct block *b;
struct arena *a;
/* A null pointer satisfies a request for 0 bytes. */
if (size == 0)
return NULL;
/* Find the smallest descriptor that satisfies a SIZE-byte
request. */
for (d = descs; d < descs + desc_cnt; d++)
if (d->block_size >= size)
break;
if (d == descs + desc_cnt)
{
/* SIZE is too big for any descriptor.
Allocate enough pages to hold SIZE plus an arena. */
size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
a = palloc_get_multiple (0, page_cnt);
if (a == NULL)
return NULL;
/* Initialize the arena to indicate a big block of PAGE_CNT
pages, and return it. */
a->magic = ARENA_MAGIC;
a->desc = NULL;
a->free_cnt = page_cnt;
return a + 1;
}
lock_acquire (&d->lock);
/* If the free list is empty, create a new arena. */
if (list_empty (&d->free_list))
{
size_t i;
/* Allocate a page. */
a = palloc_get_page (0);
if (a == NULL)
{
lock_release (&d->lock);
return NULL;
}
/* Initialize arena and add its blocks to the free list. */
a->magic = ARENA_MAGIC;
a->desc = d;
a->free_cnt = d->blocks_per_arena;
for (i = 0; i < d->blocks_per_arena; i++)
{
struct block *b = arena_to_block (a, i);
list_push_back (&d->free_list, &b->free_elem);
}
}
/* Get a block from free list and return it. */
b = list_entry (list_pop_front (&d->free_list), struct block, free_elem);
a = block_to_arena (b);
a->free_cnt--;
lock_release (&d->lock);
return b;
}
/* Allocates and returns A times B bytes initialized to zeros.
Returns a null pointer if memory is not available. */
void *
calloc (size_t a, size_t b)
{
void *p;
size_t size;
/* Calculate block size, checking that the multiplication
does not overflow size_t. */
size = a * b;
if (b != 0 && size / b != a)
return NULL;
/* Allocate and zero memory. */
p = malloc (size);
if (p != NULL)
memset (p, 0, size);
return p;
}
/* Returns the number of bytes allocated for BLOCK. */
static size_t
block_size (void *block)
{
struct block *b = block;
struct arena *a = block_to_arena (b);
struct desc *d = a->desc;
return d != NULL ? d->block_size : PGSIZE * a->free_cnt - pg_ofs (block);
}
/* Attempts to resize OLD_BLOCK to NEW_SIZE bytes, possibly
moving it in the process.
If successful, returns the new block; on failure, returns a
null pointer.
A call with null OLD_BLOCK is equivalent to malloc(NEW_SIZE).
A call with zero NEW_SIZE is equivalent to free(OLD_BLOCK). */
void *
realloc (void *old_block, size_t new_size)
{
if (new_size == 0)
{
free (old_block);
return NULL;
}
else
{
void *new_block = malloc (new_size);
if (old_block != NULL && new_block != NULL)
{
size_t old_size = block_size (old_block);
size_t min_size = new_size < old_size ? new_size : old_size;
memcpy (new_block, old_block, min_size);
free (old_block);
}
return new_block;
}
}
/* Frees block P, which must have been previously allocated with
malloc(), calloc(), or realloc(). */
void
free (void *p)
{
if (p != NULL)
{
struct block *b = p;
struct arena *a = block_to_arena (b);
struct desc *d = a->desc;
if (d != NULL)
{
/* It's a normal block. We handle it here. */
#ifndef NDEBUG
/* Clear the block to help detect use-after-free bugs. */
memset (b, 0xcc, d->block_size);
#endif
lock_acquire (&d->lock);
/* Add block to free list. */
list_push_front (&d->free_list, &b->free_elem);
/* If the arena is now entirely unused, free it. */
if (++a->free_cnt >= d->blocks_per_arena)
{
size_t i;
ASSERT (a->free_cnt == d->blocks_per_arena);
for (i = 0; i < d->blocks_per_arena; i++)
{
struct block *b = arena_to_block (a, i);
list_remove (&b->free_elem);
}
palloc_free_page (a);
}
lock_release (&d->lock);
}
else
{
/* It's a big block. Free its pages. */
palloc_free_multiple (a, a->free_cnt);
return;
}
}
}
/* Returns the arena that block B is inside. */
static struct arena *
block_to_arena (struct block *b)
{
struct arena *a = pg_round_down (b);
/* Check that the arena is valid. */
ASSERT (a != NULL);
ASSERT (a->magic == ARENA_MAGIC);
/* Check that the block is properly aligned for the arena. */
ASSERT (a->desc == NULL
|| (pg_ofs (b) - sizeof *a) % a->desc->block_size == 0);
ASSERT (a->desc != NULL || pg_ofs (b) == sizeof *a);
return a;
}
/* Returns the IDX'th block (counting from 0) within arena A. */
static struct block *
arena_to_block (struct arena *a, size_t idx)
{
ASSERT (a != NULL);
ASSERT (a->magic == ARENA_MAGIC);
ASSERT (idx < a->desc->blocks_per_arena);
return (struct block *) ((uint8_t *) a
+ sizeof *a
+ idx * a->desc->block_size);
}

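malloc_init() above creates one descriptor per power-of-two size class from 16 bytes up to PGSIZE / 2. A host-runnable sketch of the resulting classes and blocks_per_arena values, assuming the ia32 layout where PGSIZE is 4096 and struct arena occupies 12 bytes (unsigned + pointer + size_t); on other targets the header size differs:

#include <stddef.h>
#include <stdio.h>

#define PGSIZE    4096
#define ARENA_HDR 12     /* Assumed sizeof (struct arena) on ia32. */

int
main (void)
{
  size_t block_size;

  for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
    printf ("%4zu-byte blocks: %zu per arena\n",
            block_size, (PGSIZE - ARENA_HDR) / block_size);
  return 0;
}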
src/threads/malloc.h

@@ -0,0 +1,13 @@
#ifndef THREADS_MALLOC_H
#define THREADS_MALLOC_H
#include <debug.h>
#include <stddef.h>
void malloc_init (void);
void *malloc (size_t) __attribute__ ((malloc));
void *calloc (size_t, size_t) __attribute__ ((malloc));
void *realloc (void *, size_t);
void free (void *);
#endif /* threads/malloc.h */

src/threads/palloc.c

@@ -0,0 +1,182 @@
#include "threads/palloc.h"
#include <bitmap.h>
#include <debug.h>
#include <inttypes.h>
#include <round.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "threads/loader.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
/* Page allocator. Hands out memory in page-size (or
page-multiple) chunks. See malloc.h for an allocator that
hands out smaller chunks.
System memory is divided into two "pools" called the kernel
and user pools. The user pool is for user (virtual) memory
pages, the kernel pool for everything else. The idea here is
that the kernel needs to have memory for its own operations
even if user processes are swapping like mad.
By default, half of system RAM is given to the kernel pool and
half to the user pool. That should be huge overkill for the
kernel pool, but that's just fine for demonstration purposes. */
/* A memory pool. */
struct pool
{
struct lock lock; /* Mutual exclusion. */
struct bitmap *used_map; /* Bitmap of free pages. */
uint8_t *base; /* Base of pool. */
};
/* Two pools: one for kernel data, one for user pages. */
static struct pool kernel_pool, user_pool;
static void init_pool (struct pool *, void *base, size_t page_cnt,
const char *name);
static bool page_from_pool (const struct pool *, void *page);
/* Initializes the page allocator. At most USER_PAGE_LIMIT
pages are put into the user pool. */
void
palloc_init (size_t user_page_limit)
{
/* Free memory starts at 1 MB and runs to the end of RAM. */
uint8_t *free_start = ptov (1024 * 1024);
uint8_t *free_end = ptov (init_ram_pages * PGSIZE);
size_t free_pages = (free_end - free_start) / PGSIZE;
size_t user_pages = free_pages / 2;
size_t kernel_pages;
if (user_pages > user_page_limit)
user_pages = user_page_limit;
kernel_pages = free_pages - user_pages;
/* Give half of memory to kernel, half to user. */
init_pool (&kernel_pool, free_start, kernel_pages, "kernel pool");
init_pool (&user_pool, free_start + kernel_pages * PGSIZE,
user_pages, "user pool");
}
/* Obtains and returns a group of PAGE_CNT contiguous free pages.
If PAL_USER is set, the pages are obtained from the user pool,
otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
then the pages are filled with zeros. If too few pages are
available, returns a null pointer, unless PAL_ASSERT is set in
FLAGS, in which case the kernel panics. */
void *
palloc_get_multiple (enum palloc_flags flags, size_t page_cnt)
{
struct pool *pool = flags & PAL_USER ? &user_pool : &kernel_pool;
void *pages;
size_t page_idx;
if (page_cnt == 0)
return NULL;
lock_acquire (&pool->lock);
page_idx = bitmap_scan_and_flip (pool->used_map, 0, page_cnt, false);
lock_release (&pool->lock);
if (page_idx != BITMAP_ERROR)
pages = pool->base + PGSIZE * page_idx;
else
pages = NULL;
if (pages != NULL)
{
if (flags & PAL_ZERO)
memset (pages, 0, PGSIZE * page_cnt);
}
else
{
if (flags & PAL_ASSERT)
PANIC ("palloc_get: out of pages");
}
return pages;
}
/* Obtains a single free page and returns its kernel virtual
address.
If PAL_USER is set, the page is obtained from the user pool,
otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
then the page is filled with zeros. If no pages are
available, returns a null pointer, unless PAL_ASSERT is set in
FLAGS, in which case the kernel panics. */
void *
palloc_get_page (enum palloc_flags flags)
{
return palloc_get_multiple (flags, 1);
}
/* Frees the PAGE_CNT pages starting at PAGES. */
void
palloc_free_multiple (void *pages, size_t page_cnt)
{
struct pool *pool;
size_t page_idx;
ASSERT (pg_ofs (pages) == 0);
if (pages == NULL || page_cnt == 0)
return;
if (page_from_pool (&kernel_pool, pages))
pool = &kernel_pool;
else if (page_from_pool (&user_pool, pages))
pool = &user_pool;
else
NOT_REACHED ();
page_idx = pg_no (pages) - pg_no (pool->base);
#ifndef NDEBUG
memset (pages, 0xcc, PGSIZE * page_cnt);
#endif
ASSERT (bitmap_all (pool->used_map, page_idx, page_cnt));
bitmap_set_multiple (pool->used_map, page_idx, page_cnt, false);
}
/* Frees the page at PAGE. */
void
palloc_free_page (void *page)
{
palloc_free_multiple (page, 1);
}
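/* Illustrative usage sketch (not part of the original file):
   obtain one zeroed kernel page, panicking instead of returning a
   null pointer on exhaustion, then release it.  Flags combine
   with bitwise OR. */
static void
palloc_example (void)
{
  uint8_t *page = palloc_get_page (PAL_ZERO | PAL_ASSERT);
  page[0] = 1;                  /* The 4 kB at PAGE are now ours. */
  palloc_free_page (page);
}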
/* Initializes pool P, which covers PAGE_CNT pages starting at BASE,
   naming it NAME for debugging purposes. */
static void
init_pool (struct pool *p, void *base, size_t page_cnt, const char *name)
{
/* We'll put the pool's used_map at its base.
Calculate the space needed for the bitmap
and subtract it from the pool's size. */
size_t bm_pages = DIV_ROUND_UP (bitmap_buf_size (page_cnt), PGSIZE);
if (bm_pages > page_cnt)
PANIC ("Not enough memory in %s for bitmap.", name);
page_cnt -= bm_pages;
printf ("%zu pages available in %s.\n", page_cnt, name);
/* Initialize the pool. */
lock_init (&p->lock);
p->used_map = bitmap_create_in_buf (page_cnt, base, bm_pages * PGSIZE);
p->base = base + bm_pages * PGSIZE;
}
/* Returns true if PAGE was allocated from POOL,
false otherwise. */
static bool
page_from_pool (const struct pool *pool, void *page)
{
size_t page_no = pg_no (page);
size_t start_page = pg_no (pool->base);
size_t end_page = start_page + bitmap_size (pool->used_map);
return page_no >= start_page && page_no < end_page;
}

20
src/threads/palloc.h Normal file

@@ -0,0 +1,20 @@
#ifndef THREADS_PALLOC_H
#define THREADS_PALLOC_H
#include <stddef.h>
/* How to allocate pages. */
enum palloc_flags
{
PAL_ASSERT = 001, /* Panic on failure. */
PAL_ZERO = 002, /* Zero page contents. */
PAL_USER = 004 /* User page. */
};
void palloc_init (size_t user_page_limit);
void *palloc_get_page (enum palloc_flags);
void *palloc_get_multiple (enum palloc_flags, size_t page_cnt);
void palloc_free_page (void *);
void palloc_free_multiple (void *, size_t page_cnt);
#endif /* threads/palloc.h */

107
src/threads/pte.h Normal file

@@ -0,0 +1,107 @@
#ifndef THREADS_PTE_H
#define THREADS_PTE_H
#include "threads/vaddr.h"
/* Functions and macros for working with x86 hardware page
tables.
See vaddr.h for more generic functions and macros for virtual
addresses.
Virtual addresses are structured as follows:
31 22 21 12 11 0
+----------------------+----------------------+----------------------+
| Page Directory Index | Page Table Index | Page Offset |
+----------------------+----------------------+----------------------+
*/
/* Page table index (bits 12:21). */
#define PTSHIFT PGBITS /* First page table bit. */
#define PTBITS 10 /* Number of page table bits. */
#define PTSPAN (1 << PTBITS << PGBITS) /* Bytes covered by a page table. */
#define PTMASK BITMASK(PTSHIFT, PTBITS) /* Page table bits (12:21). */
/* Page directory index (bits 22:31). */
#define PDSHIFT (PTSHIFT + PTBITS) /* First page directory bit. */
#define PDBITS 10 /* Number of page dir bits. */
#define PDMASK BITMASK(PDSHIFT, PDBITS) /* Page directory bits (22:31). */
/* Obtains page table index from a virtual address. */
static inline unsigned pt_no (const void *va) {
return ((uintptr_t) va & PTMASK) >> PTSHIFT;
}
/* Obtains page directory index from a virtual address. */
static inline uintptr_t pd_no (const void *va) {
return (uintptr_t) va >> PDSHIFT;
}
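/* Worked example (illustrative; not part of the original header):
   decomposing the kernel virtual address 0xc0101234 with the
   functions above and pg_ofs() from vaddr.h. */
static inline void
pte_index_example (void)
{
  const void *va = (const void *) 0xc0101234;
  ASSERT (pd_no (va) == 0x300);    /* Bits 22:31. */
  ASSERT (pt_no (va) == 0x101);    /* Bits 12:21. */
  ASSERT (pg_ofs (va) == 0x234);   /* Bits 0:11. */
}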
/* Page directory and page table entries.
For more information see the section on page tables in the
PintOS reference guide chapter, or [IA32-v3a] 3.7.6
"Page-Directory and Page-Table Entries".
PDEs and PTEs share a common format:
31 12 11 0
+------------------------------------+------------------------+
| Physical Address | Flags |
+------------------------------------+------------------------+
In a PDE, the physical address points to a page table.
In a PTE, the physical address points to a data or code page.
The important flags are listed below.
When a PDE or PTE is not "present", the other flags are
ignored.
A PDE or PTE that is initialized to 0 will be interpreted as
"not present", which is just fine. */
#define PTE_FLAGS 0x00000fff /* Flag bits. */
#define PTE_ADDR 0xfffff000 /* Address bits. */
#define PTE_AVL 0x00000e00 /* Bits available for OS use. */
#define PTE_P 0x1 /* 1=present, 0=not present. */
#define PTE_W 0x2 /* 1=read/write, 0=read-only. */
#define PTE_U 0x4 /* 1=user/kernel, 0=kernel only. */
#define PTE_A 0x20 /* 1=accessed, 0=not accessed. */
#define PTE_D 0x40 /* 1=dirty, 0=not dirty (PTEs only). */
/* Returns a PDE that points to page table PT. */
static inline uint32_t pde_create (uint32_t *pt) {
ASSERT (pg_ofs (pt) == 0);
return vtop (pt) | PTE_U | PTE_P | PTE_W;
}
/* Returns a pointer to the page table that page directory entry
   PDE, which must be "present", points to. */
static inline uint32_t *pde_get_pt (uint32_t pde) {
ASSERT (pde & PTE_P);
return ptov (pde & PTE_ADDR);
}
/* Returns a PTE that points to PAGE.
The PTE's page is readable.
If WRITABLE is true then it will be writable as well.
The page will be usable only by ring 0 code (the kernel). */
static inline uint32_t pte_create_kernel (void *page, bool writable) {
ASSERT (pg_ofs (page) == 0);
return vtop (page) | PTE_P | (writable ? PTE_W : 0);
}
/* Returns a PTE that points to PAGE.
The PTE's page is readable.
If WRITABLE is true then it will be writable as well.
The page will be usable by both user and kernel code. */
static inline uint32_t pte_create_user (void *page, bool writable) {
return pte_create_kernel (page, writable) | PTE_U;
}
/* Returns a pointer to the page that page table entry PTE points
to. */
static inline void *pte_get_page (uint32_t pte) {
return ptov (pte & PTE_ADDR);
}
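/* Illustrative sketch (not part of the original header): building
   a writable, user-accessible PTE for a page-aligned kernel
   virtual address PAGE and reading the mapping back. */
static inline void
pte_example (void *page)
{
  uint32_t pte = pte_create_user (page, true);
  ASSERT ((pte & PTE_P) && (pte & PTE_W) && (pte & PTE_U));
  ASSERT (pte_get_page (pte) == page);
}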
#endif /* threads/pte.h */

204
src/threads/start.S Normal file

@@ -0,0 +1,204 @@
#include "threads/loader.h"
#### Kernel startup code.
#### The loader (in loader.S) loads the kernel at physical address
#### 0x20000 (128 kB) and jumps to "start", defined here. This code
#### switches from real mode to 32-bit protected mode and calls
#### main().
/* Flags in control register 0. */
#define CR0_PE 0x00000001 /* Protection Enable. */
#define CR0_EM 0x00000004 /* (Floating-point) Emulation. */
#define CR0_PG 0x80000000 /* Paging. */
#define CR0_WP 0x00010000 /* Write-Protect enable in kernel mode. */
.section .start
# The following code runs in real mode, which is a 16-bit code segment.
.code16
.func start
.globl start
start:
# The loader called into us with CS = 0x2000, SS = 0x0000, ESP = 0xf000,
# but we should initialize the other segment registers.
mov $0x2000, %ax
mov %ax, %ds
mov %ax, %es
# Set string instructions to go upward.
cld
#### Get memory size, via interrupt 15h function 88h (see [IntrList]),
#### which returns AX = (kB of physical memory) - 1024. This only
#### works for memory sizes <= 65 MB, which should be fine for our
#### purposes. We cap memory at 64 MB because that's all we prepare
#### page tables for, below.
movb $0x88, %ah
int $0x15
addl $1024, %eax # Total kB memory
cmp $0x10000, %eax # Cap at 64 MB
jbe 1f
mov $0x10000, %eax
1: shrl $2, %eax # Total 4 kB pages
addr32 movl %eax, init_ram_pages - LOADER_PHYS_BASE - 0x20000
#### Enable A20. Address line 20 is tied low when the machine boots,
#### which prevents addressing memory above 1 MB. This code fixes it.
# Poll status register while busy.
1: inb $0x64, %al
testb $0x2, %al
jnz 1b
# Send command for writing output port.
movb $0xd1, %al
outb %al, $0x64
# Poll status register while busy.
1: inb $0x64, %al
testb $0x2, %al
jnz 1b
# Enable A20 line.
movb $0xdf, %al
outb %al, $0x60
# Poll status register while busy.
1: inb $0x64, %al
testb $0x2, %al
jnz 1b
#### Create temporary page directory and page table and set page
#### directory base register.
# Create page directory at 0xf000 (60 kB) and fill with zeroes.
mov $0xf00, %ax
mov %ax, %es
subl %eax, %eax
subl %edi, %edi
movl $0x400, %ecx
rep stosl
# Add PDEs to point to page tables for the first 64 MB of RAM.
# Also add identical PDEs starting at LOADER_PHYS_BASE.
# See [IA32-v3a] section 3.7.6 "Page-Directory and Page-Table Entries"
# for a description of the bits in %eax.
movl $0x10007, %eax
movl $0x11, %ecx
subl %edi, %edi
1: movl %eax, %es:(%di)
movl %eax, %es:LOADER_PHYS_BASE >> 20(%di)
addw $4, %di
addl $0x1000, %eax
loop 1b
# Set up page tables for a one-to-one linear-to-physical mapping for the
# first 64 MB of RAM.
# See [IA32-v3a] section 3.7.6 "Page-Directory and Page-Table Entries"
# for a description of the bits in %eax.
movw $0x1000, %ax
movw %ax, %es
movl $0x7, %eax
movl $0x4000, %ecx
subl %edi, %edi
1: movl %eax, %es:(%di)
addw $4, %di
addl $0x1000, %eax
loop 1b
# Set page directory base register.
movl $0xf000, %eax
movl %eax, %cr3
#### Switch to protected mode.
# First, disable interrupts. We won't set up the IDT until we get
# into C code, so any interrupt would blow us away.
cli
# Protected mode requires a GDT, so point the GDTR to our GDT.
# We need a data32 prefix to ensure that all 32 bits of the GDT
# descriptor are loaded (default is to load only 24 bits).
# The CPU doesn't need an addr32 prefix but ELF doesn't do 16-bit
# relocations.
data32 addr32 lgdt gdtdesc - LOADER_PHYS_BASE - 0x20000
# Then we turn on the following bits in CR0:
# PE (Protect Enable): this turns on protected mode.
# PG (Paging): turns on paging.
# WP (Write Protect): if unset, ring 0 code ignores
# write-protect bits in page tables (!).
# EM (Emulation): forces floating-point instructions to trap.
# We don't support floating point.
movl %cr0, %eax
orl $CR0_PE | CR0_PG | CR0_WP | CR0_EM, %eax
movl %eax, %cr0
# We're now in protected mode in a 16-bit segment. The CPU still has
# the real-mode code segment cached in %cs's segment descriptor. We
# need to reload %cs, and the easiest way is to use a far jump.
# Because we're not running in a 32-bit segment the data32 prefix is
# needed to jump to a 32-bit offset in the target segment.
data32 ljmp $SEL_KCSEG, $1f
# We're now in protected mode in a 32-bit segment.
# Let the assembler know.
.code32
# Reload all the other segment registers and the stack pointer to
# point into our new GDT.
1: mov $SEL_KDSEG, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
mov %ax, %ss
addl $LOADER_PHYS_BASE, %esp
movl $0, %ebp # Null-terminate main()'s backtrace
#### Call main().
call main
# main() shouldn't ever return. If it does, spin.
1: jmp 1b
.endfunc
#### GDT
.align 8
gdt:
.quad 0x0000000000000000 # Null segment. Not used by CPU.
.quad 0x00cf9a000000ffff # System code, base 0, limit 4 GB.
.quad 0x00cf92000000ffff # System data, base 0, limit 4 GB.
gdtdesc:
.word gdtdesc - gdt - 1 # Size of the GDT, minus 1 byte.
.long gdt # Address of the GDT.
#### Physical memory size in 4 kB pages. This is exported to the rest
#### of the kernel.
.globl init_ram_pages
init_ram_pages:
.long 0

65
src/threads/switch.S Normal file

@@ -0,0 +1,65 @@
#include "threads/switch.h"
#### struct thread *switch_threads (struct thread *cur, struct thread *next);
####
#### Switches from CUR, which must be the running thread, to NEXT,
#### which must also be running switch_threads(), returning CUR in
#### NEXT's context.
####
#### This function works by assuming that the thread we're switching
#### into is also running switch_threads(). Thus, all it has to do is
#### preserve a few registers on the stack, then switch stacks and
#### restore the registers. As part of switching stacks we record the
#### current stack pointer in CUR's thread structure.
.globl switch_threads
.func switch_threads
switch_threads:
# Save caller's register state.
#
# Note that the SVR4 ABI allows us to destroy %eax, %ecx, %edx,
# but requires us to preserve %ebx, %ebp, %esi, %edi. See
# [SysV-ABI-386] pages 3-11 and 3-12 for details.
#
# This stack frame must match the one set up by thread_create()
# in size.
pushl %ebx
pushl %ebp
pushl %esi
pushl %edi
# Get offsetof (struct thread, stack).
.globl thread_stack_ofs
mov thread_stack_ofs, %edx
# Save current stack pointer to old thread's stack, if any.
movl SWITCH_CUR(%esp), %eax
movl %esp, (%eax,%edx,1)
# Restore stack pointer from new thread's stack.
movl SWITCH_NEXT(%esp), %ecx
movl (%ecx,%edx,1), %esp
# Restore caller's register state.
popl %edi
popl %esi
popl %ebp
popl %ebx
ret
.endfunc
.globl switch_entry
.func switch_entry
switch_entry:
# Discard switch_threads() arguments.
addl $8, %esp
# Call thread_schedule_tail(prev).
pushl %eax
.globl thread_schedule_tail
call thread_schedule_tail
addl $4, %esp
# Start thread proper.
ret
.endfunc

39
src/threads/switch.h Normal file

@@ -0,0 +1,39 @@
#ifndef THREADS_SWITCH_H
#define THREADS_SWITCH_H
#ifndef __ASSEMBLER__
/* switch_threads()'s stack frame. */
struct switch_threads_frame
{
uint32_t edi; /* 0: Saved %edi. */
uint32_t esi; /* 4: Saved %esi. */
uint32_t ebp; /* 8: Saved %ebp. */
uint32_t ebx; /* 12: Saved %ebx. */
void (*eip) (void); /* 16: Return address. */
struct thread *cur; /* 20: switch_threads()'s CUR argument. */
struct thread *next; /* 24: switch_threads()'s NEXT argument. */
};
/* Switches from CUR, which must be the running thread, to NEXT,
which must also be running switch_threads(), returning CUR in
NEXT's context. */
struct thread *switch_threads (struct thread *cur, struct thread *next);
/* Stack frame for switch_entry(). */
struct switch_entry_frame
{
void (*eip) (void);
};
void switch_entry (void);
/* Pops the CUR and NEXT arguments off the stack, for use in
initializing threads. */
void switch_thunk (void);
#endif
/* Offsets used by switch.S. */
#define SWITCH_CUR 20
#define SWITCH_NEXT 24
#endif /* threads/switch.h */

338
src/threads/synch.c Normal file

@@ -0,0 +1,338 @@
/* This file is derived from source code for the Nachos
instructional operating system. The Nachos copyright notice
is reproduced in full below. */
/* Copyright (c) 1992-1996 The Regents of the University of California.
All rights reserved.
Permission to use, copy, modify, and distribute this software
and its documentation for any purpose, without fee, and
without written agreement is hereby granted, provided that the
above copyright notice and the following two paragraphs appear
in all copies of this software.
IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO
ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE
AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA
HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
MODIFICATIONS.
*/
#include "threads/synch.h"
#include <stdio.h>
#include <string.h>
#include "threads/interrupt.h"
#include "threads/thread.h"
/* Initializes semaphore SEMA to VALUE. A semaphore is a
nonnegative integer along with two atomic operators for
manipulating it:
- down or "P": wait for the value to become positive, then
decrement it.
- up or "V": increment the value (and wake up one waiting
thread, if any). */
void
sema_init (struct semaphore *sema, unsigned value)
{
ASSERT (sema != NULL);
sema->value = value;
list_init (&sema->waiters);
}
/* Down or "P" operation on a semaphore. Waits for SEMA's value
to become positive and then atomically decrements it.
This function may sleep, so it must not be called within an
interrupt handler. This function may be called with
interrupts disabled, but if it sleeps then the next scheduled
thread will probably turn interrupts back on. */
void
sema_down (struct semaphore *sema)
{
enum intr_level old_level;
ASSERT (sema != NULL);
ASSERT (!intr_context ());
old_level = intr_disable ();
while (sema->value == 0)
{
list_push_back (&sema->waiters, &thread_current ()->elem);
thread_block ();
}
sema->value--;
intr_set_level (old_level);
}
/* Down or "P" operation on a semaphore, but only if the
semaphore is not already 0. Returns true if the semaphore is
decremented, false otherwise.
This function may be called from an interrupt handler. */
bool
sema_try_down (struct semaphore *sema)
{
enum intr_level old_level;
bool success;
ASSERT (sema != NULL);
old_level = intr_disable ();
if (sema->value > 0)
{
sema->value--;
success = true;
}
else
success = false;
intr_set_level (old_level);
return success;
}
/* Up or "V" operation on a semaphore. Increments SEMA's value
and wakes up one thread of those waiting for SEMA, if any.
This function may be called from an interrupt handler. */
void
sema_up (struct semaphore *sema)
{
enum intr_level old_level;
ASSERT (sema != NULL);
old_level = intr_disable ();
if (!list_empty (&sema->waiters))
thread_unblock (list_entry (list_pop_front (&sema->waiters),
struct thread, elem));
sema->value++;
intr_set_level (old_level);
}
static void sema_test_helper (void *sema_);
/* Self-test for semaphores that makes control "ping-pong"
between a pair of threads. Insert calls to printf() to see
what's going on. */
void
sema_self_test (void)
{
struct semaphore sema[2];
int i;
printf ("Testing semaphores...");
sema_init (&sema[0], 0);
sema_init (&sema[1], 0);
thread_create ("sema-test", PRI_DEFAULT, sema_test_helper, &sema);
for (i = 0; i < 10; i++)
{
sema_up (&sema[0]);
sema_down (&sema[1]);
}
printf ("done.\n");
}
/* Thread function used by sema_self_test(). */
static void
sema_test_helper (void *sema_)
{
struct semaphore *sema = sema_;
int i;
for (i = 0; i < 10; i++)
{
sema_down (&sema[0]);
sema_up (&sema[1]);
}
}
/* Initializes LOCK. A lock can be held by at most a single
thread at any given time. Our locks are not "recursive", that
is, it is an error for the thread currently holding a lock to
try to acquire that lock.
A lock is a specialization of a semaphore with an initial
value of 1. The difference between a lock and such a
semaphore is twofold. First, a semaphore can have a value
greater than 1, but a lock can only be owned by a single
thread at a time. Second, a semaphore does not have an owner,
meaning that one thread can "down" the semaphore and then
another one "up" it, but with a lock the same thread must both
acquire and release it. When these restrictions prove
onerous, it's a good sign that a semaphore should be used,
instead of a lock. */
void
lock_init (struct lock *lock)
{
ASSERT (lock != NULL);
lock->holder = NULL;
sema_init (&lock->semaphore, 1);
}
/* Acquires LOCK, sleeping until it becomes available if
necessary. The lock must not already be held by the current
thread.
This function may sleep, so it must not be called within an
interrupt handler. This function may be called with
interrupts disabled, but interrupts will be turned back on if
we need to sleep. */
void
lock_acquire (struct lock *lock)
{
ASSERT (lock != NULL);
ASSERT (!intr_context ());
ASSERT (!lock_held_by_current_thread (lock));
sema_down (&lock->semaphore);
lock->holder = thread_current ();
}
/* Tries to acquire LOCK and returns true if successful or false
on failure. The lock must not already be held by the current
thread.
This function will not sleep, so it may be called within an
interrupt handler. */
bool
lock_try_acquire (struct lock *lock)
{
bool success;
ASSERT (lock != NULL);
ASSERT (!lock_held_by_current_thread (lock));
success = sema_try_down (&lock->semaphore);
if (success)
lock->holder = thread_current ();
return success;
}
/* Releases LOCK, which must be owned by the current thread.
An interrupt handler cannot acquire a lock, so it does not
make sense to try to release a lock within an interrupt
handler. */
void
lock_release (struct lock *lock)
{
ASSERT (lock != NULL);
ASSERT (lock_held_by_current_thread (lock));
lock->holder = NULL;
sema_up (&lock->semaphore);
}
/* Returns true if the current thread holds LOCK, false
otherwise. (Note that testing whether some other thread holds
a lock would be racy.) */
bool
lock_held_by_current_thread (const struct lock *lock)
{
ASSERT (lock != NULL);
return lock->holder == thread_current ();
}
/* One semaphore in a list. */
struct semaphore_elem
{
struct list_elem elem; /* List element. */
struct semaphore semaphore; /* This semaphore. */
};
/* Initializes condition variable COND. A condition variable
allows one piece of code to signal a condition and cooperating
code to receive the signal and act upon it. */
void
cond_init (struct condition *cond)
{
ASSERT (cond != NULL);
list_init (&cond->waiters);
}
/* Atomically releases LOCK and waits for COND to be signaled by
some other piece of code. After COND is signaled, LOCK is
reacquired before returning. LOCK must be held before calling
this function.
The monitor implemented by this function is "Mesa" style, not
"Hoare" style, that is, sending and receiving a signal are not
an atomic operation. Thus, typically the caller must recheck
the condition after the wait completes and, if necessary, wait
again.
A given condition variable is associated with only a single
lock, but one lock may be associated with any number of
condition variables. That is, there is a one-to-many mapping
from locks to condition variables.
This function may sleep, so it must not be called within an
interrupt handler. This function may be called with
interrupts disabled, but interrupts will be turned back on if
we need to sleep. */
void
cond_wait (struct condition *cond, struct lock *lock)
{
struct semaphore_elem waiter;
ASSERT (cond != NULL);
ASSERT (lock != NULL);
ASSERT (!intr_context ());
ASSERT (lock_held_by_current_thread (lock));
sema_init (&waiter.semaphore, 0);
list_push_back (&cond->waiters, &waiter.elem);
lock_release (lock);
sema_down (&waiter.semaphore);
lock_acquire (lock);
}
/* If any threads are waiting on COND (protected by LOCK), then
this function signals one of them to wake up from its wait.
LOCK must be held before calling this function.
An interrupt handler cannot acquire a lock, so it does not
make sense to try to signal a condition variable within an
interrupt handler. */
void
cond_signal (struct condition *cond, struct lock *lock UNUSED)
{
ASSERT (cond != NULL);
ASSERT (lock != NULL);
ASSERT (!intr_context ());
ASSERT (lock_held_by_current_thread (lock));
if (!list_empty (&cond->waiters))
sema_up (&list_entry (list_pop_front (&cond->waiters),
struct semaphore_elem, elem)->semaphore);
}
/* Wakes up all threads, if any, waiting on COND (protected by
LOCK). LOCK must be held before calling this function.
An interrupt handler cannot acquire a lock, so it does not
make sense to try to signal a condition variable within an
interrupt handler. */
void
cond_broadcast (struct condition *cond, struct lock *lock)
{
ASSERT (cond != NULL);
ASSERT (lock != NULL);
while (!list_empty (&cond->waiters))
cond_signal (cond, lock);
}
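/* Illustrative usage sketch (not part of the original file): a
   minimal Mesa-style monitor built from one lock and one condition
   variable.  Because signaling is not atomic with waking, the
   waiter rechecks its condition in a loop, as the comment above
   cond_wait() requires.  All names here are hypothetical. */
static struct lock mon_lock;
static struct condition item_ready;
static int item_count;

static void
monitor_init (void)
{
  lock_init (&mon_lock);
  cond_init (&item_ready);
  item_count = 0;
}

static void
produce (void)
{
  lock_acquire (&mon_lock);
  item_count++;
  cond_signal (&item_ready, &mon_lock);
  lock_release (&mon_lock);
}

static void
consume (void)
{
  lock_acquire (&mon_lock);
  while (item_count == 0)              /* Mesa semantics: recheck. */
    cond_wait (&item_ready, &mon_lock);
  item_count--;
  lock_release (&mon_lock);
}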

51
src/threads/synch.h Normal file

@@ -0,0 +1,51 @@
#ifndef THREADS_SYNCH_H
#define THREADS_SYNCH_H
#include <list.h>
#include <stdbool.h>
/* A counting semaphore. */
struct semaphore
{
unsigned value; /* Current value. */
struct list waiters; /* List of waiting threads. */
};
void sema_init (struct semaphore *, unsigned value);
void sema_down (struct semaphore *);
bool sema_try_down (struct semaphore *);
void sema_up (struct semaphore *);
void sema_self_test (void);
/* Lock. */
struct lock
{
struct thread *holder; /* Thread holding lock (for debugging). */
struct semaphore semaphore; /* Binary semaphore controlling access. */
};
void lock_init (struct lock *);
void lock_acquire (struct lock *);
bool lock_try_acquire (struct lock *);
void lock_release (struct lock *);
bool lock_held_by_current_thread (const struct lock *);
/* Condition variable. */
struct condition
{
struct list waiters; /* List of waiting semaphore_elems. */
};
void cond_init (struct condition *);
void cond_wait (struct condition *, struct lock *);
void cond_signal (struct condition *, struct lock *);
void cond_broadcast (struct condition *, struct lock *);
/* Optimization barrier.
The compiler will not reorder operations across an
optimization barrier. See "Optimization Barriers" in the
reference guide for more information.*/
#define barrier() asm volatile ("" : : : "memory")
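/* Illustrative sketch (not part of the original header): a busy
   wait on a flag set elsewhere, e.g. by an interrupt handler.
   Without the barrier, the compiler could hoist the load of
   `done_flag' out of the loop; the barrier forces a re-read on
   each iteration.  The names are hypothetical. */
static bool done_flag;

static inline void
spin_until_done (void)
{
  while (!done_flag)
    barrier ();
}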
#endif /* threads/synch.h */

603
src/threads/thread.c Normal file

@@ -0,0 +1,603 @@
#include "threads/thread.h"
#include <debug.h>
#include <stddef.h>
#include <random.h>
#include <stdio.h>
#include <string.h>
#include "threads/flags.h"
#include "threads/interrupt.h"
#include "threads/intr-stubs.h"
#include "threads/palloc.h"
#include "threads/switch.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
#ifdef USERPROG
#include "userprog/process.h"
#endif
/* Random value for struct thread's `magic' member.
Used to detect stack overflow. See the big comment at the top
of thread.h for details. */
#define THREAD_MAGIC 0xcd6abf4b
/* List of processes in THREAD_READY state, that is, processes
that are ready to run but not actually running. */
static struct list ready_list;
/* List of all processes. Processes are added to this list
when they are first scheduled and removed when they exit. */
static struct list all_list;
/* Idle thread. */
static struct thread *idle_thread;
/* Initial thread, the thread running init.c:main(). */
static struct thread *initial_thread;
/* Lock used by allocate_tid(). */
static struct lock tid_lock;
/* Stack frame for kernel_thread(). */
struct kernel_thread_frame
{
void *eip; /* Return address. */
thread_func *function; /* Function to call. */
void *aux; /* Auxiliary data for function. */
};
/* Statistics. */
static long long idle_ticks; /* # of timer ticks spent idle. */
static long long kernel_ticks; /* # of timer ticks in kernel threads. */
static long long user_ticks; /* # of timer ticks in user programs. */
/* Scheduling. */
#define TIME_SLICE 4 /* # of timer ticks to give each thread. */
static unsigned thread_ticks; /* # of timer ticks since last yield. */
/* If false (default), use round-robin scheduler.
If true, use multi-level feedback queue scheduler.
Controlled by kernel command-line option "-mlfqs". */
bool thread_mlfqs;
static void kernel_thread (thread_func *, void *aux);
static void idle (void *aux UNUSED);
static struct thread *running_thread (void);
static struct thread *next_thread_to_run (void);
static void init_thread (struct thread *, const char *name, int priority);
static bool is_thread (struct thread *) UNUSED;
static void *alloc_frame (struct thread *, size_t size);
static void schedule (void);
void thread_schedule_tail (struct thread *prev);
static tid_t allocate_tid (void);
/* Initializes the threading system by transforming the code
that's currently running into a thread. This can't work in
general and it is possible in this case only because loader.S
was careful to put the bottom of the stack at a page boundary.
Also initializes the run queue and the tid lock.
After calling this function, be sure to initialize the page
allocator before trying to create any threads with
thread_create().
It is not safe to call thread_current() until this function
finishes. */
void
thread_init (void)
{
ASSERT (intr_get_level () == INTR_OFF);
lock_init (&tid_lock);
list_init (&ready_list);
list_init (&all_list);
/* Set up a thread structure for the running thread. */
initial_thread = running_thread ();
init_thread (initial_thread, "main", PRI_DEFAULT);
initial_thread->status = THREAD_RUNNING;
initial_thread->tid = allocate_tid ();
}
/* Starts preemptive thread scheduling by enabling interrupts.
Also creates the idle thread. */
void
thread_start (void)
{
/* Create the idle thread. */
struct semaphore idle_started;
sema_init (&idle_started, 0);
thread_create ("idle", PRI_MIN, idle, &idle_started);
/* Start preemptive thread scheduling. */
intr_enable ();
/* Wait for the idle thread to initialize idle_thread. */
sema_down (&idle_started);
}
/* Returns the number of threads currently in the ready list.
   Disables interrupts to avoid any race conditions on the ready list. */
size_t
threads_ready (void)
{
  enum intr_level old_level = intr_disable ();
  size_t ready_count = list_size (&ready_list);
  intr_set_level (old_level);
  return ready_count;
}
/* Called by the timer interrupt handler at each timer tick.
Thus, this function runs in an external interrupt context. */
void
thread_tick (void)
{
struct thread *t = thread_current ();
/* Update statistics. */
if (t == idle_thread)
idle_ticks++;
#ifdef USERPROG
else if (t->pagedir != NULL)
user_ticks++;
#endif
else
kernel_ticks++;
/* Enforce preemption. */
if (++thread_ticks >= TIME_SLICE)
intr_yield_on_return ();
}
/* Prints thread statistics. */
void
thread_print_stats (void)
{
printf ("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
idle_ticks, kernel_ticks, user_ticks);
}
/* Creates a new kernel thread named NAME with the given initial
PRIORITY, which executes FUNCTION passing AUX as the argument,
and adds it to the ready queue. Returns the thread identifier
for the new thread, or TID_ERROR if creation fails.
If thread_start() has been called, then the new thread may be
scheduled before thread_create() returns. It could even exit
before thread_create() returns. Contrariwise, the original
thread may run for any amount of time before the new thread is
scheduled. Use a semaphore or some other form of
synchronization if you need to ensure ordering.
The code provided sets the new thread's `priority' member to
PRIORITY, but no actual priority scheduling is implemented.
Priority scheduling is the goal of Problem 1-3. */
tid_t
thread_create (const char *name, int priority,
thread_func *function, void *aux)
{
struct thread *t;
struct kernel_thread_frame *kf;
struct switch_entry_frame *ef;
struct switch_threads_frame *sf;
tid_t tid;
enum intr_level old_level;
ASSERT (function != NULL);
/* Allocate thread. */
t = palloc_get_page (PAL_ZERO);
if (t == NULL)
return TID_ERROR;
/* Initialize thread. */
init_thread (t, name, priority);
tid = t->tid = allocate_tid ();
/* Prepare thread for first run by initializing its stack.
Do this atomically so intermediate values for the 'stack'
member cannot be observed. */
old_level = intr_disable ();
/* Stack frame for kernel_thread(). */
kf = alloc_frame (t, sizeof *kf);
kf->eip = NULL;
kf->function = function;
kf->aux = aux;
/* Stack frame for switch_entry(). */
ef = alloc_frame (t, sizeof *ef);
ef->eip = (void (*) (void)) kernel_thread;
/* Stack frame for switch_threads(). */
sf = alloc_frame (t, sizeof *sf);
sf->eip = switch_entry;
sf->ebp = 0;
intr_set_level (old_level);
/* Add to run queue. */
thread_unblock (t);
return tid;
}
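/* Illustrative sketch (not part of the original file): using a
   semaphore to enforce the ordering that the comment above
   thread_create() recommends, so the creator does not continue
   until the new thread has actually started running.  The names
   are hypothetical. */
static void
worker (void *started_)
{
  struct semaphore *started = started_;
  sema_up (started);                /* Announce that we are running. */
  /* ... thread body ... */
}

static void
create_worker_and_wait (void)
{
  struct semaphore started;
  sema_init (&started, 0);
  if (thread_create ("worker", PRI_DEFAULT, worker, &started) != TID_ERROR)
    sema_down (&started);           /* Block until worker() runs. */
}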
/* Puts the current thread to sleep. It will not be scheduled
again until awoken by thread_unblock().
This function must be called with interrupts turned off. It
is usually a better idea to use one of the synchronization
primitives in synch.h. */
void
thread_block (void)
{
ASSERT (!intr_context ());
ASSERT (intr_get_level () == INTR_OFF);
thread_current ()->status = THREAD_BLOCKED;
schedule ();
}
/* Transitions a blocked thread T to the ready-to-run state.
This is an error if T is not blocked. (Use thread_yield() to
make the running thread ready.)
This function does not preempt the running thread. This can
be important: if the caller had disabled interrupts itself,
it may expect that it can atomically unblock a thread and
update other data. */
void
thread_unblock (struct thread *t)
{
enum intr_level old_level;
ASSERT (is_thread (t));
old_level = intr_disable ();
ASSERT (t->status == THREAD_BLOCKED);
list_push_back (&ready_list, &t->elem);
t->status = THREAD_READY;
intr_set_level (old_level);
}
/* Returns the name of the running thread. */
const char *
thread_name (void)
{
return thread_current ()->name;
}
/* Returns the running thread.
This is running_thread() plus a couple of sanity checks.
See the big comment at the top of thread.h for details. */
struct thread *
thread_current (void)
{
struct thread *t = running_thread ();
/* Make sure T is really a thread.
If either of these assertions fire, then your thread may
have overflowed its stack. Each thread has less than 4 kB
of stack, so a few big automatic arrays or moderate
recursion can cause stack overflow. */
ASSERT (is_thread (t));
ASSERT (t->status == THREAD_RUNNING);
return t;
}
/* Returns the running thread's tid. */
tid_t
thread_tid (void)
{
return thread_current ()->tid;
}
/* Deschedules the current thread and destroys it. Never
returns to the caller. */
void
thread_exit (void)
{
ASSERT (!intr_context ());
#ifdef USERPROG
process_exit ();
#endif
/* Remove thread from all threads list, set our status to dying,
and schedule another process. That process will destroy us
when it calls thread_schedule_tail(). */
intr_disable ();
list_remove (&thread_current()->allelem);
thread_current ()->status = THREAD_DYING;
schedule ();
NOT_REACHED ();
}
/* Yields the CPU. The current thread is not put to sleep and
may be scheduled again immediately at the scheduler's whim. */
void
thread_yield (void)
{
struct thread *cur = thread_current ();
enum intr_level old_level;
ASSERT (!intr_context ());
old_level = intr_disable ();
if (cur != idle_thread)
list_push_back (&ready_list, &cur->elem);
cur->status = THREAD_READY;
schedule ();
intr_set_level (old_level);
}
/* Invoke function 'func' on all threads, passing along 'aux'.
This function must be called with interrupts off. */
void
thread_foreach (thread_action_func *func, void *aux)
{
struct list_elem *e;
ASSERT (intr_get_level () == INTR_OFF);
for (e = list_begin (&all_list); e != list_end (&all_list);
e = list_next (e))
{
struct thread *t = list_entry (e, struct thread, allelem);
func (t, aux);
}
}
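/* Illustrative sketch (not part of the original file): printing
   every thread's name with thread_foreach(), which requires
   interrupts to be disabled around the traversal.  The helper
   names are hypothetical. */
static void
print_thread_name (struct thread *t, void *aux UNUSED)
{
  printf ("%s\n", t->name);
}

static void
print_all_thread_names (void)
{
  enum intr_level old_level = intr_disable ();
  thread_foreach (print_thread_name, NULL);
  intr_set_level (old_level);
}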
/* Sets the current thread's priority to NEW_PRIORITY. */
void
thread_set_priority (int new_priority)
{
thread_current ()->priority = new_priority;
}
/* Returns the current thread's priority. */
int
thread_get_priority (void)
{
return thread_current ()->priority;
}
/* Sets the current thread's nice value to NICE. */
void
thread_set_nice (int nice UNUSED)
{
/* Not yet implemented. */
}
/* Returns the current thread's nice value. */
int
thread_get_nice (void)
{
/* Not yet implemented. */
return 0;
}
/* Returns 100 times the system load average. */
int
thread_get_load_avg (void)
{
/* Not yet implemented. */
return 0;
}
/* Returns 100 times the current thread's recent_cpu value. */
int
thread_get_recent_cpu (void)
{
/* Not yet implemented. */
return 0;
}
/* Idle thread. Executes when no other thread is ready to run.
The idle thread is initially put on the ready list by
thread_start(). It will be scheduled once initially, at which
point it initializes idle_thread, "up"s the semaphore passed
to it to enable thread_start() to continue, and immediately
blocks. After that, the idle thread never appears in the
ready list. It is returned by next_thread_to_run() as a
special case when the ready list is empty. */
static void
idle (void *idle_started_ UNUSED)
{
struct semaphore *idle_started = idle_started_;
idle_thread = thread_current ();
sema_up (idle_started);
for (;;)
{
/* Let someone else run. */
intr_disable ();
thread_block ();
/* Re-enable interrupts and wait for the next one.
The `sti' instruction disables interrupts until the
completion of the next instruction, so these two
instructions are executed atomically. This atomicity is
important; otherwise, an interrupt could be handled
between re-enabling interrupts and waiting for the next
one to occur, wasting as much as one clock tick worth of
time.
See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
7.11.1 "HLT Instruction". */
asm volatile ("sti; hlt" : : : "memory");
}
}
/* Function used as the basis for a kernel thread. */
static void
kernel_thread (thread_func *function, void *aux)
{
ASSERT (function != NULL);
intr_enable (); /* The scheduler runs with interrupts off. */
function (aux); /* Execute the thread function. */
thread_exit (); /* If function() returns, kill the thread. */
}
/* Returns the running thread. */
struct thread *
running_thread (void)
{
uint32_t *esp;
/* Copy the CPU's stack pointer into `esp', and then round that
down to the start of a page. Because `struct thread' is
always at the beginning of a page and the stack pointer is
   somewhere in the middle, this locates the current thread. */
asm ("mov %%esp, %0" : "=g" (esp));
return pg_round_down (esp);
}
/* Returns true if T appears to point to a valid thread. */
static bool
is_thread (struct thread *t)
{
return t != NULL && t->magic == THREAD_MAGIC;
}
/* Does basic initialization of T as a blocked thread named
NAME. */
static void
init_thread (struct thread *t, const char *name, int priority)
{
enum intr_level old_level;
ASSERT (t != NULL);
ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
ASSERT (name != NULL);
memset (t, 0, sizeof *t);
t->status = THREAD_BLOCKED;
strlcpy (t->name, name, sizeof t->name);
t->stack = (uint8_t *) t + PGSIZE;
t->priority = priority;
t->magic = THREAD_MAGIC;
old_level = intr_disable ();
list_push_back (&all_list, &t->allelem);
intr_set_level (old_level);
}
/* Allocates a SIZE-byte frame at the top of thread T's stack and
returns a pointer to the frame's base. */
static void *
alloc_frame (struct thread *t, size_t size)
{
/* Stack data is always allocated in word-size units. */
ASSERT (is_thread (t));
ASSERT (size % sizeof (uint32_t) == 0);
t->stack -= size;
return t->stack;
}
/* Chooses and returns the next thread to be scheduled. Should
return a thread from the run queue, unless the run queue is
empty. (If the running thread can continue running, then it
will be in the run queue.) If the run queue is empty, return
idle_thread. */
static struct thread *
next_thread_to_run (void)
{
if (list_empty (&ready_list))
return idle_thread;
else
return list_entry (list_pop_front (&ready_list), struct thread, elem);
}
/* Completes a thread switch by activating the new thread's page
tables, and, if the previous thread is dying, destroying it.
At this function's invocation, we just switched from thread
PREV, the new thread is already running, and interrupts are
still disabled. This function is normally invoked by
   schedule () as its final action before returning, but
the first time a thread is scheduled it is called by
switch_entry() (see switch.S).
It's not safe to call printf() until the thread switch is
complete. In practice that means that printf()s should be
added at the end of the function.
   After this function and its caller return, the thread switch
is complete. */
void
thread_schedule_tail (struct thread *prev)
{
struct thread *cur = running_thread ();
ASSERT (intr_get_level () == INTR_OFF);
/* Mark us as running. */
cur->status = THREAD_RUNNING;
/* Start new time slice. */
thread_ticks = 0;
#ifdef USERPROG
/* Activate the new address space. */
process_activate ();
#endif
/* If the thread we switched from is dying, destroy its struct
thread. This must happen late so that thread_exit() doesn't
   pull the rug out from under itself. (We don't free
initial_thread because its memory was not obtained via
palloc().) */
if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread)
{
ASSERT (prev != cur);
palloc_free_page (prev);
}
}
/* Schedules a new process. At entry, interrupts must be off and
the running process's state must have been changed from
running to some other state. This function finds another
thread to run and switches to it.
It's not safe to call printf() until thread_schedule_tail()
has completed. */
static void
schedule (void)
{
struct thread *cur = running_thread ();
struct thread *next = next_thread_to_run ();
struct thread *prev = NULL;
ASSERT (intr_get_level () == INTR_OFF);
ASSERT (cur->status != THREAD_RUNNING);
ASSERT (is_thread (next));
if (cur != next)
prev = switch_threads (cur, next);
thread_schedule_tail (prev);
}
/* Returns a tid to use for a new thread. */
static tid_t
allocate_tid (void)
{
static tid_t next_tid = 1;
tid_t tid;
lock_acquire (&tid_lock);
tid = next_tid++;
lock_release (&tid_lock);
return tid;
}
/* Offset of `stack' member within `struct thread'.
Used by switch.S, which can't figure it out on its own. */
uint32_t thread_stack_ofs = offsetof (struct thread, stack);

142
src/threads/thread.h Normal file

@@ -0,0 +1,142 @@
#ifndef THREADS_THREAD_H
#define THREADS_THREAD_H
#include <debug.h>
#include <list.h>
#include <stdint.h>
/* States in a thread's life cycle. */
enum thread_status
{
THREAD_RUNNING, /* Running thread. */
THREAD_READY, /* Not running but ready to run. */
THREAD_BLOCKED, /* Waiting for an event to trigger. */
THREAD_DYING /* About to be destroyed. */
};
/* Thread identifier type.
You can redefine this to whatever type you like. */
typedef int tid_t;
#define TID_ERROR ((tid_t) -1) /* Error value for tid_t. */
/* Thread priorities. */
#define PRI_MIN 0 /* Lowest priority. */
#define PRI_DEFAULT 31 /* Default priority. */
#define PRI_MAX 63 /* Highest priority. */
/* A kernel thread or user process.
Each thread structure is stored in its own 4 kB page. The
thread structure itself sits at the very bottom of the page
(at offset 0). The rest of the page is reserved for the
thread's kernel stack, which grows downward from the top of
the page (at offset 4 kB). Here's an illustration:
4 kB +---------------------------------+
| kernel stack |
| | |
| | |
| V |
| grows downward |
| |
| |
| |
| |
| |
| |
| |
| |
+---------------------------------+
| magic |
| : |
| : |
| name |
| status |
0 kB +---------------------------------+
The upshot of this is twofold:
1. First, `struct thread' must not be allowed to grow too
big. If it does, then there will not be enough room for
the kernel stack. Our base `struct thread' is only a
few bytes in size. It probably should stay well under 1
kB.
2. Second, kernel stacks must not be allowed to grow too
large. If a stack overflows, it will corrupt the thread
state. Thus, kernel functions should not allocate large
structures or arrays as non-static local variables. Use
dynamic allocation with malloc() or palloc_get_page()
instead.
The first symptom of either of these problems will probably be
an assertion failure in thread_current(), which checks that
the `magic' member of the running thread's `struct thread' is
set to THREAD_MAGIC. Stack overflow will normally change this
value, triggering the assertion. */
/* The `elem' member has a dual purpose. It can be an element in
the run queue (thread.c), or it can be an element in a
semaphore wait list (synch.c). It can be used these two ways
only because they are mutually exclusive: only a thread in the
ready state is on the run queue, whereas only a thread in the
blocked state is on a semaphore wait list. */
struct thread
{
/* Owned by thread.c. */
tid_t tid; /* Thread identifier. */
enum thread_status status; /* Thread state. */
char name[16]; /* Name (for debugging purposes). */
uint8_t *stack; /* Saved stack pointer. */
int priority; /* Priority. */
struct list_elem allelem; /* List element for all threads list. */
/* Shared between thread.c and synch.c. */
struct list_elem elem; /* List element. */
#ifdef USERPROG
/* Owned by userprog/process.c. */
uint32_t *pagedir; /* Page directory. */
#endif
/* Owned by thread.c. */
unsigned magic; /* Detects stack overflow. */
};
/* If false (default), use round-robin scheduler.
If true, use multi-level feedback queue scheduler.
   Controlled by kernel command-line option "-mlfqs". */
extern bool thread_mlfqs;
void thread_init (void);
void thread_start (void);
size_t threads_ready (void);
void thread_tick (void);
void thread_print_stats (void);
typedef void thread_func (void *aux);
tid_t thread_create (const char *name, int priority, thread_func *, void *);
void thread_block (void);
void thread_unblock (struct thread *);
struct thread *thread_current (void);
tid_t thread_tid (void);
const char *thread_name (void);
void thread_exit (void) NO_RETURN;
void thread_yield (void);
/* Performs some operation on thread t, given auxiliary data AUX. */
typedef void thread_action_func (struct thread *t, void *aux);
void thread_foreach (thread_action_func *, void *);
int thread_get_priority (void);
void thread_set_priority (int);
int thread_get_nice (void);
void thread_set_nice (int);
int thread_get_recent_cpu (void);
int thread_get_load_avg (void);
#endif /* threads/thread.h */

89
src/threads/vaddr.h Normal file

@@ -0,0 +1,89 @@
#ifndef THREADS_VADDR_H
#define THREADS_VADDR_H
#include <debug.h>
#include <stdint.h>
#include <stdbool.h>
#include "threads/loader.h"
/* Functions and macros for working with virtual addresses.
See pte.h for functions and macros specifically for x86
hardware page tables. */
#define BITMASK(SHIFT, CNT) (((1ul << (CNT)) - 1) << (SHIFT))
/* Page offset (bits 0:11). */
#define PGSHIFT 0 /* Index of first offset bit. */
#define PGBITS 12 /* Number of offset bits. */
#define PGSIZE (1 << PGBITS) /* Bytes in a page. */
#define PGMASK BITMASK(PGSHIFT, PGBITS) /* Page offset bits (0:11). */
/* Offset within a page. */
static inline unsigned pg_ofs (const void *va) {
return (uintptr_t) va & PGMASK;
}
/* Virtual page number. */
static inline uintptr_t pg_no (const void *va) {
return (uintptr_t) va >> PGBITS;
}
/* Round up to nearest page boundary. */
static inline void *pg_round_up (const void *va) {
return (void *) (((uintptr_t) va + PGSIZE - 1) & ~PGMASK);
}
/* Round down to nearest page boundary. */
static inline void *pg_round_down (const void *va) {
return (void *) ((uintptr_t) va & ~PGMASK);
}
/* Base address of the 1:1 physical-to-virtual mapping. Physical
memory is mapped starting at this virtual address. Thus,
physical address 0 is accessible at PHYS_BASE, physical
   address 0x1234 at (uint8_t *) PHYS_BASE + 0x1234, and
so on.
This address also marks the end of user programs' address
space. Up to this point in memory, user programs are allowed
to map whatever they like. At this point and above, the
virtual address space belongs to the kernel. */
#define PHYS_BASE ((void *) LOADER_PHYS_BASE)
/* Returns true if VADDR is a user virtual address. */
static inline bool
is_user_vaddr (const void *vaddr)
{
return vaddr < PHYS_BASE;
}
/* Returns true if VADDR is a kernel virtual address. */
static inline bool
is_kernel_vaddr (const void *vaddr)
{
return vaddr >= PHYS_BASE;
}
/* Returns kernel virtual address at which physical address PADDR
is mapped. */
static inline void *
ptov (uintptr_t paddr)
{
ASSERT ((void *) paddr < PHYS_BASE);
return (void *) (paddr + PHYS_BASE);
}
/* Returns physical address at which kernel virtual address VADDR
is mapped. */
static inline uintptr_t
vtop (const void *vaddr)
{
ASSERT (is_kernel_vaddr (vaddr));
return (uintptr_t) vaddr - (uintptr_t) PHYS_BASE;
}
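/* Worked example (illustrative; not part of the original header):
   ptov() and vtop() are inverses over the kernel mapping, and the
   page helpers round within a page.  Assumes the default
   LOADER_PHYS_BASE of 0xc0000000. */
static inline void
vaddr_example (void)
{
  void *kva = ptov (0x1234);                    /* 0xc0001234. */
  ASSERT (vtop (kva) == 0x1234);
  ASSERT (pg_ofs (kva) == 0x234);
  ASSERT (pg_round_down (kva) == ptov (0x1000));
  ASSERT (pg_round_up (kva) == ptov (0x2000));
}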
#endif /* threads/vaddr.h */