provided code

This commit is contained in:
LabTS
2024-10-01 23:37:39 +01:00
commit 8724a2641e
697 changed files with 74252 additions and 0 deletions

View File

@@ -0,0 +1,7 @@
# Percentage of the testing point total designated for each set of tests.
5.0% tests/devices/Rubric.alarmfunc
5.0% tests/devices/Rubric.alarmrobust
45.0% tests/threads/Rubric.priority
0.0% tests/threads/Rubric.priorityCR
45.0% tests/threads/Rubric.mlfqs

View File

@@ -0,0 +1,49 @@
# -*- makefile -*-
# Test names.
# Test names: every test kernel built under tests/threads/.
tests/threads_TESTS = $(addprefix tests/threads/, \
	alarm-priority priority-change priority-donate-one \
	priority-donate-multiple priority-donate-multiple2 \
	priority-donate-nest priority-donate-sema priority-donate-lower \
	priority-fifo priority-preempt priority-sema priority-condvar \
	priority-donate-chain priority-preservation \
	mlfqs-load-1 mlfqs-load-60 mlfqs-load-avg mlfqs-recent-1 mlfqs-fair-2 \
	mlfqs-fair-20 mlfqs-nice-2 mlfqs-nice-10 mlfqs-block)
# Sources for tests.
# Note: all four mlfqs-fair-*/mlfqs-nice-* tests are implemented by
# the single source file mlfqs-fair.c.
tests/threads_SRC = tests/threads/tests.c
tests/threads_SRC += tests/threads/alarm-priority.c
tests/threads_SRC += tests/threads/priority-change.c
tests/threads_SRC += tests/threads/priority-donate-one.c
tests/threads_SRC += tests/threads/priority-donate-multiple.c
tests/threads_SRC += tests/threads/priority-donate-multiple2.c
tests/threads_SRC += tests/threads/priority-donate-nest.c
tests/threads_SRC += tests/threads/priority-donate-sema.c
tests/threads_SRC += tests/threads/priority-donate-lower.c
tests/threads_SRC += tests/threads/priority-fifo.c
tests/threads_SRC += tests/threads/priority-preempt.c
tests/threads_SRC += tests/threads/priority-sema.c
tests/threads_SRC += tests/threads/priority-condvar.c
tests/threads_SRC += tests/threads/priority-donate-chain.c
tests/threads_SRC += tests/threads/priority-preservation.c
tests/threads_SRC += tests/threads/mlfqs-load-1.c
tests/threads_SRC += tests/threads/mlfqs-load-60.c
tests/threads_SRC += tests/threads/mlfqs-load-avg.c
tests/threads_SRC += tests/threads/mlfqs-recent-1.c
tests/threads_SRC += tests/threads/mlfqs-fair.c
tests/threads_SRC += tests/threads/mlfqs-block.c
# The MLFQS tests need the -mlfqs kernel flag and a long timeout,
# since each runs for several minutes of simulated time.
MLFQS_OUTPUTS = \
	tests/threads/mlfqs-load-1.output \
	tests/threads/mlfqs-load-60.output \
	tests/threads/mlfqs-load-avg.output \
	tests/threads/mlfqs-recent-1.output \
	tests/threads/mlfqs-fair-2.output \
	tests/threads/mlfqs-fair-20.output \
	tests/threads/mlfqs-nice-2.output \
	tests/threads/mlfqs-nice-10.output \
	tests/threads/mlfqs-block.output
$(MLFQS_OUTPUTS): KERNELFLAGS += -mlfqs
$(MLFQS_OUTPUTS): TIMEOUT = 480

View File

@@ -0,0 +1,14 @@
Functionality of advanced scheduler:
5 mlfqs-load-1
5 mlfqs-load-60
5 mlfqs-load-avg
5 mlfqs-recent-1
5 mlfqs-fair-2
5 mlfqs-fair-20
5 mlfqs-nice-2
5 mlfqs-nice-10
5 mlfqs-block

View File

@@ -0,0 +1,16 @@
Functionality of priority scheduler:
5 alarm-priority
5 priority-change
5 priority-preempt
5 priority-fifo
5 priority-sema
5 priority-condvar
5 priority-donate-one
5 priority-donate-multiple
5 priority-donate-multiple2
5 priority-donate-nest
10 priority-donate-chain
5 priority-donate-sema
5 priority-donate-lower

View File

@@ -0,0 +1,2 @@
Full correctness of priority scheduler:
10 priority-preservation

View File

@@ -0,0 +1,58 @@
/* Checks that when the alarm clock wakes up threads, the
higher-priority threads run first. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
static thread_func alarm_priority_thread;
static int64_t wake_time;
static struct semaphore wait_sema;
/* Entry point.  Creates ten threads with distinct priorities
   (PRI_DEFAULT-1 down to PRI_DEFAULT-10, in a scrambled creation
   order), all of which sleep until the same wake_time.  The main
   thread drops to PRI_MIN and waits for all ten to signal
   wait_sema; the checker verifies the wake-up messages appear in
   strictly decreasing priority order. */
void
test_alarm_priority (void)
{
  int i;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* All threads wake at the same absolute tick, 5 seconds out. */
  wake_time = timer_ticks () + 5 * TIMER_FREQ;
  sema_init (&wait_sema, 0);

  for (i = 0; i < 10; i++)
    {
      /* (i + 5) % 10 scrambles creation order so threads are not
         created in priority order. */
      int priority = PRI_DEFAULT - (i + 5) % 10 - 1;
      char name[16];
      snprintf (name, sizeof name, "priority %d", priority);
      thread_create (name, priority, alarm_priority_thread, NULL);
    }

  thread_set_priority (PRI_MIN);

  /* Wait for all ten threads to report in. */
  for (i = 0; i < 10; i++)
    sema_down (&wait_sema);
}
/* Body of each test thread: sleeps until the shared wake_time,
   announces itself, and signals wait_sema.  The scheduler should
   wake higher-priority threads first. */
static void
alarm_priority_thread (void *aux UNUSED)
{
  int64_t entry_tick = timer_ticks ();

  /* Spin until the tick counter advances, so that we start at the
     very beginning of a timer tick; then timer_sleep() cannot race
     with a timer interrupt between reading the clock and
     sleeping. */
  for (;;)
    {
      if (timer_elapsed (entry_tick) != 0)
        break;
    }

  /* Sleep until the common wake-up time. */
  timer_sleep (wake_time - timer_ticks ());

  /* Print a message on wake-up. */
  msg ("Thread %s woke up.", thread_name ());
  sema_up (&wait_sema);
}

View File

@@ -0,0 +1,19 @@
# -*- perl -*-
# Checker for alarm-priority: the kernel output must match this
# transcript exactly, i.e. the ten sleeping threads must wake and
# print in strictly decreasing priority order (30 down to 21).
use strict;
use warnings;
use tests::tests;
check_expected ([<<'EOF']);
(alarm-priority) begin
(alarm-priority) Thread priority 30 woke up.
(alarm-priority) Thread priority 29 woke up.
(alarm-priority) Thread priority 28 woke up.
(alarm-priority) Thread priority 27 woke up.
(alarm-priority) Thread priority 26 woke up.
(alarm-priority) Thread priority 25 woke up.
(alarm-priority) Thread priority 24 woke up.
(alarm-priority) Thread priority 23 woke up.
(alarm-priority) Thread priority 22 woke up.
(alarm-priority) Thread priority 21 woke up.
(alarm-priority) end
EOF
pass;

View File

@@ -0,0 +1,64 @@
/* Checks that recent_cpu and priorities are updated for blocked
threads.
The main thread sleeps for 25 seconds, spins for 5 seconds,
then releases a lock. The "block" thread spins for 20 seconds
then attempts to acquire the lock, which will block for 10
seconds (until the main thread releases it). If recent_cpu
decays properly while the "block" thread sleeps, then the
block thread should be immediately scheduled when the main
thread releases the lock. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
static void block_thread (void *lock_);
/* Entry point.  Holds a lock for ~30 seconds (25 sleeping plus 5
   spinning) while the "block" thread spins for 20 seconds and then
   blocks on that lock.  If recent_cpu decays while the block
   thread is blocked, the block thread preempts the main thread the
   moment the lock is released, so "...got it." must appear before
   the final message. */
void
test_mlfqs_block (void)
{
  int64_t start_time;
  struct lock lock;

  ASSERT (thread_mlfqs);

  msg ("Main thread acquiring lock.");
  lock_init (&lock);
  lock_acquire (&lock);

  msg ("Main thread creating block thread, sleeping 25 seconds...");
  thread_create ("block", PRI_DEFAULT, block_thread, &lock);
  timer_sleep (25 * TIMER_FREQ);

  /* Busy-wait (rather than sleep) so the main thread accumulates
     recent_cpu of its own before releasing the lock. */
  msg ("Main thread spinning for 5 seconds...");
  start_time = timer_ticks ();
  while (timer_elapsed (start_time) < 5 * TIMER_FREQ)
    continue;

  msg ("Main thread releasing lock.");
  lock_release (&lock);
  msg ("Block thread should have already acquired lock.");
}
/* Thread body for the mlfqs-block test: burns CPU for 20 seconds
   (driving its own recent_cpu up), then blocks trying to take the
   lock that the main thread is still holding. */
static void
block_thread (void *lock_)
{
  struct lock *blocker = lock_;
  int64_t spin_start;

  msg ("Block thread spinning for 20 seconds...");
  spin_start = timer_ticks ();
  for (;;)
    {
      if (timer_elapsed (spin_start) >= 20 * TIMER_FREQ)
        break;
    }

  msg ("Block thread acquiring lock...");
  lock_acquire (blocker);
  msg ("...got it.");
}

View File

@@ -0,0 +1,17 @@
# -*- perl -*-
# Checker for mlfqs-block: exact-transcript match.  The key ordering
# constraint is that "...got it." (the block thread acquiring the
# lock) appears before the main thread's final message, proving the
# block thread was scheduled immediately on lock release.
use strict;
use warnings;
use tests::tests;
check_expected ([<<'EOF']);
(mlfqs-block) begin
(mlfqs-block) Main thread acquiring lock.
(mlfqs-block) Main thread creating block thread, sleeping 25 seconds...
(mlfqs-block) Block thread spinning for 20 seconds...
(mlfqs-block) Block thread acquiring lock...
(mlfqs-block) Main thread spinning for 5 seconds...
(mlfqs-block) Main thread releasing lock.
(mlfqs-block) ...got it.
(mlfqs-block) Block thread should have already acquired lock.
(mlfqs-block) end
EOF
pass;

View File

@@ -0,0 +1,7 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
use tests::threads::mlfqs;
check_mlfqs_fair ([0, 0], 50);

View File

@@ -0,0 +1,7 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
use tests::threads::mlfqs;
check_mlfqs_fair ([(0) x 20], 20);

View File

@@ -0,0 +1,124 @@
/* Measures the correctness of the "nice" implementation.
The "fair" tests run either 2 or 20 threads all niced to 0.
The threads should all receive approximately the same number
of ticks. Each test runs for 30 seconds, so the ticks should
also sum to approximately 30 * 100 == 3000 ticks.
The mlfqs-nice-2 test runs 2 threads, one with nice 0, the
other with nice 5, which should receive 1,904 and 1,096 ticks,
respectively, over 30 seconds.
The mlfqs-nice-10 test runs 10 threads with nice 0 through 9.
They should receive 672, 588, 492, 408, 316, 232, 152, 92, 40,
and 8 ticks, respectively, over 30 seconds.
(The above are computed via simulation in mlfqs.pm.) */
#include <stdio.h>
#include <inttypes.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/palloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
static void test_mlfqs_fair (int thread_cnt, int nice_min, int nice_step);
/* Test entry points.  All four delegate to test_mlfqs_fair with
   (thread count, lowest nice value, nice step). */

/* Two threads, both nice 0: ticks should split about evenly. */
void
test_mlfqs_fair_2 (void)
{
  test_mlfqs_fair (2, 0, 0);
}

/* Twenty threads, all nice 0. */
void
test_mlfqs_fair_20 (void)
{
  test_mlfqs_fair (20, 0, 0);
}

/* Two threads, nice 0 and nice 5. */
void
test_mlfqs_nice_2 (void)
{
  test_mlfqs_fair (2, 0, 5);
}

/* Ten threads, nice 0 through 9. */
void
test_mlfqs_nice_10 (void)
{
  test_mlfqs_fair (10, 0, 1);
}
/* Upper bound on the number of threads any of these tests runs. */
#define MAX_THREAD_CNT 20

/* Per-thread bookkeeping handed to each load_thread. */
struct thread_info
{
  int64_t start_time;           /* Shared test start tick (baseline). */
  int tick_count;               /* Distinct ticks seen while running. */
  int nice;                     /* Nice value this thread sets for itself. */
};

static void load_thread (void *aux);
/* Common driver for the fair/nice tests.  Creates THREAD_CNT
   load_threads with nice values nice_min, nice_min + nice_step,
   ..., sleeps while they run, then reports how many timer ticks
   each thread observed while it was running.  The checker compares
   the tick counts against a simulation. */
static void
test_mlfqs_fair (int thread_cnt, int nice_min, int nice_step)
{
  struct thread_info info[MAX_THREAD_CNT];
  int64_t start_time;
  int nice;
  int i;

  ASSERT (thread_mlfqs);
  ASSERT (thread_cnt <= MAX_THREAD_CNT);
  ASSERT (nice_min >= -10);
  ASSERT (nice_step >= 0);
  /* Highest nice handed out must stay within the valid range. */
  ASSERT (nice_min + nice_step * (thread_cnt - 1) <= 20);

  /* Minimum nice (maximum priority) for the main thread, so thread
     creation and reporting are not starved by the load threads. */
  thread_set_nice (-20);

  start_time = timer_ticks ();
  msg ("Starting %d threads...", thread_cnt);
  nice = nice_min;
  for (i = 0; i < thread_cnt; i++)
    {
      struct thread_info *ti = &info[i];
      char name[16];

      ti->start_time = start_time;
      ti->tick_count = 0;
      ti->nice = nice;

      snprintf(name, sizeof name, "load %d", i);
      thread_create (name, PRI_DEFAULT, load_thread, ti);

      nice += nice_step;
    }
  msg ("Starting threads took %"PRId64" ticks.", timer_elapsed (start_time));

  /* Each load thread sleeps 5 s then spins 30 s; sleeping 40 s
     covers both phases with margin. */
  msg ("Sleeping 40 seconds to let threads run, please wait...");
  timer_sleep (40 * TIMER_FREQ);

  for (i = 0; i < thread_cnt; i++)
    msg ("Thread %d received %d ticks.", i, info[i].tick_count);
}
/* Load-thread body: sets its own nice value, sleeps until 5
   seconds past the test's start, then spins for 30 seconds
   counting the timer ticks it observes while running. */
static void
load_thread (void *ti_)
{
  struct thread_info *ti = ti_;
  int64_t sleep_time = 5 * TIMER_FREQ;
  int64_t spin_time = sleep_time + 30 * TIMER_FREQ;
  int64_t last_time = 0;

  thread_set_nice (ti->nice);
  timer_sleep (sleep_time - timer_elapsed (ti->start_time));
  while (timer_elapsed (ti->start_time) < spin_time)
    {
      /* Count each tick only once, so tick_count approximates the
         number of ticks during which this thread held the CPU. */
      int64_t cur_time = timer_ticks ();
      if (cur_time != last_time)
        ti->tick_count++;
      last_time = cur_time;
    }
}

View File

@@ -0,0 +1,60 @@
/* Verifies that a single busy thread raises the load average to
0.5 in 38 to 45 seconds. The expected time is 42 seconds, as
you can verify:
perl -e '$i++,$a=(59*$a+1)/60while$a<=.5;print "$i\n"'
Then, verifies that 10 seconds of inactivity drop the load
average back below 0.5 again. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
/* Entry point.  Spins until the load average (scaled by 100, as
   returned by thread_get_load_avg()) exceeds 0.5, checking that it
   takes between 38 and 45 seconds; then sleeps 10 seconds and
   checks the load average decayed back below 0.5.  This test does
   its own pass/fail reporting. */
void
test_mlfqs_load_1 (void)
{
  int64_t start_time;
  int elapsed;
  int load_avg;

  ASSERT (thread_mlfqs);

  msg ("spinning for up to 45 seconds, please wait...");
  start_time = timer_ticks ();
  for (;;)
    {
      /* load_avg is a fixed-point value scaled by 100:
         100 == load average of 1.00. */
      load_avg = thread_get_load_avg ();
      ASSERT (load_avg >= 0);
      elapsed = (int)(timer_elapsed (start_time) / TIMER_FREQ);
      if (load_avg > 100)
        fail ("load average is %d.%02d "
              "but should be between 0 and 1 (after %d seconds)",
              load_avg / 100, load_avg % 100, elapsed);
      else if (load_avg > 50)
        break;
      else if (elapsed > 45)
        fail ("load average stayed below 0.5 for more than 45 seconds");
    }

  /* Expected crossing time is ~42 s; see file comment. */
  if (elapsed < 38)
    fail ("load average took only %d seconds to rise above 0.5", elapsed);
  msg ("load average rose to 0.5 after %d seconds", elapsed);

  msg ("sleeping for another 10 seconds, please wait...");
  timer_sleep (TIMER_FREQ * 10);
  load_avg = thread_get_load_avg ();
  if (load_avg < 0)
    fail ("load average fell below 0");
  if (load_avg > 50)
    fail ("load average stayed above 0.5 for more than 10 seconds");
  msg ("load average fell back below 0.5 (to %d.%02d)",
       load_avg / 100, load_avg % 100);
  pass ();
}

View File

@@ -0,0 +1,15 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
our ($test);
my (@output) = read_text_file ("$test.output");
common_checks ("run", @output);
@output = get_core_output ("run", @output);
fail "missing PASS in output"
unless grep ($_ eq '(mlfqs-load-1) PASS', @output);
pass;

View File

@@ -0,0 +1,160 @@
/* Starts 60 threads that each sleep for 10 seconds, then spin in
a tight loop for 60 seconds, and sleep for another 60 seconds.
Every 2 seconds after the initial sleep, the main thread
prints the load average.
The expected output is this (some margin of error is allowed):
After 0 seconds, load average=1.00.
After 2 seconds, load average=2.95.
After 4 seconds, load average=4.84.
After 6 seconds, load average=6.66.
After 8 seconds, load average=8.42.
After 10 seconds, load average=10.13.
After 12 seconds, load average=11.78.
After 14 seconds, load average=13.37.
After 16 seconds, load average=14.91.
After 18 seconds, load average=16.40.
After 20 seconds, load average=17.84.
After 22 seconds, load average=19.24.
After 24 seconds, load average=20.58.
After 26 seconds, load average=21.89.
After 28 seconds, load average=23.15.
After 30 seconds, load average=24.37.
After 32 seconds, load average=25.54.
After 34 seconds, load average=26.68.
After 36 seconds, load average=27.78.
After 38 seconds, load average=28.85.
After 40 seconds, load average=29.88.
After 42 seconds, load average=30.87.
After 44 seconds, load average=31.84.
After 46 seconds, load average=32.77.
After 48 seconds, load average=33.67.
After 50 seconds, load average=34.54.
After 52 seconds, load average=35.38.
After 54 seconds, load average=36.19.
After 56 seconds, load average=36.98.
After 58 seconds, load average=37.74.
After 60 seconds, load average=37.48.
After 62 seconds, load average=36.24.
After 64 seconds, load average=35.04.
After 66 seconds, load average=33.88.
After 68 seconds, load average=32.76.
After 70 seconds, load average=31.68.
After 72 seconds, load average=30.63.
After 74 seconds, load average=29.62.
After 76 seconds, load average=28.64.
After 78 seconds, load average=27.69.
After 80 seconds, load average=26.78.
After 82 seconds, load average=25.89.
After 84 seconds, load average=25.04.
After 86 seconds, load average=24.21.
After 88 seconds, load average=23.41.
After 90 seconds, load average=22.64.
After 92 seconds, load average=21.89.
After 94 seconds, load average=21.16.
After 96 seconds, load average=20.46.
After 98 seconds, load average=19.79.
After 100 seconds, load average=19.13.
After 102 seconds, load average=18.50.
After 104 seconds, load average=17.89.
After 106 seconds, load average=17.30.
After 108 seconds, load average=16.73.
After 110 seconds, load average=16.17.
After 112 seconds, load average=15.64.
After 114 seconds, load average=15.12.
After 116 seconds, load average=14.62.
After 118 seconds, load average=14.14.
After 120 seconds, load average=13.67.
After 122 seconds, load average=13.22.
After 124 seconds, load average=12.78.
After 126 seconds, load average=12.36.
After 128 seconds, load average=11.95.
After 130 seconds, load average=11.56.
After 132 seconds, load average=11.17.
After 134 seconds, load average=10.80.
After 136 seconds, load average=10.45.
After 138 seconds, load average=10.10.
After 140 seconds, load average=9.77.
After 142 seconds, load average=9.45.
After 144 seconds, load average=9.13.
After 146 seconds, load average=8.83.
After 148 seconds, load average=8.54.
After 150 seconds, load average=8.26.
After 152 seconds, load average=7.98.
After 154 seconds, load average=7.72.
After 156 seconds, load average=7.47.
After 158 seconds, load average=7.22.
After 160 seconds, load average=6.98.
After 162 seconds, load average=6.75.
After 164 seconds, load average=6.53.
After 166 seconds, load average=6.31.
After 168 seconds, load average=6.10.
After 170 seconds, load average=5.90.
After 172 seconds, load average=5.70.
After 174 seconds, load average=5.52.
After 176 seconds, load average=5.33.
After 178 seconds, load average=5.16.
*/
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
static int64_t start_time;
static void load_thread (void *aux);
#define THREAD_CNT 60
/* Entry point.  Starts THREAD_CNT niced load threads, then samples
   and prints the system load average every 2 seconds for 180
   seconds, starting 10 seconds after the (aligned) start time.
   The checker compares the samples against a simulation. */
void
test_mlfqs_load_60 (void)
{
  int i;

  ASSERT (thread_mlfqs);

  start_time = timer_ticks ();
  /* align start_time to TIMER_FREQ to ensure sleeping threads wake up at the
     tick on which the load average is recalculated */
  start_time = ((start_time / TIMER_FREQ) + 1) * TIMER_FREQ;

  msg ("Starting %d niced load threads...", THREAD_CNT);
  for (i = 0; i < THREAD_CNT; i++)
    {
      char name[16];
      snprintf(name, sizeof name, "load %d", i);
      thread_create (name, PRI_DEFAULT, load_thread, NULL);
    }
  /* timer_elapsed() returns int64_t (cf. the PRId64 usage in
     mlfqs-fair.c), so it must not be passed to a %d conversion;
     cast the small quotient to int instead.  Note this can be -1
     since start_time was rounded up to a future tick. */
  msg ("Starting threads took %d seconds.",
       (int) (timer_elapsed (start_time) / TIMER_FREQ));

  /* Sample load_avg (scaled by 100) every 2 seconds. */
  for (i = 0; i < 90; i++)
    {
      int64_t sleep_until = start_time + TIMER_FREQ * (2 * i + 10);
      int load_avg;
      timer_sleep (sleep_until - timer_ticks ());
      load_avg = thread_get_load_avg ();
      msg ("After %d seconds, load average=%d.%02d.",
           i * 2, load_avg / 100, load_avg % 100);
    }
}
/* Load-thread body: nices itself to the maximum (so the main
   thread always preempts it to print), sleeps 10 seconds, spins
   for 60 seconds, then sleeps until 130 seconds past start. */
static void
load_thread (void *aux UNUSED)
{
  int64_t sleep_time = 10 * TIMER_FREQ;
  int64_t spin_time = sleep_time + 60 * TIMER_FREQ;
  int64_t exit_time = spin_time + 60 * TIMER_FREQ;

  thread_set_nice (20);
  timer_sleep (sleep_time - timer_elapsed (start_time));
  while (timer_elapsed (start_time) < spin_time)
    continue;
  timer_sleep (exit_time - timer_elapsed (start_time));
}

View File

@@ -0,0 +1,36 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
use tests::threads::mlfqs;
our ($test);
my (@output) = read_text_file ("$test.output");
common_checks ("run", @output);
@output = get_core_output ("run", @output);
# Get actual values.
local ($_);
my (@actual);
foreach (@output) {
my ($t, $load_avg) = /After (\d+) seconds, load average=(\d+\.\d+)\./
or next;
$actual[$t] = $load_avg;
}
# Calculate expected values.
my ($load_avg) = 0;
my ($recent) = 0;
my (@expected);
for (my ($t) = 0; $t < 180; $t++) {
my ($ready) = $t < 60 ? 60 : 0;
$load_avg = (59/60) * $load_avg + (1/60) * $ready;
$expected[$t] = $load_avg;
}
mlfqs_compare ("time", "%.2f", \@actual, \@expected, 3.5, [2, 178, 2],
"Some load average values were missing or "
. "differed from those expected "
. "by more than 3.5.");
pass;

View File

@@ -0,0 +1,167 @@
/* Starts 60 threads numbered 0 through 59. Thread #i sleeps for
(10+i) seconds, then spins in a loop for 60 seconds, then
sleeps until a total of 120 seconds have passed. Every 2
seconds, starting 10 seconds in, the main thread prints the
load average.
The expected output is listed below. Some margin of error is
allowed.
If your implementation fails this test but passes most other
tests, then consider whether you are doing too much work in
the timer interrupt. If the timer interrupt handler takes too
long, then the test's main thread will not have enough time to
do its own work (printing a message) and go back to sleep
before the next tick arrives. Then the main thread will be
ready, instead of sleeping, when the tick arrives,
artificially driving up the load average.
After 0 seconds, load average=0.00.
After 2 seconds, load average=0.05.
After 4 seconds, load average=0.16.
After 6 seconds, load average=0.34.
After 8 seconds, load average=0.58.
After 10 seconds, load average=0.87.
After 12 seconds, load average=1.22.
After 14 seconds, load average=1.63.
After 16 seconds, load average=2.09.
After 18 seconds, load average=2.60.
After 20 seconds, load average=3.16.
After 22 seconds, load average=3.76.
After 24 seconds, load average=4.42.
After 26 seconds, load average=5.11.
After 28 seconds, load average=5.85.
After 30 seconds, load average=6.63.
After 32 seconds, load average=7.46.
After 34 seconds, load average=8.32.
After 36 seconds, load average=9.22.
After 38 seconds, load average=10.15.
After 40 seconds, load average=11.12.
After 42 seconds, load average=12.13.
After 44 seconds, load average=13.16.
After 46 seconds, load average=14.23.
After 48 seconds, load average=15.33.
After 50 seconds, load average=16.46.
After 52 seconds, load average=17.62.
After 54 seconds, load average=18.81.
After 56 seconds, load average=20.02.
After 58 seconds, load average=21.26.
After 60 seconds, load average=22.52.
After 62 seconds, load average=23.71.
After 64 seconds, load average=24.80.
After 66 seconds, load average=25.78.
After 68 seconds, load average=26.66.
After 70 seconds, load average=27.45.
After 72 seconds, load average=28.14.
After 74 seconds, load average=28.75.
After 76 seconds, load average=29.27.
After 78 seconds, load average=29.71.
After 80 seconds, load average=30.06.
After 82 seconds, load average=30.34.
After 84 seconds, load average=30.55.
After 86 seconds, load average=30.68.
After 88 seconds, load average=30.74.
After 90 seconds, load average=30.73.
After 92 seconds, load average=30.66.
After 94 seconds, load average=30.52.
After 96 seconds, load average=30.32.
After 98 seconds, load average=30.06.
After 100 seconds, load average=29.74.
After 102 seconds, load average=29.37.
After 104 seconds, load average=28.95.
After 106 seconds, load average=28.47.
After 108 seconds, load average=27.94.
After 110 seconds, load average=27.36.
After 112 seconds, load average=26.74.
After 114 seconds, load average=26.07.
After 116 seconds, load average=25.36.
After 118 seconds, load average=24.60.
After 120 seconds, load average=23.81.
After 122 seconds, load average=23.02.
After 124 seconds, load average=22.26.
After 126 seconds, load average=21.52.
After 128 seconds, load average=20.81.
After 130 seconds, load average=20.12.
After 132 seconds, load average=19.46.
After 134 seconds, load average=18.81.
After 136 seconds, load average=18.19.
After 138 seconds, load average=17.59.
After 140 seconds, load average=17.01.
After 142 seconds, load average=16.45.
After 144 seconds, load average=15.90.
After 146 seconds, load average=15.38.
After 148 seconds, load average=14.87.
After 150 seconds, load average=14.38.
After 152 seconds, load average=13.90.
After 154 seconds, load average=13.44.
After 156 seconds, load average=13.00.
After 158 seconds, load average=12.57.
After 160 seconds, load average=12.15.
After 162 seconds, load average=11.75.
After 164 seconds, load average=11.36.
After 166 seconds, load average=10.99.
After 168 seconds, load average=10.62.
After 170 seconds, load average=10.27.
After 172 seconds, load average=9.93.
After 174 seconds, load average=9.61.
After 176 seconds, load average=9.29.
After 178 seconds, load average=8.98.
*/
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
static int64_t start_time;
static void load_thread (void *seq_no);
#define THREAD_CNT 60
/* Entry point.  Starts THREAD_CNT load threads with staggered
   start times (thread #i sleeps 10+i seconds before spinning),
   then samples and prints the load average every 2 seconds for 180
   seconds.  The checker compares the samples against a
   simulation. */
void
test_mlfqs_load_avg (void)
{
  int i;

  ASSERT (thread_mlfqs);

  start_time = timer_ticks ();
  msg ("Starting %d load threads...", THREAD_CNT);
  for (i = 0; i < THREAD_CNT; i++)
    {
      char name[16];
      snprintf(name, sizeof name, "load %d", i);
      /* Thread index is smuggled through the aux pointer. */
      thread_create (name, PRI_DEFAULT, load_thread, (void *) i);
    }
  /* timer_elapsed() returns int64_t (cf. the PRId64 usage in
     mlfqs-fair.c), so it must not be passed to a %d conversion;
     cast the small quotient to int instead. */
  msg ("Starting threads took %d seconds.",
       (int) (timer_elapsed (start_time) / TIMER_FREQ));
  /* Minimum nice (maximum priority) so the sampling loop below is
     never starved by the load threads. */
  thread_set_nice (-20);

  for (i = 0; i < 90; i++)
    {
      int64_t sleep_until = start_time + TIMER_FREQ * (2 * i + 10);
      int load_avg;
      timer_sleep (sleep_until - timer_ticks ());
      load_avg = thread_get_load_avg ();
      msg ("After %d seconds, load average=%d.%02d.",
           i * 2, load_avg / 100, load_avg % 100);
    }
}
/* Load-thread body.  The aux argument carries the thread's
   sequence number as a plain integer cast through void *.  Thread
   #i sleeps 10+i seconds, spins for 60 seconds (THREAD_CNT == 60),
   then sleeps until the common absolute exit time of 120 seconds
   past start. */
static void
load_thread (void *seq_no_)
{
  int seq_no = (int) seq_no_;
  int sleep_time = TIMER_FREQ * (10 + seq_no);
  int spin_time = sleep_time + TIMER_FREQ * THREAD_CNT;
  /* Absolute exit time, identical for every thread. */
  int exit_time = TIMER_FREQ * (THREAD_CNT * 2);

  timer_sleep (sleep_time - timer_elapsed (start_time));
  while (timer_elapsed (start_time) < spin_time)
    continue;
  timer_sleep (exit_time - timer_elapsed (start_time));
}

View File

@@ -0,0 +1,36 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
use tests::threads::mlfqs;
our ($test);
my (@output) = read_text_file ("$test.output");
common_checks ("run", @output);
@output = get_core_output ("run", @output);
# Get actual values.
local ($_);
my (@actual);
foreach (@output) {
my ($t, $load_avg) = /After (\d+) seconds, load average=(\d+\.\d+)\./
or next;
$actual[$t] = $load_avg;
}
# Calculate expected values.
my ($load_avg) = 0;
my ($recent) = 0;
my (@expected);
for (my ($t) = 0; $t < 180; $t++) {
my ($ready) = $t < 60 ? $t : $t < 120 ? 120 - $t : 0;
$load_avg = (59/60) * $load_avg + (1/60) * $ready;
$expected[$t] = $load_avg;
}
mlfqs_compare ("time", "%.2f", \@actual, \@expected, 2.5, [2, 178, 2],
"Some load average values were missing or "
. "differed from those expected "
. "by more than 2.5.");
pass;

View File

@@ -0,0 +1,7 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
use tests::threads::mlfqs;
check_mlfqs_fair ([0...9], 25);

View File

@@ -0,0 +1,7 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
use tests::threads::mlfqs;
check_mlfqs_fair ([0, 5], 50);

View File

@@ -0,0 +1,144 @@
/* Checks that recent_cpu is calculated properly for the case of
a single ready process.
The expected output is this (some margin of error is allowed):
After 2 seconds, recent_cpu is 6.40, load_avg is 0.03.
After 4 seconds, recent_cpu is 12.60, load_avg is 0.07.
After 6 seconds, recent_cpu is 18.61, load_avg is 0.10.
After 8 seconds, recent_cpu is 24.44, load_avg is 0.13.
After 10 seconds, recent_cpu is 30.08, load_avg is 0.15.
After 12 seconds, recent_cpu is 35.54, load_avg is 0.18.
After 14 seconds, recent_cpu is 40.83, load_avg is 0.21.
After 16 seconds, recent_cpu is 45.96, load_avg is 0.24.
After 18 seconds, recent_cpu is 50.92, load_avg is 0.26.
After 20 seconds, recent_cpu is 55.73, load_avg is 0.29.
After 22 seconds, recent_cpu is 60.39, load_avg is 0.31.
After 24 seconds, recent_cpu is 64.90, load_avg is 0.33.
After 26 seconds, recent_cpu is 69.27, load_avg is 0.35.
After 28 seconds, recent_cpu is 73.50, load_avg is 0.38.
After 30 seconds, recent_cpu is 77.60, load_avg is 0.40.
After 32 seconds, recent_cpu is 81.56, load_avg is 0.42.
After 34 seconds, recent_cpu is 85.40, load_avg is 0.44.
After 36 seconds, recent_cpu is 89.12, load_avg is 0.45.
After 38 seconds, recent_cpu is 92.72, load_avg is 0.47.
After 40 seconds, recent_cpu is 96.20, load_avg is 0.49.
After 42 seconds, recent_cpu is 99.57, load_avg is 0.51.
After 44 seconds, recent_cpu is 102.84, load_avg is 0.52.
After 46 seconds, recent_cpu is 106.00, load_avg is 0.54.
After 48 seconds, recent_cpu is 109.06, load_avg is 0.55.
After 50 seconds, recent_cpu is 112.02, load_avg is 0.57.
After 52 seconds, recent_cpu is 114.89, load_avg is 0.58.
After 54 seconds, recent_cpu is 117.66, load_avg is 0.60.
After 56 seconds, recent_cpu is 120.34, load_avg is 0.61.
After 58 seconds, recent_cpu is 122.94, load_avg is 0.62.
After 60 seconds, recent_cpu is 125.46, load_avg is 0.64.
After 62 seconds, recent_cpu is 127.89, load_avg is 0.65.
After 64 seconds, recent_cpu is 130.25, load_avg is 0.66.
After 66 seconds, recent_cpu is 132.53, load_avg is 0.67.
After 68 seconds, recent_cpu is 134.73, load_avg is 0.68.
After 70 seconds, recent_cpu is 136.86, load_avg is 0.69.
After 72 seconds, recent_cpu is 138.93, load_avg is 0.70.
After 74 seconds, recent_cpu is 140.93, load_avg is 0.71.
After 76 seconds, recent_cpu is 142.86, load_avg is 0.72.
After 78 seconds, recent_cpu is 144.73, load_avg is 0.73.
After 80 seconds, recent_cpu is 146.54, load_avg is 0.74.
After 82 seconds, recent_cpu is 148.29, load_avg is 0.75.
After 84 seconds, recent_cpu is 149.99, load_avg is 0.76.
After 86 seconds, recent_cpu is 151.63, load_avg is 0.76.
After 88 seconds, recent_cpu is 153.21, load_avg is 0.77.
After 90 seconds, recent_cpu is 154.75, load_avg is 0.78.
After 92 seconds, recent_cpu is 156.23, load_avg is 0.79.
After 94 seconds, recent_cpu is 157.67, load_avg is 0.79.
After 96 seconds, recent_cpu is 159.06, load_avg is 0.80.
After 98 seconds, recent_cpu is 160.40, load_avg is 0.81.
After 100 seconds, recent_cpu is 161.70, load_avg is 0.81.
After 102 seconds, recent_cpu is 162.96, load_avg is 0.82.
After 104 seconds, recent_cpu is 164.18, load_avg is 0.83.
After 106 seconds, recent_cpu is 165.35, load_avg is 0.83.
After 108 seconds, recent_cpu is 166.49, load_avg is 0.84.
After 110 seconds, recent_cpu is 167.59, load_avg is 0.84.
After 112 seconds, recent_cpu is 168.66, load_avg is 0.85.
After 114 seconds, recent_cpu is 169.69, load_avg is 0.85.
After 116 seconds, recent_cpu is 170.69, load_avg is 0.86.
After 118 seconds, recent_cpu is 171.65, load_avg is 0.86.
After 120 seconds, recent_cpu is 172.58, load_avg is 0.87.
After 122 seconds, recent_cpu is 173.49, load_avg is 0.87.
After 124 seconds, recent_cpu is 174.36, load_avg is 0.88.
After 126 seconds, recent_cpu is 175.20, load_avg is 0.88.
After 128 seconds, recent_cpu is 176.02, load_avg is 0.88.
After 130 seconds, recent_cpu is 176.81, load_avg is 0.89.
After 132 seconds, recent_cpu is 177.57, load_avg is 0.89.
After 134 seconds, recent_cpu is 178.31, load_avg is 0.89.
After 136 seconds, recent_cpu is 179.02, load_avg is 0.90.
After 138 seconds, recent_cpu is 179.72, load_avg is 0.90.
After 140 seconds, recent_cpu is 180.38, load_avg is 0.90.
After 142 seconds, recent_cpu is 181.03, load_avg is 0.91.
After 144 seconds, recent_cpu is 181.65, load_avg is 0.91.
After 146 seconds, recent_cpu is 182.26, load_avg is 0.91.
After 148 seconds, recent_cpu is 182.84, load_avg is 0.92.
After 150 seconds, recent_cpu is 183.41, load_avg is 0.92.
After 152 seconds, recent_cpu is 183.96, load_avg is 0.92.
After 154 seconds, recent_cpu is 184.49, load_avg is 0.92.
After 156 seconds, recent_cpu is 185.00, load_avg is 0.93.
After 158 seconds, recent_cpu is 185.49, load_avg is 0.93.
After 160 seconds, recent_cpu is 185.97, load_avg is 0.93.
After 162 seconds, recent_cpu is 186.43, load_avg is 0.93.
After 164 seconds, recent_cpu is 186.88, load_avg is 0.94.
After 166 seconds, recent_cpu is 187.31, load_avg is 0.94.
After 168 seconds, recent_cpu is 187.73, load_avg is 0.94.
After 170 seconds, recent_cpu is 188.14, load_avg is 0.94.
After 172 seconds, recent_cpu is 188.53, load_avg is 0.94.
After 174 seconds, recent_cpu is 188.91, load_avg is 0.95.
After 176 seconds, recent_cpu is 189.27, load_avg is 0.95.
After 178 seconds, recent_cpu is 189.63, load_avg is 0.95.
After 180 seconds, recent_cpu is 189.97, load_avg is 0.95.
*/
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
/* Sensitive to assumption that recent_cpu updates happen exactly
when timer_ticks() % TIMER_FREQ == 0. */
/* Entry point.  First sleeps in 10-second chunks until its own
   recent_cpu (scaled by 100) has decayed to at most 7.00, then
   spins for 180 seconds, printing its recent_cpu and the load
   average every 2 seconds for the checker to compare against a
   simulation. */
void
test_mlfqs_recent_1 (void)
{
  int64_t start_time;
  int last_elapsed = 0;

  ASSERT (thread_mlfqs);

  do
    {
      msg ("Sleeping 10 seconds to allow recent_cpu to decay, please wait...");
      start_time = timer_ticks ();
      /* NOTE(review): DIV_ROUND_UP (start_time, TIMER_FREQ) yields a
         value in seconds, not ticks, so the alignment term looks as
         if it should be multiplied back by TIMER_FREQ — verify
         against the upstream source before changing; the dominant
         10 * TIMER_FREQ term makes the sleep work regardless. */
      timer_sleep (DIV_ROUND_UP (start_time, TIMER_FREQ) - start_time
                   + 10 * TIMER_FREQ);
    }
  while (thread_get_recent_cpu () > 700);

  start_time = timer_ticks ();
  for (;;)
    {
      /* Print once per 2-second boundary; the last_elapsed guard
         prevents duplicate prints within the same tick. */
      int elapsed = (int)(timer_elapsed (start_time));
      if (elapsed % (TIMER_FREQ * 2) == 0 && elapsed > last_elapsed)
        {
          int recent_cpu = thread_get_recent_cpu ();
          int load_avg = thread_get_load_avg ();
          int elapsed_seconds = elapsed / TIMER_FREQ;
          msg ("After %d seconds, recent_cpu is %d.%02d, load_avg is %d.%02d.",
               elapsed_seconds,
               recent_cpu / 100, recent_cpu % 100,
               load_avg / 100, load_avg % 100);
          if (elapsed_seconds >= 180)
            break;
        }
      last_elapsed = elapsed;
    }
}

View File

@@ -0,0 +1,31 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
use tests::threads::mlfqs;
our ($test);
my (@output) = read_text_file ("$test.output");
common_checks ("run", @output);
@output = get_core_output ("run", @output);
# Get actual values.
local ($_);
my (@actual);
foreach (@output) {
my ($t, $recent_cpu) = /After (\d+) seconds, recent_cpu is (\d+\.\d+),/
or next;
$actual[$t] = $recent_cpu;
}
# Calculate expected values.
my ($expected_load_avg, $expected_recent_cpu)
= mlfqs_expected_load ([(1) x 180], [(100) x 180]);
my (@expected) = @$expected_recent_cpu;
# Compare actual and expected values.
mlfqs_compare ("time", "%.2f", \@actual, \@expected, 2.5, [2, 178, 2],
"Some recent_cpu values were missing or "
. "differed from those expected "
. "by more than 2.5.");
pass;

146
src/tests/threads/mlfqs.pm Normal file
View File

@@ -0,0 +1,146 @@
# -*- perl -*-
use strict;
use warnings;
# Simulate the MLFQS load_avg / recent_cpu recurrences for one thread.
#
# $ready->[$i] is the ready-thread count during second $i and
# $recent_delta->[$i] is the number of ticks charged to the thread in
# that second.  Returns two array refs indexed by second (element 0 is
# the initial value 0): the modeled load_avg series and the modeled
# recent_cpu series.
sub mlfqs_expected_load {
    my ($ready, $recent_delta) = @_;
    my ($avg, $cpu) = (0, 0);
    my (@avg_series) = (0);
    my (@cpu_series) = (0);
    for my $sec (0 .. $#$ready) {
        # load_avg = (59/60) load_avg + (1/60) ready_threads.
        $avg = (59/60) * $avg + (1/60) * $ready->[$sec];
        push (@avg_series, $avg);
        next unless defined $recent_delta->[$sec];
        # After the fresh ticks are added, recent_cpu decays by
        # 2*load_avg / (2*load_avg + 1), using the *new* load_avg.
        my $decay = (2 * $avg) / (2 * $avg + 1);
        $cpu = ($cpu + $recent_delta->[$sec]) * $decay;
        push (@cpu_series, $cpu);
    }
    return (\@avg_series, \@cpu_series);
}
# Simulate the MLFQS scheduler for a fixed workload and predict how
# many timer ticks each thread receives.
#
# @nice holds one nice value per thread; every thread is assumed
# runnable for the entire run.  Time advances in 750 slices of 4 ticks
# each; every 25 slices (100 ticks — one second at the default timer
# frequency, TODO confirm TIMER_FREQ) load_avg and each recent_cpu are
# recomputed, mirroring the kernel's once-per-second update.
# Returns a list of per-thread tick totals.
sub mlfqs_expected_ticks {
    my (@nice) = @_;
    my ($thread_cnt) = scalar (@nice);
    my (@recent_cpu) = (0) x $thread_cnt;
    my (@slices) = (0) x $thread_cnt;
    # @fifo breaks priority ties: the smaller stamp was scheduled
    # longer ago, so it runs first (round-robin among equals).
    my (@fifo) = (0) x $thread_cnt;
    my ($next_fifo) = 1;
    my ($load_avg) = 0;
    for my $i (1...750) {
        if ($i % 25 == 0) {
            # Update load average: (59/60) old + (1/60) ready count.
            $load_avg = (59/60) * $load_avg + (1/60) * $thread_cnt;
            # Update recent_cpu: decay by 2*load/(2*load+1), add nice.
            my ($twice_load) = $load_avg * 2;
            my ($load_factor) = $twice_load / ($twice_load + 1);
            $recent_cpu[$_] = $recent_cpu[$_] * $load_factor + $nice[$_]
              foreach 0...($thread_cnt - 1);
        }
        # Update priorities.  This is an *inverted* priority index
        # (recent_cpu/4 + 2*nice, clamped to 0..63): smaller means more
        # urgent — the mirror image of the kernel's
        # PRI_MAX - recent_cpu/4 - 2*nice.
        my (@priority);
        foreach my $j (0...($thread_cnt - 1)) {
            my ($priority) = int ($recent_cpu[$j] / 4 + $nice[$j] * 2);
            $priority = 0 if $priority < 0;
            $priority = 63 if $priority > 63;
            push (@priority, $priority);
        }
        # Choose thread to run: smallest inverted priority wins, FIFO
        # stamp breaking ties.  (Despite its name, $max holds the index
        # of the *minimum* value.)
        my $max = 0;
        for my $j (1...$#priority) {
            if ($priority[$j] < $priority[$max]
                || ($priority[$j] == $priority[$max]
                    && $fifo[$j] < $fifo[$max])) {
                $max = $j;
            }
        }
        $fifo[$max] = $next_fifo++;
        # Run the chosen thread for one 4-tick slice.
        $recent_cpu[$max] += 4;
        $slices[$max] += 4;
    }
    return @slices;
}
# Grade an mlfqs fairness test: parse per-thread tick counts from the
# test's output and require each to be within $maxdiff ticks of the
# value predicted by mlfqs_expected_ticks for the given nice levels.
sub check_mlfqs_fair {
    my ($nice, $maxdiff) = @_;
    our ($test);

    # Sanity-check the run and keep only the test's own output.
    my (@output) = read_text_file ("$test.output");
    common_checks ("run", @output);
    @output = get_core_output ("run", @output);

    # Collect the reported tick count for each thread id.
    my (@actual);
    for my $line (@output) {
        next unless $line =~ /Thread (\d+) received (\d+) ticks\./;
        $actual[$1] = $2;
    }

    # Compare against the simulated scheduler, thread by thread.
    my (@expected) = mlfqs_expected_ticks (@$nice);
    mlfqs_compare ("thread", "%d",
                   \@actual, \@expected, $maxdiff, [0, $#$nice, 1],
                   "Some tick counts were missing or differed from those "
                   . "expected by more than $maxdiff.");
    pass;
}
# Compare measured values against modeled ones and fail the test with a
# diagnostic table when any sampled point is missing or out of range.
#
# $indep_var    label for the independent variable ("time", "thread").
# $format       printf format for displaying values (e.g. "%.2f").
# $actual_ref   measured values, indexed by the independent variable.
# $expected_ref corresponding modeled values.
# $maxdiff      largest tolerated |actual - expected|; an extra .01 of
#               slack absorbs rounding in the reported values.
# $t_range      [min, max, step] triple selecting indices to check.
# $message      headline printed before the table on failure.
#
# Returns silently when every sampled point passes; otherwise prints
# the table and calls fail (which does not return).
sub mlfqs_compare {
    my ($indep_var, $format,
        $actual_ref, $expected_ref, $maxdiff, $t_range, $message) = @_;
    my ($t_min, $t_max, $t_step) = @$t_range;

    # First pass: find out whether everything is within tolerance.
    my ($ok) = 1;
    for (my ($t) = $t_min; $t <= $t_max; $t += $t_step) {
        my ($actual) = $actual_ref->[$t];
        my ($expected) = $expected_ref->[$t];
        $ok = 0, last
          if !defined ($actual) || abs ($actual - $expected) > $maxdiff + .01;
    }
    return if $ok;

    # Second pass: emit one table row per sample, annotating each
    # missing or out-of-tolerance value.
    print "$message\n";
    mlfqs_row ($indep_var, "actual", "<->", "expected", "explanation");
    mlfqs_row ("------", "--------", "---", "--------", '-' x 40);
    for (my ($t) = $t_min; $t <= $t_max; $t += $t_step) {
        my ($actual) = $actual_ref->[$t];
        my ($expected) = $expected_ref->[$t];
        my ($diff, $rationale);
        if (!defined $actual) {
            $actual = 'undef' ;
            $diff = '';
            $rationale = 'Missing value.';
        } else {
            my ($delta) = abs ($actual - $expected);
            if ($delta > $maxdiff + .01) {
                my ($excess) = $delta - $maxdiff;
                if ($actual > $expected) {
                    $diff = '>>>';
                    $rationale = sprintf "Too big, by $format.", $excess;
                } else {
                    $diff = '<<<';
                    $rationale = sprintf "Too small, by $format.", $excess;
                }
            } else {
                $diff = ' = ';
                $rationale = '';
            }
            $actual = sprintf ($format, $actual);
        }
        $expected = sprintf ($format, $expected);
        mlfqs_row ($t, $actual, $diff, $expected, $rationale);
    }
    fail;
}
# Print one fixed-width row of the comparison table.
sub mlfqs_row {
    my (@columns) = @_;
    printf ("%6s %8s %3s %-8s %s\n", @columns);
}
1;

View File

@@ -0,0 +1,31 @@
/* Verifies that lowering a thread's priority so that it is no
longer the highest-priority thread in the system causes it to
yield immediately. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/thread.h"
static thread_func changing_thread;

/* Creates a thread at PRI_DEFAULT + 1 that immediately lowers its own
   priority, then lowers the main thread's priority too.  The expected
   output pins the interleaving: every priority drop must cause an
   immediate switch to the newly highest-priority thread. */
void
test_priority_change (void)
{
  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  msg ("Creating a high-priority thread 2.");
  /* Thread 2 outranks us, so it runs up to its own priority drop
     before thread_create returns control here. */
  thread_create ("thread 2", PRI_DEFAULT + 1, changing_thread, NULL);
  msg ("Thread 2 should have just lowered its priority.");
  /* Dropping below thread 2's new priority (PRI_DEFAULT - 1) must
     immediately yield to it so it can finish. */
  thread_set_priority (PRI_DEFAULT - 2);
  msg ("Thread 2 should have just exited.");
}

/* Body of "thread 2": lowers itself below the main thread's priority,
   which should switch back to main at once. */
static void
changing_thread (void *aux UNUSED)
{
  msg ("Thread 2 now lowering priority.");
  thread_set_priority (PRI_DEFAULT - 1);
  msg ("Thread 2 exiting.");
}

View File

@@ -0,0 +1,14 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# Pin the exact interleaving: thread 2 preempts on creation, yields
# back when it lowers itself, and finishes when main drops below it.
check_expected ([<<'EOF']);
(priority-change) begin
(priority-change) Creating a high-priority thread 2.
(priority-change) Thread 2 now lowering priority.
(priority-change) Thread 2 should have just lowered its priority.
(priority-change) Thread 2 exiting.
(priority-change) Thread 2 should have just exited.
(priority-change) end
EOF
pass;

View File

@@ -0,0 +1,53 @@
/* Tests that cond_signal() wakes up the highest-priority thread
waiting in cond_wait(). */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
static thread_func priority_condvar_thread;

/* Lock and condition shared by the main thread and all waiters. */
static struct lock lock;
static struct condition condition;

/* Drops the main thread to PRI_MIN, spawns ten waiters with distinct
   priorities PRI_DEFAULT-10 .. PRI_DEFAULT-1 (created in a rotated
   order so creation order differs from priority order), then signals
   the condition ten times.  Each signal must wake the
   highest-priority thread still waiting. */
void
test_priority_condvar (void)
{
  int i;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  lock_init (&lock);
  cond_init (&condition);

  /* Run below every waiter so each new thread starts (and blocks in
     cond_wait) as soon as it is created. */
  thread_set_priority (PRI_MIN);
  for (i = 0; i < 10; i++)
    {
      /* (i + 7) % 10 rotates the sequence so the highest-priority
         waiters are created in the middle of the loop. */
      int priority = PRI_DEFAULT - (i + 7) % 10 - 1;
      char name[16];
      snprintf (name, sizeof name, "priority %d", priority);
      thread_create (name, priority, priority_condvar_thread, NULL);
    }

  for (i = 0; i < 10; i++)
    {
      lock_acquire (&lock);
      msg ("Signaling...");
      /* Must wake the highest-priority waiter, not FIFO order. */
      cond_signal (&condition, &lock);
      lock_release (&lock);
    }
}

/* Waiter: announces itself, waits on the condition, and announces
   again once signaled.  Wake-up messages must appear in strictly
   decreasing priority order. */
static void
priority_condvar_thread (void *aux UNUSED)
{
  msg ("Thread %s starting.", thread_name ());
  lock_acquire (&lock);
  cond_wait (&condition, &lock);
  msg ("Thread %s woke up.", thread_name ());
  lock_release (&lock);
}

View File

@@ -0,0 +1,39 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# Waiters start in creation order (rotated: 23..21 then 30..24), but
# wake-ups must come out in strictly decreasing priority order.
check_expected ([<<'EOF']);
(priority-condvar) begin
(priority-condvar) Thread priority 23 starting.
(priority-condvar) Thread priority 22 starting.
(priority-condvar) Thread priority 21 starting.
(priority-condvar) Thread priority 30 starting.
(priority-condvar) Thread priority 29 starting.
(priority-condvar) Thread priority 28 starting.
(priority-condvar) Thread priority 27 starting.
(priority-condvar) Thread priority 26 starting.
(priority-condvar) Thread priority 25 starting.
(priority-condvar) Thread priority 24 starting.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 30 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 29 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 28 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 27 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 26 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 25 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 24 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 23 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 22 woke up.
(priority-condvar) Signaling...
(priority-condvar) Thread priority 21 woke up.
(priority-condvar) end
EOF
pass;

View File

@@ -0,0 +1,114 @@
/* The main thread set its priority to PRI_MIN and creates 7 threads
(thread 1..7) with priorities PRI_MIN + 3, 6, 9, 12, ...
The main thread initializes 8 locks: lock 0..7 and acquires lock 0.
When thread[i] starts, it first acquires lock[i] (unless i == 7.)
Subsequently, thread[i] attempts to acquire lock[i-1], which is held by
thread[i-1], except for lock[0], which is held by the main thread.
Because the lock is held, thread[i] donates its priority to thread[i-1],
which donates to thread[i-2], and so on until the main thread
receives the donation.
After threads[1..7] have been created and are blocked on locks[0..7],
the main thread releases lock[0], unblocking thread[1], and being
preempted by it.
Thread[1] then completes acquiring lock[0], then releases lock[0],
then releases lock[1], unblocking thread[2], etc.
Thread[7] finally acquires & releases lock[7] and exits, allowing
thread[6], then thread[5] etc. to run and exit until finally the
main thread exits.
In addition, interloper threads are created at priority levels
p = PRI_MIN + 2, 5, 8, 11, ... which should not be run until the
corresponding thread with priority p + 1 has finished.
Written by Godmar Back <gback@cs.vt.edu> */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
#define NESTING_DEPTH 8

/* Locks handed to each donor thread: FIRST is the thread's own lock
   (acquired immediately; NULL for the last thread), SECOND is the
   predecessor's lock it then blocks on, forming the donation chain. */
struct lock_pair
  {
    struct lock *second;
    struct lock *first;
  };

static thread_func donor_thread_func;
static thread_func interloper_thread_func;

/* Builds a donation chain: main (at PRI_MIN) holds locks[0];
   thread[i] (priority PRI_MIN + 3i) holds locks[i] and blocks on
   locks[i-1], so each new donor's priority must propagate down the
   whole chain to the main thread.  Releasing locks[0] then unwinds
   the chain from thread 1 up to thread 7 and back down, with each
   interloper (one priority level below its donor) running only after
   its donor finishes. */
void
test_priority_donate_chain (void)
{
  int i;
  struct lock locks[NESTING_DEPTH - 1];
  struct lock_pair lock_pairs[NESTING_DEPTH];

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  thread_set_priority (PRI_MIN);

  for (i = 0; i < NESTING_DEPTH - 1; i++)
    lock_init (&locks[i]);

  lock_acquire (&locks[0]);
  msg ("%s got lock.", thread_name ());

  for (i = 1; i < NESTING_DEPTH; i++)
    {
      char name[16];
      int thread_priority;

      snprintf (name, sizeof name, "thread %d", i);
      thread_priority = PRI_MIN + i * 3;
      /* thread[i] holds locks[i] (none for the last) and blocks on
         locks[i - 1]. */
      lock_pairs[i].first = i < NESTING_DEPTH - 1 ? locks + i: NULL;
      lock_pairs[i].second = locks + i - 1;

      thread_create (name, thread_priority, donor_thread_func, lock_pairs + i);
      /* The new donor's priority must have reached us through the
         whole chain. */
      msg ("%s should have priority %d. Actual priority: %d.",
           thread_name (), thread_priority, thread_get_priority ());

      /* One level below the donor: must not run until the donor at
         priority p + 1 has finished. */
      snprintf (name, sizeof name, "interloper %d", i);
      thread_create (name, thread_priority - 1, interloper_thread_func, NULL);
    }

  /* Unblocks thread 1, which (still donated to) outranks us. */
  lock_release (&locks[0]);
  msg ("%s finishing with priority %d.", thread_name (),
       thread_get_priority ());
}

/* Donor thread i: takes its own lock, then blocks on its
   predecessor's lock, donating its priority down the chain. */
static void
donor_thread_func (void *locks_)
{
  struct lock_pair *locks = locks_;

  if (locks->first)
    lock_acquire (locks->first);

  lock_acquire (locks->second);
  msg ("%s got lock", thread_name ());

  lock_release (locks->second);
  /* Still holds FIRST (with the next donor blocked on it), so the
     top donor's priority, (NESTING_DEPTH - 1) * 3, should apply. */
  msg ("%s should have priority %d. Actual priority: %d",
       thread_name (), (NESTING_DEPTH - 1) * 3,
       thread_get_priority ());

  if (locks->first)
    lock_release (locks->first);

  msg ("%s finishing with priority %d.", thread_name (),
       thread_get_priority ());
}

/* Runs only once every strictly higher-priority thread is done. */
static void
interloper_thread_func (void *arg_ UNUSED)
{
  msg ("%s finished.", thread_name ());
}
// vim: sw=2

View File

@@ -0,0 +1,46 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# Donations must propagate to main as each donor is created (3..21),
# then the chain unwinds in order, each interloper strictly after its
# donor.
check_expected ([<<'EOF']);
(priority-donate-chain) begin
(priority-donate-chain) main got lock.
(priority-donate-chain) main should have priority 3. Actual priority: 3.
(priority-donate-chain) main should have priority 6. Actual priority: 6.
(priority-donate-chain) main should have priority 9. Actual priority: 9.
(priority-donate-chain) main should have priority 12. Actual priority: 12.
(priority-donate-chain) main should have priority 15. Actual priority: 15.
(priority-donate-chain) main should have priority 18. Actual priority: 18.
(priority-donate-chain) main should have priority 21. Actual priority: 21.
(priority-donate-chain) thread 1 got lock
(priority-donate-chain) thread 1 should have priority 21. Actual priority: 21
(priority-donate-chain) thread 2 got lock
(priority-donate-chain) thread 2 should have priority 21. Actual priority: 21
(priority-donate-chain) thread 3 got lock
(priority-donate-chain) thread 3 should have priority 21. Actual priority: 21
(priority-donate-chain) thread 4 got lock
(priority-donate-chain) thread 4 should have priority 21. Actual priority: 21
(priority-donate-chain) thread 5 got lock
(priority-donate-chain) thread 5 should have priority 21. Actual priority: 21
(priority-donate-chain) thread 6 got lock
(priority-donate-chain) thread 6 should have priority 21. Actual priority: 21
(priority-donate-chain) thread 7 got lock
(priority-donate-chain) thread 7 should have priority 21. Actual priority: 21
(priority-donate-chain) thread 7 finishing with priority 21.
(priority-donate-chain) interloper 7 finished.
(priority-donate-chain) thread 6 finishing with priority 18.
(priority-donate-chain) interloper 6 finished.
(priority-donate-chain) thread 5 finishing with priority 15.
(priority-donate-chain) interloper 5 finished.
(priority-donate-chain) thread 4 finishing with priority 12.
(priority-donate-chain) interloper 4 finished.
(priority-donate-chain) thread 3 finishing with priority 9.
(priority-donate-chain) interloper 3 finished.
(priority-donate-chain) thread 2 finishing with priority 6.
(priority-donate-chain) interloper 2 finished.
(priority-donate-chain) thread 1 finishing with priority 3.
(priority-donate-chain) interloper 1 finished.
(priority-donate-chain) main finishing with priority 0.
(priority-donate-chain) end
EOF
pass;

View File

@@ -0,0 +1,51 @@
/* The main thread acquires a lock. Then it creates a
higher-priority thread that blocks acquiring the lock, causing
it to donate their priorities to the main thread. The main
thread attempts to lower its priority, which should not take
effect until the donation is released. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
static thread_func acquire_thread_func;

/* Holds a lock while a higher-priority thread blocks on it, then
   lowers its own base priority.  The drop must not take effect while
   the donation is active; it shows up only after the lock (and with
   it the donation) is released. */
void
test_priority_donate_lower (void)
{
  struct lock lock;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  lock_init (&lock);
  lock_acquire (&lock);
  thread_create ("acquire", PRI_DEFAULT + 10, acquire_thread_func, &lock);
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 10, thread_get_priority ());

  msg ("Lowering base priority...");
  /* Only the base priority drops; the effective priority must stay at
     the donated PRI_DEFAULT + 10. */
  thread_set_priority (PRI_DEFAULT - 10);
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 10, thread_get_priority ());

  /* Releasing the lock ends the donation: "acquire" preempts and runs
     to completion, after which the lowered base priority shows. */
  lock_release (&lock);
  msg ("acquire must already have finished.");
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT - 10, thread_get_priority ());
}
/* Blocks on the lock held by the main thread (donating
   PRI_DEFAULT + 10 to it), then reports once it gets through. */
static void
acquire_thread_func (void *lock_)
{
  struct lock *l = lock_;

  lock_acquire (l);
  msg ("acquire: got the lock");
  lock_release (l);
  msg ("acquire: done");
}

View File

@@ -0,0 +1,16 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# Effective priority stays at the donated 41 across the base-priority
# drop; the lowered base (21) appears only after the lock is released.
check_expected ([<<'EOF']);
(priority-donate-lower) begin
(priority-donate-lower) Main thread should have priority 41. Actual priority: 41.
(priority-donate-lower) Lowering base priority...
(priority-donate-lower) Main thread should have priority 41. Actual priority: 41.
(priority-donate-lower) acquire: got the lock
(priority-donate-lower) acquire: done
(priority-donate-lower) acquire must already have finished.
(priority-donate-lower) Main thread should have priority 21. Actual priority: 21.
(priority-donate-lower) end
EOF
pass;

View File

@@ -0,0 +1,77 @@
/* The main thread acquires locks A and B, then it creates two
higher-priority threads. Each of these threads blocks
acquiring one of the locks and thus donate their priority to
the main thread. The main thread releases the locks in turn
and relinquishes its donated priorities.
Based on a test originally submitted for Stanford's CS 140 in
winter 1999 by Matt Franklin <startled@leland.stanford.edu>,
Greg Hutchins <gmh@leland.stanford.edu>, Yu Ping Hu
<yph@cs.stanford.edu>. Modified by arens. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
static thread_func a_thread_func;
static thread_func b_thread_func;

/* Takes two donations at once: thread "a" (PRI_DEFAULT + 1) blocks on
   lock A and thread "b" (PRI_DEFAULT + 2) on lock B, both held by the
   main thread.  Releasing B should drop the effective priority only
   to a's donation; releasing A should restore the base priority. */
void
test_priority_donate_multiple (void)
{
  struct lock a, b;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  lock_init (&a);
  lock_init (&b);

  lock_acquire (&a);
  lock_acquire (&b);

  thread_create ("a", PRI_DEFAULT + 1, a_thread_func, &a);
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 1, thread_get_priority ());

  thread_create ("b", PRI_DEFAULT + 2, b_thread_func, &b);
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 2, thread_get_priority ());

  /* Ends b's donation; b preempts and finishes, and only a's
     donation (PRI_DEFAULT + 1) should remain. */
  lock_release (&b);
  msg ("Thread b should have just finished.");
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 1, thread_get_priority ());

  /* Ends the last donation; priority must fall back to the base. */
  lock_release (&a);
  msg ("Thread a should have just finished.");
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT, thread_get_priority ());
}

/* Blocks on lock A, donating PRI_DEFAULT + 1 to its holder. */
static void
a_thread_func (void *lock_)
{
  struct lock *lock = lock_;

  lock_acquire (lock);
  msg ("Thread a acquired lock a.");
  lock_release (lock);
  msg ("Thread a finished.");
}

/* Blocks on lock B, donating PRI_DEFAULT + 2 to its holder. */
static void
b_thread_func (void *lock_)
{
  struct lock *lock = lock_;

  lock_acquire (lock);
  msg ("Thread b acquired lock b.");
  lock_release (lock);
  msg ("Thread b finished.");
}

View File

@@ -0,0 +1,19 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# Effective priority tracks the highest outstanding donation: 32, then
# 33, back to 32 when b's lock is released, and 31 once both are free.
check_expected ([<<'EOF']);
(priority-donate-multiple) begin
(priority-donate-multiple) Main thread should have priority 32. Actual priority: 32.
(priority-donate-multiple) Main thread should have priority 33. Actual priority: 33.
(priority-donate-multiple) Thread b acquired lock b.
(priority-donate-multiple) Thread b finished.
(priority-donate-multiple) Thread b should have just finished.
(priority-donate-multiple) Main thread should have priority 32. Actual priority: 32.
(priority-donate-multiple) Thread a acquired lock a.
(priority-donate-multiple) Thread a finished.
(priority-donate-multiple) Thread a should have just finished.
(priority-donate-multiple) Main thread should have priority 31. Actual priority: 31.
(priority-donate-multiple) end
EOF
pass;

View File

@@ -0,0 +1,90 @@
/* The main thread acquires locks A and B, then it creates three
higher-priority threads. The first two of these threads block
acquiring one of the locks and thus donate their priority to
the main thread. The main thread releases the locks in turn
and relinquishes its donated priorities, allowing the third thread
to run.
In this test, the main thread releases the locks in a different
order compared to priority-donate-multiple.c.
Written by Godmar Back <gback@cs.vt.edu>.
Based on a test originally submitted for Stanford's CS 140 in
winter 1999 by Matt Franklin <startled@leland.stanford.edu>,
Greg Hutchins <gmh@leland.stanford.edu>, Yu Ping Hu
<yph@cs.stanford.edu>. Modified by arens. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
static thread_func a_thread_func;
static thread_func b_thread_func;
static thread_func c_thread_func;

/* Like priority-donate-multiple, but the locks are released in the
   opposite order (A before B) and a third, non-donating thread "c"
   checks that no donated priority lingers.  Releasing A while b's
   larger donation is still active must not change the effective
   priority. */
void
test_priority_donate_multiple2 (void)
{
  struct lock a, b;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  lock_init (&a);
  lock_init (&b);

  lock_acquire (&a);
  lock_acquire (&b);

  thread_create ("a", PRI_DEFAULT + 3, a_thread_func, &a);
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 3, thread_get_priority ());

  /* "c" blocks on nothing; it stays runnable but must not run until
     every donation is gone. */
  thread_create ("c", PRI_DEFAULT + 1, c_thread_func, NULL);

  thread_create ("b", PRI_DEFAULT + 5, b_thread_func, &b);
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 5, thread_get_priority ());

  /* a's donation ends, but b's larger donation is still active, so
     the effective priority must stay at PRI_DEFAULT + 5. */
  lock_release (&a);
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 5, thread_get_priority ());

  /* Last donation gone: b, then a, then c run in priority order. */
  lock_release (&b);
  msg ("Threads b, a, c should have just finished, in that order.");
  msg ("Main thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT, thread_get_priority ());
}

/* Blocks on lock A, donating PRI_DEFAULT + 3 to the main thread. */
static void
a_thread_func (void *lock_)
{
  struct lock *lock = lock_;

  lock_acquire (lock);
  msg ("Thread a acquired lock a.");
  lock_release (lock);
  msg ("Thread a finished.");
}

/* Blocks on lock B, donating PRI_DEFAULT + 5 to the main thread. */
static void
b_thread_func (void *lock_)
{
  struct lock *lock = lock_;

  lock_acquire (lock);
  msg ("Thread b acquired lock b.");
  lock_release (lock);
  msg ("Thread b finished.");
}

/* Holds no lock; simply reports when it finally gets the CPU. */
static void
c_thread_func (void *a_ UNUSED)
{
  msg ("Thread c finished.");
}

View File

@@ -0,0 +1,19 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# Releasing lock a must not lower the effective priority while b's
# larger donation (36) is outstanding; b, a, c then run in priority
# order once both locks are free.
check_expected ([<<'EOF']);
(priority-donate-multiple2) begin
(priority-donate-multiple2) Main thread should have priority 34. Actual priority: 34.
(priority-donate-multiple2) Main thread should have priority 36. Actual priority: 36.
(priority-donate-multiple2) Main thread should have priority 36. Actual priority: 36.
(priority-donate-multiple2) Thread b acquired lock b.
(priority-donate-multiple2) Thread b finished.
(priority-donate-multiple2) Thread a acquired lock a.
(priority-donate-multiple2) Thread a finished.
(priority-donate-multiple2) Thread c finished.
(priority-donate-multiple2) Threads b, a, c should have just finished, in that order.
(priority-donate-multiple2) Main thread should have priority 31. Actual priority: 31.
(priority-donate-multiple2) end
EOF
pass;

View File

@@ -0,0 +1,94 @@
/* Low-priority main thread L acquires lock A. Medium-priority
thread M then acquires lock B then blocks on acquiring lock A.
High-priority thread H then blocks on acquiring lock B. Thus,
thread H donates its priority to M, which in turn donates it
to thread L.
Based on a test originally submitted for Stanford's CS 140 in
winter 1999 by Matt Franklin <startled@leland.stanford.edu>,
Greg Hutchins <gmh@leland.stanford.edu>, Yu Ping Hu
<yph@cs.stanford.edu>. Modified by arens. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
/* Both locks, handed to the medium thread. */
struct locks
  {
    struct lock *a;
    struct lock *b;
  };

static thread_func medium_thread_func;
static thread_func high_thread_func;

/* Nested donation: main (L) holds lock A; "medium" (M) takes lock B
   and blocks on A; "high" (H) blocks on B.  H's priority must flow
   through M to L.  thread_yield () after each step makes the msg
   ordering in the expected output deterministic. */
void
test_priority_donate_nest (void)
{
  struct lock a, b;
  struct locks locks;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  lock_init (&a);
  lock_init (&b);

  lock_acquire (&a);

  locks.a = &a;
  locks.b = &b;

  thread_create ("medium", PRI_DEFAULT + 1, medium_thread_func, &locks);
  thread_yield ();
  /* M is blocked on A, so its priority is donated to us. */
  msg ("Low thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 1, thread_get_priority ());

  thread_create ("high", PRI_DEFAULT + 2, high_thread_func, &b);
  thread_yield ();
  /* H donates to M (holder of B), and M's donation to us must now
     reflect H's priority: the donation nests. */
  msg ("Low thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 2, thread_get_priority ());

  lock_release (&a);
  thread_yield ();
  msg ("Medium thread should just have finished.");
  msg ("Low thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT, thread_get_priority ());
}

/* M: holds lock B while blocking on lock A, passing H's donation
   through to the main thread. */
static void
medium_thread_func (void *locks_)
{
  struct locks *locks = locks_;

  lock_acquire (locks->b);
  lock_acquire (locks->a);

  /* Still holds B with H blocked on it, so H's priority applies. */
  msg ("Medium thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 2, thread_get_priority ());
  msg ("Medium thread got the lock.");

  lock_release (locks->a);
  thread_yield ();

  /* Releasing B hands the CPU to H. */
  lock_release (locks->b);
  thread_yield ();

  msg ("High thread should have just finished.");
  msg ("Middle thread finished.");
}

/* H: blocks on lock B, donating PRI_DEFAULT + 2 to its holder. */
static void
high_thread_func (void *lock_)
{
  struct lock *lock = lock_;

  lock_acquire (lock);
  msg ("High thread got the lock.");
  lock_release (lock);
  msg ("High thread finished.");
}

View File

@@ -0,0 +1,19 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# High's priority (33) must reach the low thread through medium's
# nested donation, and drain back to 31 once the chain unwinds.
check_expected ([<<'EOF']);
(priority-donate-nest) begin
(priority-donate-nest) Low thread should have priority 32. Actual priority: 32.
(priority-donate-nest) Low thread should have priority 33. Actual priority: 33.
(priority-donate-nest) Medium thread should have priority 33. Actual priority: 33.
(priority-donate-nest) Medium thread got the lock.
(priority-donate-nest) High thread got the lock.
(priority-donate-nest) High thread finished.
(priority-donate-nest) High thread should have just finished.
(priority-donate-nest) Middle thread finished.
(priority-donate-nest) Medium thread should just have finished.
(priority-donate-nest) Low thread should have priority 31. Actual priority: 31.
(priority-donate-nest) end
EOF
pass;

View File

@@ -0,0 +1,65 @@
/* The main thread acquires a lock. Then it creates two
higher-priority threads that block acquiring the lock, causing
them to donate their priorities to the main thread. When the
main thread releases the lock, the other threads should
acquire it in priority order.
Based on a test originally submitted for Stanford's CS 140 in
winter 1999 by Matt Franklin <startled@leland.stanford.edu>,
Greg Hutchins <gmh@leland.stanford.edu>, Yu Ping Hu
<yph@cs.stanford.edu>. Modified by arens. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
static thread_func acquire1_thread_func;
static thread_func acquire2_thread_func;

/* Basic donation: two higher-priority threads block on the lock the
   main thread holds, each raising its effective priority.  On
   release they must take the lock in priority order (acquire2 before
   acquire1). */
void
test_priority_donate_one (void)
{
  struct lock lock;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  lock_init (&lock);
  lock_acquire (&lock);
  thread_create ("acquire1", PRI_DEFAULT + 1, acquire1_thread_func, &lock);
  msg ("This thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 1, thread_get_priority ());
  thread_create ("acquire2", PRI_DEFAULT + 2, acquire2_thread_func, &lock);
  /* The larger donation must supersede the first. */
  msg ("This thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 2, thread_get_priority ());
  lock_release (&lock);
  msg ("acquire2, acquire1 must already have finished, in that order.");
  msg ("This should be the last line before finishing this test.");
}

/* Blocks on the lock, donating PRI_DEFAULT + 1 to its holder. */
static void
acquire1_thread_func (void *lock_)
{
  struct lock *lock = lock_;

  lock_acquire (lock);
  msg ("acquire1: got the lock");
  lock_release (lock);
  msg ("acquire1: done");
}

/* Blocks on the lock, donating PRI_DEFAULT + 2 to its holder. */
static void
acquire2_thread_func (void *lock_)
{
  struct lock *lock = lock_;

  lock_acquire (lock);
  msg ("acquire2: got the lock");
  lock_release (lock);
  msg ("acquire2: done");
}

View File

@@ -0,0 +1,17 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# Donations raise main to 32 then 33; on release the waiters must run
# in priority order (acquire2 first), before main continues.
check_expected ([<<'EOF']);
(priority-donate-one) begin
(priority-donate-one) This thread should have priority 32. Actual priority: 32.
(priority-donate-one) This thread should have priority 33. Actual priority: 33.
(priority-donate-one) acquire2: got the lock
(priority-donate-one) acquire2: done
(priority-donate-one) acquire1: got the lock
(priority-donate-one) acquire1: done
(priority-donate-one) acquire2, acquire1 must already have finished, in that order.
(priority-donate-one) This should be the last line before finishing this test.
(priority-donate-one) end
EOF
pass;

View File

@@ -0,0 +1,82 @@
/* Low priority thread L acquires a lock, then blocks downing a
semaphore. Medium priority thread M then blocks waiting on
the same semaphore. Next, high priority thread H attempts to
acquire the lock, donating its priority to L.
Next, the main thread ups the semaphore, waking up L. L
releases the lock, which wakes up H. H "up"s the semaphore,
waking up M. H terminates, then M, then L, and finally the
main thread.
Written by Godmar Back <gback@cs.vt.edu>. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
/* Lock and semaphore shared by all three worker threads. */
struct lock_and_sema
  {
    struct lock lock;
    struct semaphore sema;
  };

static thread_func l_thread_func;
static thread_func m_thread_func;
static thread_func h_thread_func;

/* Donation must matter across a semaphore wait: L holds the lock and
   downs the semaphore; M waits on the same semaphore; H blocks on the
   lock, donating to L.  When main ups the semaphore, L (carrying H's
   donation) must be the waiter that wakes, not M. */
void
test_priority_donate_sema (void)
{
  struct lock_and_sema ls;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  lock_init (&ls.lock);
  sema_init (&ls.sema, 0);
  thread_create ("low", PRI_DEFAULT + 1, l_thread_func, &ls);
  thread_create ("med", PRI_DEFAULT + 3, m_thread_func, &ls);
  thread_create ("high", PRI_DEFAULT + 5, h_thread_func, &ls);
  /* Wakes L: H's donation (PRI_DEFAULT + 5) makes L outrank M in the
     semaphore's wait list. */
  sema_up (&ls.sema);
  msg ("Main thread finished.");
}

/* L: takes the lock, then blocks on the semaphore while holding it. */
static void
l_thread_func (void *ls_)
{
  struct lock_and_sema *ls = ls_;

  lock_acquire (&ls->lock);
  msg ("Thread L acquired lock.");
  sema_down (&ls->sema);
  msg ("Thread L downed semaphore.");
  /* Releasing the lock wakes H, which outranks us. */
  lock_release (&ls->lock);
  msg ("Thread L finished.");
}

/* M: waits on the semaphore only; runs after H ups it. */
static void
m_thread_func (void *ls_)
{
  struct lock_and_sema *ls = ls_;

  sema_down (&ls->sema);
  msg ("Thread M finished.");
}

/* H: blocks on the lock (donating to L), then ups the semaphore to
   release M before finishing. */
static void
h_thread_func (void *ls_)
{
  struct lock_and_sema *ls = ls_;

  lock_acquire (&ls->lock);
  msg ("Thread H acquired lock.");
  sema_up (&ls->sema);
  lock_release (&ls->lock);
  msg ("Thread H finished.");
}

View File

@@ -0,0 +1,16 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
# The semaphore up from main must wake L (boosted by H's donation)
# rather than the nominally higher-priority M; H, M, L then finish in
# that order.
check_expected ([<<'EOF']);
(priority-donate-sema) begin
(priority-donate-sema) Thread L acquired lock.
(priority-donate-sema) Thread L downed semaphore.
(priority-donate-sema) Thread H acquired lock.
(priority-donate-sema) Thread H finished.
(priority-donate-sema) Thread M finished.
(priority-donate-sema) Thread L finished.
(priority-donate-sema) Main thread finished.
(priority-donate-sema) end
EOF
pass;

View File

@@ -0,0 +1,99 @@
/* Creates several threads all at the same priority and ensures
that they consistently run in the same round-robin order.
Based on a test originally submitted for Stanford's CS 140 in
winter 1999 by by Matt Franklin
<startled@leland.stanford.edu>, Greg Hutchins
<gmh@leland.stanford.edu>, Yu Ping Hu <yph@cs.stanford.edu>.
Modified by arens. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "devices/timer.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
/* Per-thread bookkeeping handed to each worker. */
struct simple_thread_data
  {
    int id;                     /* Thread ID (index into data[]). */
    int iterations;             /* Iterations so far. */
    struct lock *lock;          /* Lock on output. */
    int **op;                   /* Output buffer position. */
  };

#define THREAD_CNT 16
#define ITER_CNT 16

static thread_func simple_thread_func;

/* Creates THREAD_CNT equal-priority threads that each append their id
   to a shared buffer ITER_CNT times, yielding between appends.  The
   recorded sequence must repeat the same permutation of ids every
   round: equal-priority threads run round-robin in a stable order. */
void
test_priority_fifo (void)
{
  struct simple_thread_data data[THREAD_CNT];
  struct lock lock;
  int *output, *op;
  int i, cnt;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  msg ("%d threads will iterate %d times in the same order each time.",
       THREAD_CNT, ITER_CNT);
  msg ("If the order varies then there is a bug.");

  /* NOTE(review): buffer is sized at twice the entries actually
     written — presumably slack; confirm intent. */
  output = op = malloc (sizeof *output * THREAD_CNT * ITER_CNT * 2);
  ASSERT (output != NULL);
  lock_init (&lock);

  /* Stay above the workers while creating them so none runs early. */
  thread_set_priority (PRI_DEFAULT + 2);
  for (i = 0; i < THREAD_CNT; i++)
    {
      char name[16];
      struct simple_thread_data *d = data + i;
      snprintf (name, sizeof name, "%d", i);
      d->id = i;
      d->iterations = 0;
      d->lock = &lock;
      d->op = &op;
      thread_create (name, PRI_DEFAULT + 1, simple_thread_func, d);
    }

  /* Dropping below the workers lets all of them run to completion. */
  thread_set_priority (PRI_DEFAULT);
  /* All the other threads now run to termination here. */
  ASSERT (lock.holder == NULL);

  /* Replay the recorded ids, one line per round of THREAD_CNT
     entries; every line must show the same permutation. */
  cnt = 0;
  for (; output < op; output++)
    {
      struct simple_thread_data *d;

      ASSERT (*output >= 0 && *output < THREAD_CNT);
      d = data + *output;
      if (cnt % THREAD_CNT == 0)
        printf ("(priority-fifo) iteration:");
      printf (" %d", d->id);
      if (++cnt % THREAD_CNT == 0)
        printf ("\n");
      d->iterations++;
    }
}
/* Appends this thread's id to the shared output buffer ITER_CNT
   times, yielding after each entry so all equal-priority threads
   interleave round-robin. */
static void
simple_thread_func (void *data_)
{
  struct simple_thread_data *d = data_;
  int iter;

  for (iter = 0; iter < ITER_CNT; iter++)
    {
      lock_acquire (d->lock);
      *(*d->op)++ = d->id;
      lock_release (d->lock);
      thread_yield ();
    }
}

View File

@@ -0,0 +1,63 @@
# -*- perl -*-

# Checker for the priority-fifo test.  The expected output looks like
# this:
#
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# (priority-fifo) iteration: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
#
# A different permutation of 0...15 is acceptable, but every line must
# be in the same order.
#
# Fix over the original: removed the unused variables @order and @t,
# which were declared and initialized but never read.

use strict;
use warnings;
use tests::tests;

our ($test);

my (@output) = read_text_file ("$test.output");
common_checks ("run", @output);

my ($thread_cnt) = 16;
my ($iter_cnt) = 16;

# Pull out only the "iteration:" lines.
my (@iterations) = grep (/iteration:/, @output);
fail "No iterations found in output.\n" if !@iterations;

# The first iteration must list exactly $thread_cnt thread IDs,
# and those IDs must be a permutation of 0...$thread_cnt - 1.
my (@numbering) = $iterations[0] =~ /(\d+)/g;
fail "First iteration does not list exactly $thread_cnt threads.\n"
  if @numbering != $thread_cnt;

my (@sorted_numbering) = sort { $a <=> $b } @numbering;
for my $i (0...$#sorted_numbering) {
    if ($sorted_numbering[$i] != $i) {
	fail "First iteration does not list all threads "
	  . "0...$#sorted_numbering\n";
    }
}

# Every subsequent iteration must match the first exactly.
for my $i (1...$#iterations) {
    if ($iterations[$i] ne $iterations[0]) {
	fail "Iteration $i differs from iteration 0\n";
    }
}

fail "$iter_cnt iterations expected but " . scalar (@iterations) . " found\n"
  if $iter_cnt != @iterations;

pass;

View File

@@ -0,0 +1,41 @@
/* Ensures that a high-priority thread really preempts.
Based on a test originally submitted for Stanford's CS 140 in
winter 1999 by by Matt Franklin
<startled@leland.stanford.edu>, Greg Hutchins
<gmh@leland.stanford.edu>, Yu Ping Hu <yph@cs.stanford.edu>.
Modified by arens. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
static thread_func simple_thread_func;
/* Entry point: creates a single thread at PRI_DEFAULT + 1.  Because
   that thread outranks us, a correct priority scheduler must preempt
   us inside thread_create() and run it to completion before control
   returns here, so our message prints last. */
void
test_priority_preempt (void) 
{
  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  /* The new thread preempts us immediately and runs to completion... */
  thread_create ("high-priority", PRI_DEFAULT + 1, simple_thread_func, NULL);
  /* ...so by the time we resume, all of its output must be done. */
  msg ("The high-priority thread should have already completed.");
}
/* High-priority thread body: prints five numbered iteration messages,
   yielding after each (no equal-priority thread exists, so it keeps
   running), then announces completion. */
static void 
simple_thread_func (void *aux UNUSED) 
{
  int iter = 0;

  while (iter < 5) 
    {
      msg ("Thread %s iteration %d", thread_name (), iter);
      thread_yield ();
      iter++;
    }
  msg ("Thread %s done!", thread_name ());
}

View File

@@ -0,0 +1,16 @@
# -*- perl -*-

# Checker for the priority-preempt test: all five iteration messages
# and the "done!" line from the high-priority thread must appear
# before the main thread's closing message, proving the new thread
# preempted main and ran to completion first.

use strict;
use warnings;
use tests::tests;
check_expected ([<<'EOF']);
(priority-preempt) begin
(priority-preempt) Thread high-priority iteration 0
(priority-preempt) Thread high-priority iteration 1
(priority-preempt) Thread high-priority iteration 2
(priority-preempt) Thread high-priority iteration 3
(priority-preempt) Thread high-priority iteration 4
(priority-preempt) Thread high-priority done!
(priority-preempt) The high-priority thread should have already completed.
(priority-preempt) end
EOF
pass;

View File

@@ -0,0 +1,84 @@
/* The main thread creates and acquires a lock. Then it creates two
higher-priority threads (one medium priority and one high priority)
that block on acquiring the lock, causing them to donate their
priorities to the main thread.
When the main thread releases the lock, the high priority thread
should acquire the lock, but then it will immediately drop its
priority below that of the main thread.
If the lock donations have been correctly preserved, then the
high priority thread should still run with medium priority, and
thus continue running without yielding to the main thread.
*/
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/synch.h"
#include "threads/thread.h"
static thread_func medium_thread_func;
static thread_func high_thread_func;
/* Entry point.  Acquires a lock, then creates a medium- and a
   high-priority thread that both block on it; after each creation we
   check that the blocker's priority has been donated to us.  When we
   release the lock, the high-priority thread takes it and drops its
   own base priority; the test passes only if the donation from the
   still-waiting medium thread keeps it running (see the .ck file for
   the exact required message order). */
void
test_priority_preservation (void) 
{
  msg ("main-thread starting...");
  struct lock lock;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  lock_init (&lock);
  lock_acquire (&lock);

  /* Medium thread blocks on our lock, donating PRI_DEFAULT + 5. */
  msg("main-thread creating medium-priority thread...");
  thread_create ("medium-priority", PRI_DEFAULT + 5, medium_thread_func, &lock);
  msg ("main-thread continuing...");
  msg ("This thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 5, thread_get_priority ());

  /* High thread blocks too, raising the donation to PRI_DEFAULT + 10. */
  msg("main-thread creating high-priority thread...");
  thread_create ("high-priority", PRI_DEFAULT + 10, high_thread_func, &lock);
  msg ("main-thread continuing...");
  msg ("This thread should have priority %d. Actual priority: %d.",
       PRI_DEFAULT + 10, thread_get_priority ());

  /* Releasing the lock hands it to the high-priority waiter; both
     waiters should finish before we run again. */
  msg ("main-thread now releasing the lock...");
  lock_release (&lock);
  msg ("medium-priority thread must already have finished.");
  msg ("This should be the last line before finishing this test.");
}
/* Medium-priority waiter: announces itself, blocks acquiring the
   shared lock (donating its priority to the holder), then briefly
   holds and releases it before finishing. */
static void
medium_thread_func (void *lock_) 
{
  struct lock *l = lock_;

  msg ("medium-priority thread starting...");
  msg ("medium-priority thread trying to acquire the lock...");
  lock_acquire (l);
  msg ("medium-priority thread got the lock.");
  lock_release (l);
  msg ("medium-priority thread done.");
}
/* High-priority waiter: blocks acquiring the lock, and once it gets
   it, lowers its own base priority.  With the medium thread still
   blocked on this lock, its donation should keep our effective
   priority at PRI_DEFAULT + 5, so we keep running and print the
   priority check before releasing the lock. */
static void
high_thread_func (void *lock_) 
{
  struct lock *l = lock_;

  msg ("high-priority thread starting...");
  msg ("high-priority thread trying to acquire the lock...");
  lock_acquire (l);
  msg ("high-priority thread got the lock.");
  msg ("high-priority thread about to drop to low priority...");
  thread_set_priority (PRI_DEFAULT - 10);
  msg ("This thread should still have effective priority %d. Actual priority: %d.",
       PRI_DEFAULT + 5, thread_get_priority ());
  lock_release (l);
  msg ("We should not see this message, as pintos will close when the main-thread terminates.");
}

View File

@@ -0,0 +1,28 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
check_expected ([<<'EOF']);
(priority-preservation) begin
(priority-preservation) main-thread starting...
(priority-preservation) main-thread creating medium-priority thread...
(priority-preservation) medium-priority thread starting...
(priority-preservation) medium-priority thread trying to acquire the lock...
(priority-preservation) main-thread continuing...
(priority-preservation) This thread should have priority 36. Actual priority: 36.
(priority-preservation) main-thread creating high-priority thread...
(priority-preservation) high-priority thread starting...
(priority-preservation) high-priority thread trying to acquire the lock...
(priority-preservation) main-thread continuing...
(priority-preservation) This thread should have priority 41. Actual priority: 41.
(priority-preservation) main-thread now releasing the lock...
(priority-preservation) high-priority thread got the lock.
(priority-preservation) high-priority thread about to drop to low priority...
(priority-preservation) This thread should still have effective priority 36. Actual priority: 36.
(priority-preservation) medium-priority thread got the lock.
(priority-preservation) medium-priority thread done.
(priority-preservation) medium-priority thread must already have finished.
(priority-preservation) This should be the last line before finishing this test.
(priority-preservation) end
EOF
pass;

View File

@@ -0,0 +1,45 @@
/* Tests that the highest-priority thread waiting on a semaphore
is the first to wake up. */
#include <stdio.h>
#include "tests/threads/tests.h"
#include "threads/init.h"
#include "threads/malloc.h"
#include "threads/synch.h"
#include "threads/thread.h"
#include "devices/timer.h"
static thread_func priority_sema_thread;
static struct semaphore sema;
/* Entry point: drops the main thread to PRI_MIN, spawns ten waiters
   with distinct priorities (each preempts us and blocks on the
   semaphore immediately), then ups the semaphore once per waiter.
   Each "up" must wake the highest-priority waiter remaining, which
   preempts us before "Back in main thread." is printed. */
void
test_priority_sema (void) 
{
  int idx;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  sema_init (&sema, 0);

  /* Sit below every waiter so each new thread runs and parks itself
     on the semaphore before the next is created. */
  thread_set_priority (PRI_MIN);

  for (idx = 0; idx < 10; idx++) 
    {
      char name[16];
      /* Distinct priorities PRI_DEFAULT-1 down to PRI_DEFAULT-10,
         cycled so creation order differs from wake-up order. */
      int pri = PRI_DEFAULT - 1 - (idx + 3) % 10;

      snprintf (name, sizeof name, "priority %d", pri);
      thread_create (name, pri, priority_sema_thread, NULL);
    }

  for (idx = 0; idx < 10; idx++) 
    {
      sema_up (&sema);
      msg ("Back in main thread.");
    }
}
/* Waiter body: blocks on the shared semaphore, then, once woken,
   reports its name (which encodes its priority). */
static void
priority_sema_thread (void *aux UNUSED) 
{
  sema_down (&sema);
  msg ("Thread %s woke up.", thread_name ());
}

View File

@@ -0,0 +1,29 @@
# -*- perl -*-
use strict;
use warnings;
use tests::tests;
check_expected ([<<'EOF']);
(priority-sema) begin
(priority-sema) Thread priority 30 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 29 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 28 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 27 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 26 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 25 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 24 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 23 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 22 woke up.
(priority-sema) Back in main thread.
(priority-sema) Thread priority 21 woke up.
(priority-sema) Back in main thread.
(priority-sema) end
EOF
pass;

View File

@@ -0,0 +1,4 @@
#include "tests/threads/tests.h"
#include <debug.h>
#include <string.h>
#include <stdio.h>

13
src/tests/threads/tests.h Normal file
View File

@@ -0,0 +1,13 @@
#ifndef TESTS_THREADS_TESTS_H
#define TESTS_THREADS_TESTS_H

/* The thread tests reuse the driver/reporting interface declared for
   the device tests; definitions live elsewhere (tests.c — not visible
   here, so semantics below are summarized from usage in the tests). */
#include "tests/devices/tests.h"

/* Signature of a test's entry point function. */
typedef void test_func (void);

extern void run_test (const char *);  /* Runs the test with the given name. */
extern void msg (const char *, ...);  /* printf-style test output line. */
extern void fail (const char *, ...); /* printf-style failure report. */
extern void pass (void);              /* Reports test success. */

#endif /* tests/threads/tests.h */