fix mlfqs tests, complete BSD scheduling

2024-10-17 02:38:57 +01:00
parent c5e41db9b0
commit 044c383a0f
3 changed files with 34 additions and 9 deletions

View File

@@ -199,10 +199,6 @@ static void
timer_interrupt (struct intr_frame *args UNUSED)
{
ticks++;
if (ticks % TIMER_FREQ == 0)
{
calculate_recent_cpu ();
}
for (struct list_elem *e = list_begin (&sleeping_threads);
e != list_end (&sleeping_threads); e = list_next (e))
{
@@ -212,7 +208,6 @@ timer_interrupt (struct intr_frame *args UNUSED)
else
break;
}
thread_increment_recent_cpu ();
thread_tick ();
}
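For reference, the per-tick half of the 4.4BSD rule is that the running thread's recent_cpu grows by one on every timer interrupt, unless the idle thread is running. The helper named above, thread_increment_recent_cpu, has no body in this diff; a minimal sketch of what such a helper could look like, assuming the fp32_t type used elsewhere in this commit plus a hypothetical fp_add_int (fixed-point plus integer) helper:

/* Sketch only -- fp_add_int is an assumed helper, not part of this diff. */
void
thread_increment_recent_cpu (void)
{
  struct thread *t = thread_current ();
  if (thread_mlfqs && t != idle_thread)   /* Idle time is not charged. */
    t->recent_cpu = fp_add_int (t->recent_cpu, 1);
}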

View File

@@ -233,6 +233,7 @@ lock_release (struct lock *lock)
lock->holder = NULL;
sema_up (&lock->semaphore);
thread_yield ();
}
/* Returns true if the current thread holds LOCK, false

View File

@@ -73,6 +73,8 @@ static bool is_thread (struct thread *) UNUSED;
static void *alloc_frame (struct thread *, size_t size);
static int calculate_bsd_priority (fp32_t recent_cpu, int nice);
static void thread_update_recent_cpu (struct thread *t, void *aux UNUSED);
static bool thread_priority_less (const struct list_elem *a,
const struct list_elem *b, void *aux UNUSED);
static void schedule (void);
void thread_schedule_tail (struct thread *prev);
static tid_t allocate_tid (void);
@@ -157,8 +159,11 @@ thread_tick (void)
int64_t ticks = timer_ticks ();
if (thread_mlfqs && (ticks % TIMER_FREQ == 0))
{
size_t ready = threads_ready ();
if (t != idle_thread)
ready++;
fp32_t old_coeff = fp_mul (fp_div_int (int_to_fp (59), 60), load_avg);
fp32_t new_coeff = fp_div_int (int_to_fp (threads_ready ()), 60);
fp32_t new_coeff = fp_div_int (int_to_fp (ready), 60);
load_avg = fp_add (old_coeff, new_coeff);
thread_foreach (thread_update_recent_cpu, NULL);
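The block above computes the standard 4.4BSD load average once per second: load_avg = (59/60) * load_avg + (1/60) * ready, where ready counts the ready threads plus the running thread when it is not the idle thread. The companion per-second rule, applied to every thread by the thread_foreach call, is recent_cpu = (2 * load_avg) / (2 * load_avg + 1) * recent_cpu + nice. The body of thread_update_recent_cpu is not part of this diff; a sketch of what it might look like, assuming hypothetical fp_mul_int, fp_add_int and fp_div helpers alongside the fp_mul, fp_add, fp_div_int and int_to_fp calls that do appear here:

/* Sketch only -- fp_mul_int, fp_add_int and fp_div are assumed helpers. */
static void
thread_update_recent_cpu (struct thread *t, void *aux UNUSED)
{
  fp32_t twice_load = fp_mul_int (load_avg, 2);
  fp32_t decay = fp_div (twice_load, fp_add_int (twice_load, 1));
  t->recent_cpu = fp_add_int (fp_mul (decay, t->recent_cpu), t->nice);
}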
@@ -284,6 +289,9 @@ thread_unblock (struct thread *t)
old_level = intr_disable ();
ASSERT (t->status == THREAD_BLOCKED);
if (thread_mlfqs)
list_insert_ordered (&ready_list, &t->elem, thread_priority_less, NULL);
else
list_push_back (&ready_list, &t->elem);
t->status = THREAD_READY;
intr_set_level (old_level);
@@ -355,7 +363,13 @@ thread_yield (void)
old_level = intr_disable ();
if (cur != idle_thread)
{
if (thread_mlfqs)
list_insert_ordered (&ready_list, &cur->elem, thread_priority_less,
NULL);
else
list_push_back (&ready_list, &cur->elem);
}
cur->status = THREAD_READY;
schedule ();
intr_set_level (old_level);
@@ -417,6 +431,10 @@ thread_set_nice (int nice)
struct thread *t = thread_current ();
t->nice = nice;
int priority = calculate_bsd_priority (t->recent_cpu, t->nice);
struct thread *next_t
= list_entry (list_begin (&ready_list), struct thread, elem);
if (priority < next_t->priority)
thread_yield ();
}
/* Returns the current thread's nice value. */
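thread_set_nice above recomputes the caller's priority with calculate_bsd_priority and yields if the head of the priority-ordered ready_list now outranks it; a robust version would also guard with list_empty, since list_begin on an empty list returns the tail sentinel. The body of calculate_bsd_priority is not in this diff (only its closing lines appear in the final hunk below), but under the usual 4.4BSD rule, priority = PRI_MAX - recent_cpu / 4 - nice * 2 clamped to [PRI_MIN, PRI_MAX], it might look roughly like this, assuming a hypothetical fp_to_int conversion helper:

/* Sketch only -- fp_to_int is an assumed fixed-point-to-integer helper. */
static int
calculate_bsd_priority (fp32_t recent_cpu, int nice)
{
  int priority = PRI_MAX - fp_to_int (fp_div_int (recent_cpu, 4)) - nice * 2;
  if (priority < PRI_MIN)
    priority = PRI_MIN;
  else if (priority > PRI_MAX)
    priority = PRI_MAX;
  return priority;
}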
@@ -622,6 +640,17 @@ calculate_bsd_priority (fp32_t recent_cpu, int nice)
return priority;
}
/* Returns true if thread a's priority is strictly greater than
thread b's priority. */
static bool
thread_priority_less (const struct list_elem *a, const struct list_elem *b,
void *aux UNUSED)
{
struct thread *ta = list_entry (a, struct thread, elem);
struct thread *tb = list_entry (b, struct thread, elem);
return ta->priority > tb->priority;
}
/* Schedules a new process.  At entry, interrupts must be off and
the running process's state must have been changed from
running to some other state. This function finds another
thread to run and switches to it.
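Because thread_priority_less reports a before b when a's priority is higher, list_insert_ordered keeps ready_list sorted in descending priority, with ties placed behind existing entries of the same priority, preserving round-robin order among equals. Picking the next thread is then just popping the front of the list; in unmodified Pintos, next_thread_to_run already does roughly this, shown here only to illustrate the invariant:

/* Stock-style logic: with a descending-priority ready_list, the front
   element is the highest-priority ready thread. */
static struct thread *
next_thread_to_run (void)
{
  if (list_empty (&ready_list))
    return idle_thread;
  return list_entry (list_pop_front (&ready_list), struct thread, elem);
}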