fix mlfqs tests, complete BSD scheduling
--- a/devices/timer.c
+++ b/devices/timer.c
@@ -199,10 +199,6 @@ static void
 timer_interrupt (struct intr_frame *args UNUSED)
 {
   ticks++;
-  if (ticks % TIMER_FREQ == 0)
-    {
-      calculate_recent_cpu ();
-    }
   for (struct list_elem *e = list_begin (&sleeping_threads);
        e != list_end (&sleeping_threads); e = list_next (e))
     {
@@ -212,7 +208,6 @@ timer_interrupt (struct intr_frame *args UNUSED)
       else
         break;
     }
-  thread_increment_recent_cpu ();
   thread_tick ();
 }
 
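Both removals take the mlfqs bookkeeping out of the interrupt handler proper. The per-tick recent_cpu increment presumably moves into thread_tick (), which timer_interrupt still calls last; a minimal sketch of that rule, assuming the standard mlfqs behavior (the running thread's recent_cpu grows by one each tick unless the CPU is idle) and the fixed-point helpers this commit already uses:

  /* Inside thread_tick (): per-tick recent_cpu accounting.  The
     placement here is an assumption; only the removal from
     timer_interrupt () is shown in this diff. */
  if (thread_mlfqs && t != idle_thread)
    t->recent_cpu = fp_add (t->recent_cpu, int_to_fp (1));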
--- a/threads/synch.c
+++ b/threads/synch.c
@@ -233,6 +233,7 @@ lock_release (struct lock *lock)
 
   lock->holder = NULL;
   sema_up (&lock->semaphore);
+  thread_yield ();
 }
 
 /* Returns true if the current thread holds LOCK, false
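The added thread_yield () matters for priority scheduling: sema_up () may wake a waiter with higher priority than the releasing thread, and yielding immediately hands that waiter the CPU instead of letting the releaser keep running until the next timer tick. A hypothetical two-thread scenario (demo_lock and the function name are illustrative, not part of this commit):

  static struct lock demo_lock;

  static void
  high_prio_waiter (void *aux UNUSED)
  {
    lock_acquire (&demo_lock);   /* blocks while a low-priority
                                    thread holds the lock */
    lock_release (&demo_lock);
  }

  /* When the low-priority holder calls lock_release (), sema_up ()
     readies high_prio_waiter, and the new thread_yield () reschedules
     so the waiter resumes without waiting for a timer interrupt. */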
--- a/threads/thread.c
+++ b/threads/thread.c
@@ -73,6 +73,8 @@ static bool is_thread (struct thread *) UNUSED;
 static void *alloc_frame (struct thread *, size_t size);
 static int calculate_bsd_priority (fp32_t recent_cpu, int nice);
 static void thread_update_recent_cpu (struct thread *t, void *aux UNUSED);
+static bool thread_priority_less (const struct list_elem *a,
+                                  const struct list_elem *b, void *aux UNUSED);
 static void schedule (void);
 void thread_schedule_tail (struct thread *prev);
 static tid_t allocate_tid (void);
@@ -157,8 +159,11 @@ thread_tick (void)
   int64_t ticks = timer_ticks ();
   if (thread_mlfqs && (ticks % TIMER_FREQ == 0))
     {
+      size_t ready = threads_ready ();
+      if (t != idle_thread)
+        ready++;
       fp32_t old_coeff = fp_mul (fp_div_int (int_to_fp (59), 60), load_avg);
-      fp32_t new_coeff = fp_div_int (int_to_fp (threads_ready ()), 60);
+      fp32_t new_coeff = fp_div_int (int_to_fp (ready), 60);
       load_avg = fp_add (old_coeff, new_coeff);
 
       thread_foreach (thread_update_recent_cpu, NULL);
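The arithmetic above implements load_avg = (59/60)*load_avg + (1/60)*ready, where ready now counts the running thread as well (the old threads_ready () call missed it unless the CPU was idle). A self-contained check of the formula in plain C, assuming Pintos's usual 17.14 fixed-point format; the fp_* bodies below are illustrative stand-ins for the commit's helpers:

  #include <stdint.h>
  #include <stdio.h>

  typedef int32_t fp32_t;
  #define FP_F (1 << 14)                 /* 17.14 fixed-point scale */

  static fp32_t int_to_fp (int n)            { return n * FP_F; }
  static fp32_t fp_add (fp32_t a, fp32_t b)  { return a + b; }
  static fp32_t fp_mul (fp32_t a, fp32_t b)  { return (fp32_t) ((int64_t) a * b / FP_F); }
  static fp32_t fp_div_int (fp32_t a, int n) { return a / n; }
  static int fp_to_int (fp32_t a)            { return (a >= 0 ? a + FP_F / 2 : a - FP_F / 2) / FP_F; }

  int
  main (void)
  {
    fp32_t load_avg = 0;
    int ready = 3;        /* ready threads plus the running thread */
    for (int sec = 1; sec <= 5; sec++)
      {
        fp32_t old_coeff = fp_mul (fp_div_int (int_to_fp (59), 60), load_avg);
        fp32_t new_coeff = fp_div_int (int_to_fp (ready), 60);
        load_avg = fp_add (old_coeff, new_coeff);
        printf ("after %d s: 100*load_avg = %d\n", sec,
                fp_to_int (fp_mul (load_avg, int_to_fp (100))));
      }
    return 0;
  }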
@@ -284,7 +289,10 @@ thread_unblock (struct thread *t)
 
   old_level = intr_disable ();
   ASSERT (t->status == THREAD_BLOCKED);
-  list_push_back (&ready_list, &t->elem);
+  if (thread_mlfqs)
+    list_insert_ordered (&ready_list, &t->elem, thread_priority_less, NULL);
+  else
+    list_push_back (&ready_list, &t->elem);
   t->status = THREAD_READY;
   intr_set_level (old_level);
 }
@@ -355,7 +363,13 @@ thread_yield (void)
 
   old_level = intr_disable ();
   if (cur != idle_thread)
-    list_push_back (&ready_list, &cur->elem);
+    {
+      if (thread_mlfqs)
+        list_insert_ordered (&ready_list, &cur->elem, thread_priority_less,
+                             NULL);
+      else
+        list_push_back (&ready_list, &cur->elem);
+    }
   cur->status = THREAD_READY;
   schedule ();
   intr_set_level (old_level);
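Under mlfqs, thread_unblock () and thread_yield () now both keep ready_list sorted in descending priority order (the comparator added below returns ta->priority > tb->priority), so the highest-priority thread always sits at the front. The stock Pintos next_thread_to_run (), unchanged by this diff, then selects it by popping the front:

  static struct thread *
  next_thread_to_run (void)
  {
    if (list_empty (&ready_list))
      return idle_thread;
    else
      return list_entry (list_pop_front (&ready_list), struct thread, elem);
  }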
@@ -417,6 +431,10 @@ thread_set_nice (int nice)
   struct thread *t = thread_current ();
   t->nice = nice;
   int priority = calculate_bsd_priority (t->recent_cpu, t->nice);
+  struct thread *next_t
+      = list_entry (list_begin (&ready_list), struct thread, elem);
+  if (priority < next_t->priority)
+    thread_yield ();
 }
 
 /* Returns the current thread's nice value. */
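The recomputed priority feeds off recent_cpu, which thread_update_recent_cpu () (declared above and invoked once per second from thread_tick ()) refreshes for every thread. Its body is not part of this diff; a minimal sketch assuming it applies the standard 4.4BSD rule recent_cpu = (2*load_avg)/(2*load_avg + 1) * recent_cpu + nice, where fp_div is an assumed fixed-point division helper alongside the fp_* names the diff does show:

  static void
  thread_update_recent_cpu (struct thread *t, void *aux UNUSED)
  {
    /* Sketch only: the committed body may differ. */
    fp32_t twice_load = fp_mul (int_to_fp (2), load_avg);
    fp32_t coeff = fp_div (twice_load, fp_add (twice_load, int_to_fp (1)));
    t->recent_cpu = fp_add (fp_mul (coeff, t->recent_cpu),
                            int_to_fp (t->nice));
  }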
@@ -622,6 +640,17 @@ calculate_bsd_priority (fp32_t recent_cpu, int nice)
   return priority;
 }
 
+/* Returns true if thread a's priority is strictly greater than
+   thread b's priority. */
+static bool
+thread_priority_less (const struct list_elem *a, const struct list_elem *b,
+                      void *aux UNUSED)
+{
+  struct thread *ta = list_entry (a, struct thread, elem);
+  struct thread *tb = list_entry (b, struct thread, elem);
+  return ta->priority > tb->priority;
+}
+
 /* Schedules a new process.  At entry, interrupts must be off and
    the running process's state must have been changed from
    running to some other state.  This function finds another
    thread to run and switches to it.
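The new comparator sits just after calculate_bsd_priority (), whose body lies outside this hunk apart from its return statement. A minimal sketch, assuming the standard mlfqs formula priority = PRI_MAX - recent_cpu/4 - nice*2 clamped to [PRI_MIN, PRI_MAX]; fp_to_int_nearest is an assumed rounding helper in the style of the commit's fp_* functions:

  static int
  calculate_bsd_priority (fp32_t recent_cpu, int nice)
  {
    /* Sketch only: the committed body may differ. */
    int priority = PRI_MAX - fp_to_int_nearest (fp_div_int (recent_cpu, 4))
                   - nice * 2;
    if (priority > PRI_MAX)
      priority = PRI_MAX;
    if (priority < PRI_MIN)
      priority = PRI_MIN;
    return priority;
  }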