Merge complete code base into master #15
@@ -251,19 +251,27 @@ lock_acquire (struct lock *lock)
   ASSERT (!lock_held_by_current_thread (lock));
 
   struct thread *t = thread_current ();
-  enum intr_level old_level = intr_disable ();
 
-  if (lock->holder != NULL)
-    {
-      t->waiting_lock = lock;
-      donate_priority (lock->holder);
-    }
+  if(!thread_mlfqs)
+    {
+      enum intr_level old_level = intr_disable ();
+
+      if (lock->holder != NULL)
+        {
+          t->waiting_lock = lock;
+          donate_priority (lock->holder);
+        }
+
+      intr_set_level (old_level);
+    }
 
-  intr_set_level (old_level);
 
   sema_down (&lock->semaphore);
   lock->holder = thread_current ();
-  t->waiting_lock = NULL;
+  if (!thread_mlfqs)
+    t->waiting_lock = NULL;
 }
 
 /* Tries to acquires LOCK and returns true if successful or false
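Note on the hunk above: donate_priority () is called here but not defined in this diff. Below is a minimal sketch of such a helper, assuming the waiting_lock, donors_list and donor_elem fields used elsewhere in this change, and assuming interrupts are already disabled by the caller (as lock_acquire does above); the actual helper in this branch may differ.

    /* Sketch only: record the current thread as a donor to HOLDER, raise
       HOLDER's effective priority, and follow the chain of held locks so
       nested donation also works. */
    static void
    donate_priority (struct thread *holder)
    {
      struct thread *donor = thread_current ();

      /* Track the donor so lock_release can undo the donation later. */
      list_push_back (&holder->donors_list, &donor->donor_elem);

      while (holder != NULL && holder->priority < donor->priority)
        {
          holder->priority = donor->priority;
          if (holder->waiting_lock == NULL)
            break;
          /* The holder is itself blocked; keep donating up the chain. */
          holder = holder->waiting_lock->holder;
        }
    }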
@@ -297,50 +305,53 @@ lock_release (struct lock *lock)
   ASSERT (lock != NULL);
   ASSERT (lock_held_by_current_thread (lock));
 
-  struct thread *current_thread = thread_current ();
-  struct thread *max_donor = NULL;
+  if (!thread_mlfqs)
+    {
+      struct thread *current_thread = thread_current ();
+      struct thread *max_donor = NULL;
 
-  struct list orphan_list;
-  list_init (&orphan_list);
+      struct list orphan_list;
+      list_init (&orphan_list);
 
-  enum intr_level old_level = intr_disable ();
-  /* Loop through current thread's donors, removing the ones waiting for the
-     lock being released and keeping track of them (within orphan_list).
-     Also identifies the highest priority donor thread among them. */
-  struct list_elem *tail = list_tail (&current_thread->donors_list);
-  struct list_elem *e = list_begin (&current_thread->donors_list);
-  while (e != tail)
-    {
-      struct thread *donor = list_entry (e, struct thread, donor_elem);
-      struct list_elem *next = list_next (e);
+      enum intr_level old_level = intr_disable ();
+      /* Loop through current thread's donors, removing the ones waiting for the
+         lock being released and keeping track of them (within orphan_list).
+         Also identifies the highest priority donor thread among them. */
+      struct list_elem *tail = list_tail (&current_thread->donors_list);
+      struct list_elem *e = list_begin (&current_thread->donors_list);
+      while (e != tail)
+        {
+          struct thread *donor = list_entry (e, struct thread, donor_elem);
+          struct list_elem *next = list_next (e);
 
-      /* Excludes donors that aren't waiting for the lock being released,
-         and tracks the rest. */
-      if (donor->waiting_lock == lock)
-        {
-          list_remove (e);
-          list_push_back (&orphan_list, e);
+          /* Excludes donors that aren't waiting for the lock being released,
+             and tracks the rest. */
+          if (donor->waiting_lock == lock)
+            {
+              list_remove (e);
+              list_push_back (&orphan_list, e);
 
-          /* Identify highest priority donor. */
-          if (max_donor == NULL || donor->priority > max_donor->priority)
-            max_donor = donor;
-        }
+              /* Identify highest priority donor. */
+              if (max_donor == NULL || donor->priority > max_donor->priority)
+                max_donor = donor;
+            }
 
-      e = next;
-    }
+          e = next;
+        }
 
-  /* If there exists a maximum donor thread waiting for this lock to be
-     released, transfer the remaining orphaned donors to its donor list. */
-  if (max_donor != NULL)
-    {
-      while (!list_empty (&orphan_list))
-        list_push_back (&max_donor->donors_list, list_pop_front (&orphan_list));
-    }
+      /* If there exists a maximum donor thread waiting for this lock to be
+         released, transfer the remaining orphaned donors to its donor list. */
+      if (max_donor != NULL)
+        {
+          while (!list_empty (&orphan_list))
+            list_push_back (&max_donor->donors_list, list_pop_front (&orphan_list));
+        }
 
-  intr_set_level (old_level);
-  /* Removal of donors to this thread may change its effective priority,
-     so recalculate. */
-  thread_recalculate_priority ();
+      intr_set_level (old_level);
+      /* Removal of donors to this thread may change its effective priority,
+         so recalculate. */
+      thread_recalculate_priority ();
+    }
 
   lock->holder = NULL;
   sema_up (&lock->semaphore);
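Both synchronization hunks above rely on per-thread bookkeeping (base_priority, donors_list, donor_elem, waiting_lock) whose declarations are not part of this diff. As a rough sketch only, the corresponding additions to struct thread would look something like the following; the names follow the usage above, but the exact declarations in this branch are assumed.

    struct thread
      {
        /* ... existing members (tid, status, name, stack, priority, ...) */
        int base_priority;            /* Priority before any donation. */
        struct list donors_list;      /* Threads currently donating to this one. */
        struct list_elem donor_elem;  /* This thread's element in a holder's donors_list. */
        struct lock *waiting_lock;    /* Lock this thread is blocked on, or NULL. */
        int nice;                     /* Niceness (mlfqs). */
        fp32_t recent_cpu;            /* Recent CPU usage (mlfqs, fixed point). */
        /* ... existing members (elem, magic, ...) */
      };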
@@ -73,8 +73,6 @@ static bool is_thread (struct thread *) UNUSED;
 static void *alloc_frame (struct thread *, size_t size);
 static int calculate_bsd_priority (fp32_t recent_cpu, int nice);
 static void thread_update_recent_cpu (struct thread *t, void *aux UNUSED);
-static bool thread_priority_less (const struct list_elem *a,
-                                  const struct list_elem *b, void *aux UNUSED);
 static void schedule (void);
 void thread_schedule_tail (struct thread *prev);
 static tid_t allocate_tid (void);
@@ -164,7 +162,7 @@ thread_tick (void)
   size_t ready = threads_ready ();
   if (t != idle_thread)
     ready++;
-  fp32_t old_coeff = fp_mul (fp_div_int (int_to_fp (59), 60), load_avg);
+  fp32_t old_coeff = fp_div_int (fp_mul_int(load_avg, 59), 60);
   fp32_t new_coeff = fp_div_int (int_to_fp (ready), 60);
   load_avg = fp_add (old_coeff, new_coeff);
 
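The thread_tick change above reorders the load_avg update, which implements load_avg = (59/60) * load_avg + (1/60) * ready. Multiplying by 59 before dividing by 60 loses less precision than first forming 59/60 in fixed point. A minimal sketch of the helpers involved, assuming the common 17.14 fixed-point layout (the fixed-point header itself is not shown in this diff):

    typedef int fp32_t;                 /* 17.14 fixed point: value * 2^14. */
    #define FP_F (1 << 14)

    static inline fp32_t int_to_fp (int n)            { return n * FP_F; }
    static inline fp32_t fp_mul_int (fp32_t x, int n) { return x * n; }
    static inline fp32_t fp_div_int (fp32_t x, int n) { return x / n; }
    static inline fp32_t fp_add (fp32_t x, fp32_t y)  { return x + y; }

    /* Old: fp_mul (fp_div_int (int_to_fp (59), 60), load_avg) truncates
       59/60 to the nearest 1/2^14 before multiplying.
       New: fp_div_int (fp_mul_int (load_avg, 59), 60) scales load_avg
       first, so only the final division truncates. */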
@@ -293,11 +291,9 @@ thread_unblock (struct thread *t)
   old_level = intr_disable ();
   ASSERT (t->status == THREAD_BLOCKED);
 
-  if (thread_mlfqs)
-    list_insert_ordered (&ready_list, &t->elem, thread_priority_less, NULL);
-  else
-    /* Insert the thread back into the ready list in priority order. */
-    list_insert_ordered(&ready_list, &t->elem, priority_more, NULL);
+  /* Insert the thread back into the ready list in priority order. */
+  list_insert_ordered(&ready_list, &t->elem, priority_more, NULL);
 
   t->status = THREAD_READY;
   intr_set_level (old_level);
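After this change thread_unblock always inserts with priority_more (the thread_yield hunk below does the same), a comparator that is not shown in this diff. It presumably mirrors the removed thread_priority_less; a sketch:

    /* Orders the ready list so higher effective priority comes first.
       list_insert_ordered expects a "less"-style function, so returning
       a > b places larger priorities toward the front. */
    static bool
    priority_more (const struct list_elem *a, const struct list_elem *b,
                   void *aux UNUSED)
    {
      struct thread *ta = list_entry (a, struct thread, elem);
      struct thread *tb = list_entry (b, struct thread, elem);
      return ta->priority > tb->priority;
    }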
@@ -371,12 +367,10 @@ thread_yield (void)
 
   if (cur != idle_thread)
     {
-      if (thread_mlfqs)
-        list_insert_ordered (&ready_list, &cur->elem, thread_priority_less,
-                             NULL);
-      else
-        /* Insert the thread back into the ready list in priority order. */
-        list_insert_ordered(&ready_list, &cur->elem, priority_more, NULL);
+      /* Insert the thread back into the ready list in priority order. */
+      list_insert_ordered(&ready_list, &cur->elem, priority_more, NULL);
     }
 
   cur->status = THREAD_READY;
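Keeping ready_list sorted at insertion time, here and in thread_unblock above, means the scheduler itself needs no change: stock Pintos already picks the next thread by popping the front of ready_list, so the front element is always the highest-priority runnable thread.

    /* next_thread_to_run () as in stock Pintos thread.c. */
    static struct thread *
    next_thread_to_run (void)
    {
      if (list_empty (&ready_list))
        return idle_thread;
      else
        return list_entry (list_pop_front (&ready_list), struct thread, elem);
    }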
@@ -473,13 +467,15 @@ thread_update_recent_cpu (struct thread *t, void *aux UNUSED)
     = fp_add_int (fp_mul (recent_cpu_coeff, curr_recent_cpu), t->nice);
   // recent_cpu was updated, update priority.
   t->priority = calculate_bsd_priority (t->recent_cpu, t->nice);
 
 }
 /* Recalculates the effective priority of the current thread. */
 void
 thread_recalculate_priority (void)
 {
   struct thread *t = thread_current ();
 
+  ASSERT(!thread_mlfqs);
+
   enum intr_level old_level = intr_disable ();
   t->priority = t->base_priority;
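The recent_cpu_coeff used above is computed outside this hunk. Following the usual 4.4BSD-style rule recent_cpu = (2 * load_avg) / (2 * load_avg + 1) * recent_cpu + nice, it could be derived roughly as below; the function name is hypothetical and fp_div is assumed to exist alongside the helpers used elsewhere in this diff.

    /* Sketch: coefficient 2*load_avg / (2*load_avg + 1), recomputed
       once per second before every thread's recent_cpu is refreshed. */
    static fp32_t
    recent_cpu_coefficient (fp32_t load_avg)
    {
      fp32_t twice_load = fp_mul_int (load_avg, 2);
      return fp_div (twice_load, fp_add_int (twice_load, 1));
    }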
@@ -639,8 +635,6 @@ init_thread (struct thread *t, const char *name, int nice, int priority,
   strlcpy (t->name, name, sizeof t->name);
   t->stack = (uint8_t *) t + PGSIZE;
 
-  t->priority
-      = thread_mlfqs ? calculate_bsd_priority (recent_cpu, nice) : priority;
   t->nice = nice;
   t->recent_cpu = recent_cpu;
 
@@ -649,7 +643,7 @@ init_thread (struct thread *t, const char *name, int nice, int priority,
   t->magic = THREAD_MAGIC;
 
   list_init (&t->donors_list);
-  t->priority = t->base_priority;
+  t->priority = thread_mlfqs ? calculate_bsd_priority (recent_cpu, nice) : t->base_priority;
   t->waiting_lock = NULL;
 
   old_level = intr_disable ();
@@ -734,6 +728,9 @@ thread_schedule_tail (struct thread *prev)
 static int
 calculate_bsd_priority (fp32_t recent_cpu, int nice)
 {
+
+  ASSERT(thread_mlfqs);
+
   int priority = PRI_MAX - (fp_round (recent_cpu) / 4) - (nice * 2);
   if (priority < PRI_MIN)
     return PRI_MIN;
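For reference, this is the standard Pintos MLFQS rule priority = PRI_MAX - (recent_cpu / 4) - (nice * 2), clamped to [PRI_MIN, PRI_MAX] (0 and 63 in stock Pintos). For example, a thread whose recent_cpu rounds to 20 with nice = 1 gets priority 63 - 20/4 - 1*2 = 56; the clamping here and in the lines below handles combinations that would fall outside the range.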
@@ -742,17 +739,6 @@ calculate_bsd_priority (fp32_t recent_cpu, int nice)
   return priority;
 }
 
-/* Returns true if thread a's priority is strictly greater than
-   thread b's priority. */
-static bool
-thread_priority_less (const struct list_elem *a, const struct list_elem *b,
-                      void *aux UNUSED)
-{
-  struct thread *ta = list_entry (a, struct thread, elem);
-  struct thread *tb = list_entry (b, struct thread, elem);
-  return ta->priority > tb->priority;
-}
-
 /* Schedules a new process. At entry, interrupts must be off and
    the running process's state must have been changed from
    running to some other state. This function finds another