The Basic Principles of the Linux Kernel Preemption Patch (Part 2)


} while (0)
#define spin_lock(lock) \
do { \
    ctx_sw_off();       /* disable preemption when entering the spinlock */ \
    _raw_spin_lock(lock); \
} while (0)
#define spin_trylock(lock) ({ctx_sw_off(); _raw_spin_trylock(lock) ? \
    1 : ({ctx_sw_on(); 0;});})      /* take the lock and report whether it was previously unlocked */
#define spin_unlock(lock) \
do { \
    _raw_spin_unlock(lock); \
    ctx_sw_on();        /* re-enable, and possibly perform, kernel preemption when leaving the spinlock */ \
} while (0)
#define read_lock(lock) ({ctx_sw_off(); _raw_read_lock(lock);})
#define read_unlock(lock) ({_raw_read_unlock(lock); ctx_sw_on();})
#define write_lock(lock) ({ctx_sw_off(); _raw_write_lock(lock);})
#define write_unlock(lock) ({_raw_write_unlock(lock); ctx_sw_on();})
#define write_trylock(lock) ({ctx_sw_off(); _raw_write_trylock(lock) ? \
    1 : ({ctx_sw_on(); 0;});})
...
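Every locking primitive above simply brackets the raw lock operation with ctx_sw_off()/ctx_sw_on(), so holding any spinlock, read lock, or write lock automatically makes the current task non-preemptible. The real definitions of these helpers belong to the patch itself; the following is only a minimal sketch of their intended semantics, assuming a hypothetical per-task nesting counter named preempt_count:

/*
 * Illustrative sketch only: the field name preempt_count is an assumption,
 * standing in for whatever nesting counter the patch adds to task_struct.
 */
#define ctx_sw_off()            (current->preempt_count++)  /* forbid preemption */
#define ctx_sw_on_no_preempt()  (current->preempt_count--)  /* allow preemption again without rescheduling here */
#define ctx_sw_on() \
do { \
    /* when the outermost critical section is left, honour a pending reschedule */ \
    if (--current->preempt_count == 0 && current->need_resched) \
        preempt_schedule(); \
} while (0)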
include/asm/softirq.h:
#define cpu_bh_disable(cpu) do { ctx_sw_off(); local_bh_count(cpu)++; barrier(); } while (0)
#define cpu_bh_enable(cpu)  do { barrier(); local_bh_count(cpu)--; ctx_sw_on(); } while (0)
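In 2.4, these per-CPU helpers are essentially what the familiar local_bh_disable()/local_bh_enable() wrappers expand to, so every bottom-half critical section is also a non-preemptible region. For context, the standard form of the wrappers looks like this:

#define local_bh_disable()  cpu_bh_disable(smp_processor_id())
#define local_bh_enable()   cpu_bh_enable(smp_processor_id())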
kernel/sched.c:
#ifdef CONFIG_PREEMPT
asmlinkage void preempt_schedule(void)
{
    while (current->need_resched) {
        ctx_sw_off();
        current->state |= TASK_PREEMPTED;
        schedule();
        current->state &= ~TASK_PREEMPTED;
        ctx_sw_on_no_preempt();
    }
}
#endif
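preempt_schedule() is the entry point for involuntary rescheduling: it is reached from ctx_sw_on() when the preemption count drops back to zero while need_resched is set, and from the interrupt-return path. The latter is implemented in architecture assembly (entry.S), but conceptually the added test looks roughly like the following sketch (function and field names are illustrative, not the patch's actual code):

/*
 * Conceptual C rendering of the check added on return from interrupt.
 * The real code is assembly; preempt_count is the same illustrative
 * field name assumed in the sketch above.
 */
static void ret_from_intr_preempt_check(void)
{
    /* preempt only if a reschedule is pending and we are outside
       every spinlock/bottom-half (non-preemptible) region */
    if (current->need_resched && current->preempt_count == 0)
        preempt_schedule();
}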
asmlinkage void schedule(void)
{
    struct schedule_data *sched_data;
    struct task_struct *prev, *next, *p;
    struct list_head *tmp;
    int this_cpu, c;
#ifdef CONFIG_PREEMPT
    ctx_sw_off();
#endif
    if (!current->active_mm) BUG();
need_resched_back:
    prev = current;
    this_cpu = prev->processor;
    if (in_interrupt())
        goto scheduling_in_interrupt;
    release_kernel_lock(prev, this_cpu);
/* Do "administrative" work here while we don"t hold any locks */
if (softirq_active(this_cpu) & softirq_mask(this_cpu))
goto handle_softirq;
handle_softirq_back:
/*
* "sched_data" is protected by the fact that we can run
* only one process per CPU.
*/
sched_data = https://www.rkxy.com.cn/dnjc/& aligned_data[this_cpu].schedule_data;
    spin_lock_irq(&runqueue_lock);
    /* move an exhausted RR process to be last.. */
    if (prev->policy == SCHED_RR)
        goto move_rr_last;
move_rr_back:
    switch (prev->state) {
        case TASK_INTERRUPTIBLE:
            if (signal_pending(prev)) {
                prev->state = TASK_RUNNING;
                break;
            }
        default:
#ifdef CONFIG_PREEMPT
            if (prev->state & TASK_PREEMPTED)
                break;  /* preempted while in the kernel: keep the task on the runqueue */
#endif
            del_from_runqueue(prev);
#ifdef CONFIG_PREEMPT
        case TASK_PREEMPTED:
#endif
        case TASK_RUNNING:;
    }
    prev->need_resched = 0;
    /*
     * this is the scheduler proper:
     */
repeat_schedule:
    /*
     * Default process to select..
     */
    next = idle_task(this_cpu);
    c = -1000;
    if (task_on_runqueue(prev))
        goto still_running;
still_running_back:
    list_for_each(tmp, &runqueue_head) {
        p = list_entry(tmp, struct task_struct, run_list);
        if (can_schedule(p, this_cpu)) {
            int weight = goodness(p, this_cpu, prev->active_mm);
            if (weight > c)
                c = weight, next = p;
        }
    }
    /* Do we need to re-calculate counters? */
    if (!c)
        goto recalculate;
    /*
     * from this point on nothing can prevent us from
     * switching to the next task, save this fact in
     * sched_data.
     */
    sched_data->curr = next;
#ifdef CONFIG_SMP
    next->has_cpu = 1;
    next->processor = this_cpu;
#endif
    spin_unlock_irq(&runqueue_lock);
    if (prev == next)
        goto same_process;
#ifdef CONFIG_SMP
    /*
     * maintain the per-process 'last schedule' value.
     * (this has to be recalculated even if we reschedule to
     * the same process) Currently this is only used on SMP,
     * and it's approximate, so we do not have to maintain
     * it while holding the runqueue spinlock.
     */
    sched_data->last_schedule = get_cycles();
    /*
     * We drop the scheduler lock early (it's a global spinlock),
     * thus we have to lock the previous process from getting
     * rescheduled during switch_to().
     */
#endif /* CONFIG_SMP */
    kstat.context_swtch++;
...
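Putting the pieces together: because every lock acquisition raises the preemption count and every release lowers it, preemption can only actually take place when the outermost critical section ends. A hypothetical driver fragment (locks and function invented purely for illustration) shows how the nesting behaves:

static spinlock_t a_lock = SPIN_LOCK_UNLOCKED;  /* illustrative locks, not from the patch */
static spinlock_t b_lock = SPIN_LOCK_UNLOCKED;

void nested_example(void)
{
    spin_lock(&a_lock);     /* ctx_sw_off(): preemption count 0 -> 1 */
    spin_lock(&b_lock);     /* ctx_sw_off(): preemption count 1 -> 2 */
    /* ... critical section: no preemption can occur here ... */
    spin_unlock(&b_lock);   /* ctx_sw_on(): count 2 -> 1, still not preemptible */
    spin_unlock(&a_lock);   /* ctx_sw_on(): count 1 -> 0, preempt_schedule() may
                               run right here if need_resched was set meanwhile */
}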
