1 Chapter 4 Interrupts and Exceptions
1 Chapter 4 Interrupts and Exceptions
1 Chapter 4 Interrupts and Exceptions
IRQn
INT IDT[32+n]
PIC IRQn_interrupt()
do_IRQ(n)
The int instruction allows a User Mode process to issue any interrupt signal with any vector between 0 and 255
To block an illegal int from a user-mode process, set the DPL of the gate descriptor to 0
When a user-mode process issues such an int, its CPL (3) > DPL (0), which raises a "general protection" exception
IRQn
INT IDT[32+n]
PIC IRQn_interrupt()
do_IRQ(n)
/*
 * Abstract PIC descriptor (Linux 2.6): one instance per interrupt-
 * controller type. The generic IRQ layer drives the controller
 * hardware only through these function pointers.
 */
struct hw_interrupt_type {
const char * typename; /* human-readable controller name */
unsigned int (*startup) (unsigned int irq); /* start servicing this IRQ line */
void (*shutdown) (unsigned int irq); /* stop servicing this IRQ line */
void (*enable) (unsigned int irq); /* unmask the IRQ line */
void (*disable) (unsigned int irq); /* mask the IRQ line */
void (*ack) (unsigned int irq); /* acknowledge the interrupt to the PIC */
void (*end) (unsigned int irq); /* invoked when the IRQ handler terminates */
void (*set_affinity) (unsigned int irq, cpumask_t dest); /* route the IRQ to the given CPU set (SMP) */
};
Chapter 4 Interrupts and Exceptions 45
IRQ Descriptors
0 ≤ i < 224
irq_desc
hw_interrupt_type
irq_desc_t
:
irqaction irqaction
vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
	ALIGN
1:	pushl $vector-256
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr

	ALIGN
common_interrupt:
	SAVE_ALL
	call do_IRQ
	jmp ret_from_intr

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	pushl $nr-256;			\
	SAVE_ALL			\
	call smp_/**/name;		\
	jmp ret_from_intr;

ENTRY(divide_error)
	pushl $0		# no error code
	pushl $do_divide_error
	ALIGN
error_code:
	pushl %ds
	pushl %eax
	xorl %eax, %eax
	pushl %edx
	decl %eax		# eax = -1
	pushl %ecx
	pushl %ebx
	cld
	movl %es, %ecx
	movl ORIG_EAX(%esp), %esi	# get the error code
	movl ES(%esp), %edi		# get the function address
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	movl %esp, %edx
	pushl %esi		# push the error code
	pushl %edx		# push the pt_regs pointer
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	call *%edi
	addl $8, %esp
	jmp ret_from_exception
out:
desc->handler->end(irq);
spin_unlock(&desc->lock);
irq_exit();
return 1;
}
vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
	ALIGN
1:	pushl $vector-256
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr

	ALIGN
common_interrupt:
	SAVE_ALL
	call do_IRQ
	jmp ret_from_intr

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	pushl $nr-256;			\
	SAVE_ALL			\
	call smp_/**/name;		\
	jmp ret_from_intr;

ENTRY(divide_error)
	pushl $0		# no error code
	pushl $do_divide_error
	ALIGN
error_code:
	pushl %ds
	pushl %eax
	xorl %eax, %eax
	pushl %edx
	decl %eax		# eax = -1
	pushl %ecx
	pushl %ebx
	cld
	movl %es, %ecx
	movl ORIG_EAX(%esp), %esi	# get the error code
	movl ES(%esp), %edi		# get the function address
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	movl %esp, %edx
	pushl %esi		# push the error code
	pushl %edx		# push the pt_regs pointer
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	call *%edi
	addl $8, %esp
	jmp ret_from_exception
out:
desc->handler->end(irq);
spin_unlock(&desc->lock);
irq_exit();
return 1;
}
/* This contains the irq mask for both 8259A irq controllers, */
unsigned int cached_irq_mask = 0xffff;
spin_unlock_irqrestore(&i8259A_lock, flags);
}
spin_lock_irqsave(&i8259A_lock, flags);
if (cached_irq_mask & irqmask) // check whether this IRQ line is already masked
// the 8259A raised this interrupt even though the corresponding bit
// in its IMR register is set to 1, so this is a spurious interrupt
goto spurious_8259A_irq;
cached_irq_mask |= irqmask;
handle_real_irq:
if (irq & 8) { // slave
inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
// mask this IRQ line
outb(cached_slave_mask, PIC_SLAVE_IMR);
// write 0x60+(irq&7): a 'Specific EOI' command for slave IRQ (irq&7)
outb(0x60+(irq&7), PIC_SLAVE_CMD); /* 'Specific EOI' to slave */
// then write 0x60+PIC_CASCADE_IR: a 'Specific EOI' command for master IRQ2
outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
} else { // master
inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI' to master */
}
spin_unlock_irqrestore(&i8259A_lock, flags);
return;
Chapter 4 Interrupts and Exceptions 77
spurious_8259A_irq:
/** this is the slow path - should happen rarely. */
if (i8259A_irq_real(irq))
/*
* oops, the IRQ _is_ in service according to the
* 8259A - not spurious, go handle it.
*/
goto handle_real_irq;
{
static int spurious_irq_mask;
/*
* At this point we can be sure the IRQ is spurious,
* lets ACK and report it. [once per IRQ]
*/
atomic_inc(&irq_err_count); // increment the spurious-interrupt counter irq_err_count
/*
* Theoretically we do not have to handle this IRQ,
* but in Linux this does not cause problems and is
* simpler for us.
*/
// In Linux, handling a spurious IRQ the same way as a real IRQ causes no problems
goto handle_real_irq;
}
}
Chapter 4 Interrupts and Exceptions 78
/*
* This function assumes to be called rarely. Switching between
* 8259A registers is slow.
* This has to be protected by the irq controller spinlock
* before being called.
*/
static inline int i8259A_irq_real(unsigned int irq)
{
int value;
int irqmask = 1<<irq;
#ifndef CONFIG_SMP
# define synchronize_irq(irq) barrier()
// release lock
spin_unlock_irqrestore(&desc->lock, flags);
}
spin_lock (&rtc_lock);
rtc_irq_data += 0x100;
rtc_irq_data &= ~0xff;
if (is_hpet_enabled()) {
rtc_irq_data |= (unsigned long)irq & 0xF0;
} else {
rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0);
}
spin_lock(&rtc_task_lock);
if (rtc_callback) rtc_callback->func(rtc_callback->private_data);
spin_unlock(&rtc_task_lock);
wake_up_interruptible(&rtc_wait);
kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
return IRQ_HANDLED;
}
8042 chip
SR (0x64)
IR (0x60)
CR (0x64)
OR (0x60)
i8042_aux_values.irq = I8042_AUX_IRQ;//12
i8042_kbd_values.irq = I8042_KBD_IRQ;//1
if (!i8042_noaux && !i8042_check_aux(&i8042_aux_values)) {
//check if aux is available
if (!i8042_nomux && !i8042_check_mux(&i8042_aux_values)){
//check if mux is available
for (i = 0; i < 4; i++) {
i8042_init_mux_values(i8042_mux_values + i, i8042_mux_port + i, i);
i8042_port_register(i8042_mux_values + i, i8042_mux_port + i);
}
}else{
i8042_port_register(&i8042_aux_values, &i8042_aux_port);
}
}
i8042_port_register(&i8042_kbd_values, &i8042_kbd_port);
}
if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
//enable mouse or keyboard
printk(KERN_WARNING "i8042.c: Can't write CTR while registering.\n");
values->exists = 0;
return -1;
}
serio_register_port(port);
return 0;
}
…(next page)
irq_ret:
ret = 1;
return PSMOUSE_FULL_PACKET;
}
No hard and fast rules exist about what work to perform where
Research work needed
kstat_this_cpu.irqs[irq]++;
spin_lock(&desc->lock);
desc->handler->ack(irq);
status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
status |= IRQ_PENDING; /* we _want_ to handle it */
for (;;) {
	irqreturn_t action_ret;
	spin_unlock(&desc->lock);
	…
	action_ret = handle_IRQ_event(irq, &regs, action);
	…
	spin_lock(&desc->lock);
	desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;

if (!(action->flags & SA_INTERRUPT))
	local_irq_enable();
do {
	status |= action->flags;
	retval |= action->handler(irq, action->dev_id, regs);
	action = action->next;
} while (action);
if (status & SA_SAMPLE_RANDOM)
	add_interrupt_randomness(irq);
local_irq_disable();
return retval;
}
out:
desc->handler->end(irq);
spin_unlock(&desc->lock);
irq_exit();
return 1;
}
/*
 * Deferred-work descriptor: tasklets are queued on a per-CPU list
 * (see tasklet_vec below) and dispatched via TASKLET_SOFTIRQ.
 */
struct tasklet_struct
{
struct tasklet_struct *next; // next tasklet in the per-CPU list
unsigned long state; // TASKLET_STATE_* bit flags (scheduled / running)
atomic_t count; // reference counter: 0 == enabled, !0 = disabled
void (*func)(unsigned long); // handler function
unsigned long data; // argument passed to the handler function
};
/* Bit numbers used in tasklet_struct.state. */
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
local_irq_save(flags);
t->next = __get_cpu_var(tasklet_vec).list;
__get_cpu_var(tasklet_vec).list = t;
raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
#ifdef CONFIG_SMP
/*
 * Try to claim the RUN bit for this tasklet. Returns non-zero on
 * success. test_and_set_bit is atomic, so only one CPU can win and
 * the tasklet never runs on two CPUs at once.
 */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN,
&(t)->state);
}
#else
/* On uniprocessor there is no cross-CPU race: locking always succeeds. */
#define tasklet_trylock(t) 1
#endif
/*
 * Top-level workqueue object: one cpu_workqueue_struct per possible
 * CPU, plus bookkeeping for linking workqueues together.
 */
struct workqueue_struct {
struct cpu_workqueue_struct cpu_wq[NR_CPUS]; /* per-CPU work state */
const char *name; /* workqueue name (used for the worker threads) */
struct list_head list; /* Empty if single thread */
};
spinlock_t lock;

/* Block and flush all signals */
sigfillset(&blocked);
sigprocmask(SIG_BLOCK, &blocked, NULL);
flush_signals(current);

/* SIG_IGN makes children autoreap: see do_notify_parent(). */
sa.sa.sa_handler = SIG_IGN;
sa.sa.sa_flags = 0;
siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

if (!list_empty(&cwq->worklist))
	run_workqueue(cwq);
set_current_state(TASK_INTERRUPTIBLE);
__set_current_state(TASK_RUNNING);
return 0;
}
/* A single entry in a wait queue. */
struct __wait_queue {
unsigned int flags; /* WQ_FLAG_* bits */
#define WQ_FLAG_EXCLUSIVE 0x01 /* marks an exclusive waiter */
struct task_struct * task; /* the sleeping process */
wait_queue_func_t func; /* wake-up callback for this entry */
struct list_head task_list; /* links this entry into the queue */
};
/*
 * Disable bottom-half (softirq) processing on this CPU by raising the
 * softirq nesting level in preempt_count. barrier() stops the compiler
 * from moving memory accesses across the critical-section boundary.
 * (Restored canonical two-line form: the extracted text had the line
 * continuation '\' merged into the middle of the line, which is invalid.)
 */
#define local_bh_disable() \
	do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
/*
 * Re-enable bottom halves on this CPU. After __local_bh_enable()
 * (presumably the inverse of local_bh_disable() above — confirm in
 * softirq.c), run any softirqs that became pending while they were
 * disabled, but only when we are not already in interrupt context.
 */
void local_bh_enable(void)
{
__local_bh_enable();
if (unlikely(!in_interrupt() && local_softirq_pending()))
invoke_softirq();
}
ret_from_intr:
schedule_tail()
ret_from_sys_call:
Nested Kernel no System yes
control paths? call tracing? tracesys_exit:
yes no
syscall_trace()
Virtual no
reschedule:
v86 mode? Need yes
schedule()
yes reschedule?
Restore hardware context
Chapter 4 Interrupts and Exceptions 176
Returning from Interrupt
Return from an interrupt path is
much more complicated than the
entry path
It is a good place to do other tasks,
unrelated to the interrupt, but need
to be done fairly frequently
These include checking for pending
signals or if a reschedule is needed
Chapter 4 Interrupts and Exceptions 177
General Implementation Issue
Number of kernel control paths being
concurrently executed
Pending process switch requests
Pending signals