/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/hardirq.h>
#include <linux/workqueue.h>

/*
 * For 2.4.x compatibility, 2.4.x can use
 *
 *	typedef void irqreturn_t;
 *	#define IRQ_NONE
 *	#define IRQ_HANDLED
 *	#define IRQ_RETVAL(x)
 *
 * to mix old-style and new-style irq handler returns.
 *
 * IRQ_NONE means we didn't handle the interrupt.
 * IRQ_HANDLED means we did have a valid interrupt and handled it.
 * IRQ_RETVAL(x) selects between the two depending on whether x is
 * non-zero (non-zero means handled).
 */
typedef int irqreturn_t;

#define IRQ_NONE (0)
#define IRQ_HANDLED (1)
#define IRQ_RETVAL(x) ((x) != 0)

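/*
 * Example (sketch only, not part of the original header): a handler whose
 * intermediate int result is converted to an irqreturn_t with IRQ_RETVAL(),
 * so it returns IRQ_HANDLED when the device raised the interrupt and
 * IRQ_NONE otherwise.  "foo_device" and "foo_check_and_ack()" are
 * hypothetical names; assume the latter returns non-zero when the
 * interrupt belongs to this device.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *		int handled = foo_check_and_ack(foo);
 *
 *		return IRQ_RETVAL(handled);
 *	}
 */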
#include <linux/signal.h>
#define IRQF_SHARED SA_SHIRQ
#define IRQF_DISABLED SA_INTERRUPT
#define IRQF_SAMPLE_RANDOM SA_SAMPLE_RANDOM
#define IRQF_PERCPU SA_PERCPU
#ifdef SA_PROBEIRQ
#define IRQF_PROBE_SHARED SA_PROBEIRQ
#else
#define IRQF_PROBE_SHARED 0 /* dummy */
#endif

struct irqaction {
	void (*handler)(int, void *, struct pt_regs *);
	unsigned long flags;
	unsigned long mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
};

extern volatile unsigned char bh_running;

extern atomic_t bh_mask_count[32];
extern unsigned long bh_active;
extern unsigned long bh_mask;
extern void (*bh_base[32])(void);

void do_bottom_half(void);

/* Who gets which entry in bh_base.  Things which will occur most often
   should come first - in which case NET should be up the top with SERIAL/TQUEUE! */

enum {
	TIMER_BH = 0,
	CONSOLE_BH,
	TQUEUE_BH,
	DIGI_BH,
	SERIAL_BH,
	RISCOM8_BH,
	SPECIALIX_BH,
	AURORA_BH,
	ESP_BH,
	NET_BH,
	SCSI_BH,
	IMMEDIATE_BH,
	KEYBOARD_BH,
	CYCLADES_BH,
	CM206_BH,
	JS_BH,
	MACSERIAL_BH,
	ISICOM_BH
};

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets.  If a client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct {
	struct tasklet_struct *next;	/* linked list of active bh's */
	unsigned long sync;		/* must be initialized to zero */
	void (*func)(void *);		/* function to call */
	void *data;			/* argument to function */
};

extern void tasklet_hi_schedule(struct tasklet_struct *t);

#define tasklet_schedule tasklet_hi_schedule

extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

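/*
 * Example (sketch only, not part of the original header): initializing and
 * scheduling a tasklet through the compat API above.  "foo_device",
 * "foo->tasklet" and "foo_tasklet_fn" are hypothetical names; the function
 * signature follows the tasklet_init() prototype above.
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *		... deferred work, serialized against this tasklet itself ...
 *	}
 *
 *	tasklet_init(&foo->tasklet, foo_tasklet_fn, (unsigned long)foo);
 *	tasklet_schedule(&foo->tasklet);	(maps to tasklet_hi_schedule here)
 */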
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */

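/*
 * Example (sketch only, not part of the original header): the probing steps
 * above expressed as code.  The foo_* helpers are hypothetical device
 * routines; mdelay() merely stands in for the "wait" in step 5.
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_irq(foo);		(step 1: mask the device interrupt)
 *	sti();				(step 2)
 *	irqs = probe_irq_on();		(step 3: take over idle IRQs)
 *	foo_trigger_irq(foo);		(step 4: make the device interrupt)
 *	mdelay(20);			(step 5: give it time to fire)
 *	irq = probe_irq_off(irqs);	(step 6: 0 = none, negative = multiple)
 *	foo_ack_irq(foo);		(step 7: clear the pending interrupt)
 *	if (irq > 0)
 *		foo->irq = irq;
 */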
typedef irqreturn_t (*snd_irq_handler_t)(int, void *);
#undef irq_handler_t
#define irq_handler_t snd_irq_handler_t

int request_irq(unsigned int, irq_handler_t handler,
		unsigned long, const char *, void *);

static inline void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id) {}

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
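
/*
 * Example (sketch only, not part of the original header): requesting a
 * shared interrupt through the devm_request_irq() wrapper above.
 * "foo_interrupt", "irq_number" and "chip" are hypothetical names.
 *
 *	int err = devm_request_irq(dev, irq_number, foo_interrupt,
 *				   IRQF_SHARED, "foo", chip);
 *	if (err < 0)
 *		return err;
 */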
#endif /* _LINUX_INTERRUPT_H */