1 | /* $Id: tqueue.h,v 1.1.1.1 2003/07/02 13:57:02 eleph Exp $ */
|
---|
2 |
|
---|
3 | /*
|
---|
4 | * tqueue.h --- task queue handling for Linux.
|
---|
5 | *
|
---|
6 | * Mostly based on a proposed bottom-half replacement code written by
|
---|
7 | * Kai Petzke, wpp@marie.physik.tu-berlin.de.
|
---|
8 | *
|
---|
9 | * Modified for use in the Linux kernel by Theodore Ts'o,
|
---|
10 | * tytso@mit.edu. Any bugs are my fault, not Kai's.
|
---|
11 | *
|
---|
12 | * The original comment follows below.
|
---|
13 | */
|
---|
14 |
|
---|
15 | #ifndef _LINUX_TQUEUE_H
|
---|
16 | #define _LINUX_TQUEUE_H
|
---|
17 |
|
---|
18 | #include <linux/spinlock.h>
|
---|
19 | #include <asm/bitops.h>
|
---|
20 | //#include <asm/system.h>
|
---|
21 |
|
---|
/*
 * New proposed "bottom half" handlers:
 * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
 *
 * Advantages:
 * - Bottom halves are implemented as a linked list.  You can have as many
 *   of them as you want.
 * - No more scanning of a bit field is required upon call of a bottom half.
 * - Support for chained bottom-half lists.  The run_task_queue() function
 *   can be used as a bottom-half handler.  This is for example useful for
 *   bottom halves which want to be delayed until the next clock tick.
 *
 * Problems:
 * - The queue_task_irq() inline function is only atomic with respect to
 *   itself.  Problems can occur when queue_task_irq() is called from a
 *   normal system call and an interrupt comes in.  No problems occur when
 *   queue_task_irq() is called from an interrupt or bottom half and is
 *   interrupted, as run_task_queue() will not be executed/continued before
 *   the last interrupt returns.  If in doubt, use queue_task(), not
 *   queue_task_irq().
 * - Bottom halves are called in the reverse order that they were linked
 *   into the list.
 */
|
---|
44 |
|
---|
/*
 * One deferred-work entry.  Link it into a task_queue with queue_task();
 * run_task_queue() unlinks the whole list and invokes routine(data) for
 * each entry.
 */
struct tq_struct {
	struct tq_struct *next;		/* singly linked list of queued entries */
	unsigned long sync;		/* bit 0 set while queued; must be initialized to zero */
	void (*routine)(void *);	/* function to call */
	void *data;			/* argument passed to routine */
};
|
---|
51 |
|
---|
/* A task queue is just the head pointer of the singly linked entry list. */
typedef struct tq_struct * task_queue;

/* Define and initialize an empty task queue. */
#define DECLARE_TASK_QUEUE(q) task_queue q = NULL

/* The standard system queues; declared here, defined elsewhere. */
extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk;
|
---|
57 |
|
---|
/*
 * To implement your own list of active bottom halves, use the following
 * two definitions:
 *
 *	struct tq_struct *my_bh = NULL;
 *	struct tq_struct run_my_bh = {
 *		0, 0, (void (*)(void *)) run_task_queue, &my_bh
 *	};
 *
 * To activate a bottom half on your list, use:
 *
 *	queue_task(tq_pointer, &my_bh);
 *
 * To run the bottom halves on your list, put them on the immediate list by:
 *
 *	queue_task(&run_my_bh, &tq_immediate);
 *
 * This allows you to do deferred processing.  For example, you could
 * have a bottom-half list tq_timer, which is marked active by the timer
 * interrupt.
 */
|
---|
79 |
|
---|
/* Single lock serializing insertion into and detachment from all task queues. */
extern spinlock_t tqueue_lock;
|
---|
81 |
|
---|
82 | /*
|
---|
83 | * queue_task
|
---|
84 | */
|
---|
85 | extern __inline__ void queue_task(struct tq_struct *bh_pointer,
|
---|
86 | task_queue *bh_list)
|
---|
87 | {
|
---|
88 | if (!test_and_set_bit(0,&bh_pointer->sync)) {
|
---|
89 | unsigned long flags;
|
---|
90 | spin_lock_irqsave(&tqueue_lock, flags);
|
---|
91 | bh_pointer->next = *bh_list;
|
---|
92 | *bh_list = bh_pointer;
|
---|
93 | spin_unlock_irqrestore(&tqueue_lock, flags);
|
---|
94 | }
|
---|
95 | }
|
---|
96 |
|
---|
/*
 * NOTE(review): the memory barrier is stubbed out to a no-op in this port.
 * run_task_queue() calls mb() to order the reads of ->routine/->data/->next
 * before clearing ->sync; confirm an empty barrier is safe on the target
 * (presumably uniprocessor only -- TODO verify).
 */
#define mb()
|
---|
/*
 * Call all "bottom halves" on a given list.
 */
|
---|
101 | extern __inline__ void run_task_queue(task_queue *list)
|
---|
102 | {
|
---|
103 | if (*list) {
|
---|
104 | unsigned long flags;
|
---|
105 | struct tq_struct *p;
|
---|
106 |
|
---|
107 | spin_lock_irqsave(&tqueue_lock, flags);
|
---|
108 | p = *list;
|
---|
109 | *list = NULL;
|
---|
110 | spin_unlock_irqrestore(&tqueue_lock, flags);
|
---|
111 |
|
---|
112 | while (p) {
|
---|
113 | void *arg;
|
---|
114 | void (*f) (void *);
|
---|
115 | struct tq_struct *save_p;
|
---|
116 | arg = p -> data;
|
---|
117 | f = p -> routine;
|
---|
118 | save_p = p;
|
---|
119 | p = p -> next;
|
---|
120 | mb();
|
---|
121 | save_p -> sync = 0;
|
---|
122 | (*f)(arg);
|
---|
123 | }
|
---|
124 | }
|
---|
125 | }
|
---|
126 |
|
---|
127 | #endif /* _LINUX_TQUEUE_H */
|
---|