/*
 * tqueue.h --- task queue handling for Linux.
 *
 * Mostly based on proposed bottom-half replacement code written by
 * Kai Petzke, wpp@marie.physik.tu-berlin.de.
 *
 * Modified for use in the Linux kernel by Theodore Ts'o,
 * tytso@mit.edu.  Any bugs are my fault, not Kai's.
 *
 * The original comment follows below.
 */

#ifndef _LINUX_TQUEUE_H
#define _LINUX_TQUEUE_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/bitops.h>
#include <asm/system.h>

/*
 * New proposed "bottom half" handlers:
 * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
 *
 * Advantages:
 * - Bottom halves are implemented as a linked list.  You can have as
 *   many of them as you want.
 * - No bit field needs to be scanned when a bottom half is run.
 * - Support for chained bottom half lists.  The run_task_queue() function
 *   can itself be used as a bottom half handler.  This is useful, for
 *   example, for bottom halves that want to be delayed until the next
 *   clock tick.
 *
 * Notes:
 * - queue_task() adds at the tail of the list, so bottom halves are
 *   run in the order in which they were queued.
 */

struct tq_struct {
	struct list_head list;		/* linked list of active bh's */
	unsigned long sync;		/* must be initialized to zero */
	void (*routine)(void *);	/* function to call */
	void *data;			/* argument to function */
};

/*
 * Emit code to initialise a tq_struct's routine and data pointers
 */
#define PREPARE_TQUEUE(_tq, _routine, _data)			\
	do {							\
		(_tq)->routine = _routine;			\
		(_tq)->data = _data;				\
	} while (0)

/*
 * Emit code to initialise all of a tq_struct
 */
#define INIT_TQUEUE(_tq, _routine, _data)			\
	do {							\
		INIT_LIST_HEAD(&(_tq)->list);			\
		(_tq)->sync = 0;				\
		PREPARE_TQUEUE((_tq), (_routine), (_data));	\
	} while (0)
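
/*
 * A minimal initialisation sketch (my_routine, my_info, my_task and
 * my_setup are hypothetical driver names, not part of this header):
 *
 *	static void my_routine(void *data);
 *	static struct my_info my_info;
 *	static struct tq_struct my_task;
 *
 *	static int my_setup(void)
 *	{
 *		INIT_TQUEUE(&my_task, my_routine, &my_info);
 *		...
 *	}
 *
 * PREPARE_TQUEUE() alone suffices to repoint routine and data on a
 * tq_struct whose list and sync members are already set up.
 */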

typedef struct list_head task_queue;

#define DECLARE_TASK_QUEUE(q)	LIST_HEAD(q)
#define TQ_ACTIVE(q)		(!list_empty(&q))

extern task_queue tq_timer, tq_immediate, tq_disk;

/*
 * To implement your own list of active bottom halves, use the following
 * two definitions:
 *
 * DECLARE_TASK_QUEUE(my_tqueue);
 * struct tq_struct my_task = {
 * 	routine: (void (*)(void *)) my_routine,
 *	data: &my_data
 * };
 *
 * To activate a bottom half on a list, use:
 *
 *	queue_task(&my_task, &my_tqueue);
 *
 * To later run the queued tasks, use:
 *
 *	run_task_queue(&my_tqueue);
 *
 * This allows you to do deferred processing.  For example, the
 * predefined tq_timer queue declared above is run from the timer
 * interrupt.
 */

extern spinlock_t tqueue_lock;

/*
 * Queue a task on a task queue.  Returns non-zero if the task was
 * added, zero if it was already pending (its sync bit was set).
 */
static inline int queue_task(struct tq_struct *bh_pointer, task_queue *bh_list)
{
	int ret = 0;
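	/* The sync bit ensures a task is queued at most once at a time;
	 * __run_task_queue() clears it again before calling the routine. */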
	if (!test_and_set_bit(0,&bh_pointer->sync)) {
		unsigned long flags;
		spin_lock_irqsave(&tqueue_lock, flags);
		list_add_tail(&bh_pointer->list, bh_list);
		spin_unlock_irqrestore(&tqueue_lock, flags);
		ret = 1;
	}
	return ret;
}
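
/*
 * Typical use with a predefined queue (a sketch; my_task is a
 * hypothetical, already-initialised tq_struct).  Tasks queued on
 * tq_immediate only run once the immediate bottom half is marked
 * active, so queue_task() is normally paired with mark_bh() from
 * <linux/interrupt.h>:
 *
 *	queue_task(&my_task, &tq_immediate);
 *	mark_bh(IMMEDIATE_BH);
 */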

/*
 * Call all "bottom halves" on a given list.
 */

extern void __run_task_queue(task_queue *list);

static inline void run_task_queue(task_queue *list)
{
	if (TQ_ACTIVE(*list))
		__run_task_queue(list);
}
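
/*
 * Putting it together for a private queue (a sketch; every my_* name
 * is illustrative, not part of this header):
 *
 *	static DECLARE_TASK_QUEUE(my_tqueue);
 *	static struct my_info my_info;
 *	static struct tq_struct my_task;
 *
 *	static void my_routine(void *data)
 *	{
 *		struct my_info *info = data;
 *		... do the deferred work here ...
 *	}
 *
 *	INIT_TQUEUE(&my_task, my_routine, &my_info);
 *	queue_task(&my_task, &my_tqueue);
 *	...
 *	run_task_queue(&my_tqueue);
 *
 * The TQ_ACTIVE() test above makes run_task_queue() cheap to call
 * when the queue is empty.
 */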

#endif /* _LINUX_TQUEUE_H */