path: root/release/src/linux/linux/include/asm-i386/locks.h
/*
 *	SMP lock primitives for building ix86 locks
 *	(not yet used).
 *
 *		Alan Cox, alan@redhat.com, 1995
 */
 
/*
 *	This would be much easier if it were just assembler, but it
 *	would be far less clear and far harder to borrow for other
 *	processors.
 */
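
/*
 *	Returns 1 if this CPU took the lock for the first time, 0 if it
 *	already held the lock and only the hold count was incremented.
 */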

static __inline__ int prim_spin_lock(struct spinlock *sp)
{
	int processor=smp_processor_id();
	
	/*
	 *	Grab the lock bit
	 */
	 
	while(lock_set_bit(0,&sp->lock))
	{
		/*
		 *	Failed, but that may be because we already own it
		 */
		 
		if(sp->cpu==processor)
		{
			sp->users++;
			return 0;
		}
		/*
		 *	Spin in the cache S state if possible
		 */
		while(sp->lock)
		{
			/*
			 *	Wait for any invalidates to go off
			 */
			 
			if(smp_invalidate_needed&(1<<processor))
				while(lock_clear_bit(processor,&smp_invalidate_needed))
					local_flush_tlb();
			sp->spins++;
		}
		/*
		 *	Someone wrote the line, we go 'I' and get
		 *	the cache entry. Now try to regrab
		 */
	}
	sp->users++;
	sp->cpu=processor;
	return 1;
}
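
/*
 *	Hypothetical usage sketch (the lock name "io_lock" is illustrative,
 *	not defined in this header): the primitive nests when the owning
 *	CPU grabs the lock again.
 *
 *		prim_spin_lock(&io_lock);	returns 1, first acquisition
 *		prim_spin_lock(&io_lock);	returns 0, same CPU, users == 2
 *		prim_spin_unlock(&io_lock);	users == 1, lock still held
 *		prim_spin_unlock(&io_lock);	users == 0, lock released
 */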

/*
 *	Release a spin lock
 */
 
static __inline__ int prim_spin_unlock(struct spinlock *sp)
{
	/* This is safe. The decrement is still guarded by the lock. A multilock would
	   not be safe this way */
	if(!--sp->users)
	{
		sp->cpu=NO_PROC_ID;
		lock_clear_bit(0,&sp->lock);
		return 1;
	}
	return 0;
}


/*
 *	Non blocking lock grab
 */
 
static __inline__ int prim_spin_lock_nb(struct spinlock *sp)
{
	if(lock_set_bit(0,&sp->lock))
		return 0;		/* Locked already */
	sp->cpu=smp_processor_id();	/* Record the owner, as prim_spin_lock does */
	sp->users++;
	return 1;			/* We got the lock */
}
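
/*
 *	Hypothetical usage sketch (the lock name and the two helpers are
 *	illustrative only): try the lock and fall back to other work
 *	instead of spinning.
 *
 *		if(prim_spin_lock_nb(&stats_lock))
 *		{
 *			update_stats();
 *			prim_spin_unlock(&stats_lock);
 *		}
 *		else
 *			defer_stats_update();
 */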


/*
 *	These wrap the locking primitives up for usage
 */
 
static __inline__ void spinlock(struct spinlock *sp)
{
	if(sp->priority<current->lock_order)
		panic("lock order violation: %s (%d)\n", sp->name, current->lock_order);
	if(prim_spin_lock(sp))
	{
		/*
		 *	We got a new lock. Update the priority chain
		 */
		sp->oldpri=current->lock_order;
		current->lock_order=sp->priority;
	}
}

static __inline__ void spinunlock(struct spinlock *sp)
{
	int pri;
	if(current->lock_order!=sp->priority)
		panic("lock release order violation %s (%d)\n", sp->name, current->lock_order);
	pri=sp->oldpri;
	if(prim_spin_unlock(sp))
	{
		/*
		 *	Update the debugging lock priority chain. We gave up
		 *	our last hold on the lock, so restore the priority we
		 *	saved before releasing it.
		 */
		current->lock_order=pri;
	}
}

static __inline__ int spintestlock(struct spinlock *sp)
{
	/*
	 *	We do no sanity checks; it is legal to optimistically
	 *	try for a lower lock. Returns nonzero if we got it.
	 */
	return prim_spin_lock_nb(sp);
}

static __inline__ void spintestunlock(struct spinlock *sp)
{
	/*
	 *	A testlock doesn't update the lock chain so we
	 *	must not update it on free
	 */
	prim_spin_unlock(sp);
}
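
/*
 *	Hypothetical usage sketch (lock names and priorities are
 *	illustrative, not defined in this header). The debugging chain
 *	requires nested locks to be taken in non-decreasing priority
 *	order and released innermost first:
 *
 *		spinlock(&dir_lock);		priority 1, say
 *		spinlock(&inode_lock);		priority 2: 2 >= 1, allowed
 *		spinunlock(&inode_lock);	restores lock_order to 1
 *		spinunlock(&dir_lock);		restores the caller's order
 *
 *	Taking inode_lock first and then dir_lock would trip the
 *	"lock order violation" panic in spinlock().
 */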