// Mutual exclusion spin locks.
|
|
2
|
|
3 #include "types.h"
|
|
4 #include "defs.h"
|
|
5 #include "param.h"
|
|
6 #include "arm.h"
|
|
7 #include "memlayout.h"
|
|
8 #include "mmu.h"
|
|
9 #include "proc.h"
|
|
10 #include "spinlock.h"
|
|
11
|
52
|
12 #define __ncode __code
|
|
13 #
|
0
|
14 void initlock(struct spinlock *lk, char *name)
|
|
15 {
|
|
16 lk->name = name;
|
|
17 lk->locked = 0;
|
|
18 lk->cpu = 0;
|
|
19 }
|
|
20
|
|
// For single-CPU systems, there is no need for a spinlock.
// Add real locking when multi-processor support is added.
|
|
23
|
|
24
|
|
25 // Acquire the lock.
|
|
26 // Loops (spins) until the lock is acquired.
|
|
27 // Holding a lock for a long time may cause
|
|
28 // other CPUs to waste time spinning to acquire it.
|
|
29 void acquire(struct spinlock *lk)
|
|
30 {
|
|
31 pushcli(); // disable interrupts to avoid deadlock.
|
|
32 lk->locked = 1; // set the lock status to make the kernel happy
|
|
33
|
|
34 #if 0
|
|
35 if(holding(lk))
|
|
36 panic("acquire");
|
|
37
|
|
38 // The xchg is atomic.
|
|
39 // It also serializes, so that reads after acquire are not
|
|
40 // reordered before it.
|
|
41 while(xchg(&lk->locked, 1) != 0)
|
|
42 ;
|
|
43
|
|
44 // Record info about lock acquisition for debugging.
|
|
45 lk->cpu = cpu;
|
|
46 getcallerpcs(get_fp(), lk->pcs);
|
|
47
|
|
48 #endif
|
|
49 }
|
|
50
|
37
|
/*
void cbc_acquire(struct spinlock *lk, __code (*next)(int ret))
{
  pushcli(); // disable interrupts to avoid deadlock.
  lk->locked = 1; // set the lock status to make the kernel happy

#if 0
  if(holding(lk))
    panic("acquire");

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(get_fp(), lk->pcs);

#endif
  goto next();
}
*/
|
35
|
75
|
37
|
/*
// Release the lock.
void cbc_release(struct spinlock *lk, __code (*next)(int ret))
{
#if 0
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // The xchg serializes, so that reads before release are
  // not reordered after it. The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lock->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lk->locked, 0);
#endif

  lk->locked = 0; // set the lock state to keep the kernel happy
  popcli();
  goto next();
}
*/
|
35
|
104
|
0
|
105 void release(struct spinlock *lk)
|
|
106 {
|
|
107 #if 0
|
|
108 if(!holding(lk))
|
|
109 panic("release");
|
|
110
|
|
111 lk->pcs[0] = 0;
|
|
112 lk->cpu = 0;
|
|
113
|
|
114 // The xchg serializes, so that reads before release are
|
|
115 // not reordered after it. The 1996 PentiumPro manual (Volume 3,
|
|
116 // 7.2) says reads can be carried out speculatively and in
|
|
117 // any order, which implies we need to serialize here.
|
|
118 // But the 2007 Intel 64 Architecture Memory Ordering White
|
|
119 // Paper says that Intel 64 and IA-32 will not move a load
|
|
120 // after a store. So lock->locked = 0 would work here.
|
|
121 // The xchg being asm volatile ensures gcc emits it after
|
|
122 // the above assignments (and after the critical section).
|
|
123 xchg(&lk->locked, 0);
|
|
124 #endif
|
|
125
|
|
126 lk->locked = 0; // set the lock state to keep the kernel happy
|
|
127 popcli();
|
|
128 }
|
|
129
|
|
130
|
|
131 // Check whether this cpu is holding the lock.
|
|
132 int holding(struct spinlock *lock)
|
|
133 {
|
|
134 return lock->locked; // && lock->cpu == cpus;
|
|
135 }
|
|
136
|