// Mutual exclusion spin locks.

#include "types.h"
#include "defs.h"
#include "param.h"
#include "x86.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"

// Initialize lk to the released state and record a human-readable
// name for debugging.
void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
  // Disable interrupts first: an interrupt handler on this CPU that
  // tried to take the same lock would otherwise deadlock against us.
  pushcli();
  if(holding(lk))
    panic("acquire");  // re-acquiring a lock already held by this CPU is a bug.

  // The xchg is atomic.
  // It also serializes, so that reads after acquire are not
  // reordered before it.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(&lk, lk->pcs);
}

// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");  // releasing a lock this CPU does not hold is a bug.

  // Clear the debug info while we still own the lock, so another CPU
  // never observes stale pcs/cpu values on a lock it just acquired.
  lk->pcs[0] = 0;
  lk->cpu = 0;

  // The xchg serializes, so that reads before release are
  // not reordered after it. The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lock->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lk->locked, 0);

  // Re-enable interrupts if this was the outermost pushcli.
  popcli();
}
// Record the current call stack in pcs[] by following the %ebp chain.
// v points at the first argument of the caller's frame; in the standard
// x86 calling convention the saved %ebp sits two words below it.
void
getcallerpcs(void *v, uint pcs[])
{
  uint *ebp;
  int i;

  ebp = (uint*)v - 2;
  for(i = 0; i < 10; i++){
    // Stop at a null or all-ones frame pointer (end-of-chain markers).
    // NOTE(review): there is no bounds check on ebp itself; a corrupt
    // frame pointer could cause a fault here — confirm acceptable.
    if(ebp == 0 || ebp == (uint*)0xffffffff)
      break;
    pcs[i] = ebp[1];     // saved %eip
    ebp = (uint*)ebp[0]; // saved %ebp
  }
  // Zero-fill remaining slots so stale entries are never misread.
  for(; i < 10; i++)
    pcs[i] = 0;
}

// Check whether this cpu is holding the lock.
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu;
}


// Pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli. Also, if interrupts
// are off, then pushcli, popcli leaves them off.

// Disable interrupts and increment this CPU's cli nesting depth.
// The interrupt-enable state at the outermost pushcli is saved in
// cpu->intena so the matching outermost popcli can restore it.
void
pushcli(void)
{
  int eflags;

  // Read eflags before cli(), otherwise FL_IF would already be clear.
  eflags = readeflags();
  cli();
  if(cpu->ncli++ == 0)
    cpu->intena = eflags & FL_IF;
}

// Undo one pushcli; re-enable interrupts only when the nesting depth
// reaches zero and interrupts were enabled at the outermost pushcli.
void
popcli(void)
{
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");  // interrupts must be off here.
  if(--cpu->ncli < 0)
    panic("popcli");  // more popcli calls than pushcli calls.
  if(cpu->ncli == 0 && cpu->intena)
    sti();
}
1.5.6