spinlock.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 */
typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
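
/*
 * Illustrative sketch, not part of the original header: two equivalent
 * ways to initialize a lock.  The names `my_lock` and `init_example`
 * are hypothetical.
 */
#if 0	/* example only, not compiled */
static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;	/* static initializer */

static void init_example(void)
{
	spin_lock_init(&my_lock);	/* equivalent runtime initialization */
	/* at this point spin_is_locked(&my_lock) evaluates to 0 */
}
#endif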
/*
 * Simple spin lock operations.  There are two variants, one that clears
 * IRQs on the local processor and one that does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
static inline void spin_lock(spinlock_t *lock)
{
	unsigned int tmp;

	__asm__ __volatile__(
	".set\tnoreorder\t\t\t# spin_lock\n"
	"1:\tll\t%1, %2\n\t"
	"bnez\t%1, 1b\n\t"
	" li\t%1, 1\n\t"
	"sc\t%1, %0\n\t"
	"beqz\t%1, 1b\n\t"
	" sync\n\t"
	".set\treorder"
	: "=m" (lock->lock), "=&r" (tmp)
	: "m" (lock->lock)
	: "memory");
}
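
/*
 * Rough C rendering of the ll/sc loop above, sketched with GCC's
 * __atomic builtins; an approximation for illustration, not the
 * kernel's code, and `spin_lock_sketch` is a hypothetical name.
 * ll loads the lock word, and the paired sc succeeds only if no other
 * CPU wrote the word in between; on sc failure the loop retries, so
 * the net effect is an atomic 0 -> 1 transition, like the
 * compare-and-swap below.
 */
#if 0	/* example only, not compiled */
static inline void spin_lock_sketch(spinlock_t *lock)
{
	unsigned int expected;

	do {
		expected = 0;	/* acquire only when the word reads 0 */
	} while (!__atomic_compare_exchange_n(&lock->lock, &expected, 1,
					      0, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
}
#endif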
static inline void spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
	".set\tnoreorder\t\t\t# spin_unlock\n\t"
	"sync\n\t"
	"sw\t$0, %0\n\t"
	".set\treorder"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}

static inline unsigned int spin_trylock(spinlock_t *lock)
{
	unsigned int temp, res;

	__asm__ __volatile__(
	".set\tnoreorder\t\t\t# spin_trylock\n\t"
	"1:\tll\t%0, %1\n\t"
	"or\t%2, %0, %3\n\t"
	"sc\t%2, %1\n\t"
	"beqz\t%2, 1b\n\t"
	" and\t%2, %0, %3\n\t"
	".set\treorder"
	: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
	: "r" (1), "m" (lock->lock)
	: "memory");

	return res == 0;
}
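
/*
 * Caller-side sketch, not part of the original header: spin_trylock
 * returns nonzero on success, so a caller can back off instead of
 * busy-waiting.  `trylock_example` is a hypothetical name.
 */
#if 0	/* example only, not compiled */
static void trylock_example(spinlock_t *lock)
{
	if (spin_trylock(lock)) {
		/* ... critical section ... */
		spin_unlock(lock);
	} else {
		/* lock was busy; do other work and try again later */
	}
}
#endif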
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers.  For those circumstances we can "mix" irq-safe locks: any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile unsigned int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while (0)
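
/*
 * Usage sketch with hypothetical names, not part of the original
 * header: any number of CPUs may hold the read side at once, while
 * the write side excludes readers and other writers.  The functions
 * used here are defined below.
 */
#if 0	/* example only, not compiled */
static rwlock_t table_lock = RW_LOCK_UNLOCKED;

static void table_reader(void)
{
	read_lock(&table_lock);
	/* ... read the shared table, no modifications ... */
	read_unlock(&table_lock);
}

static void table_writer(void)
{
	write_lock(&table_lock);
	/* ... exclusive update of the shared table ... */
	write_unlock(&table_lock);
}
#endif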
static inline void read_lock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	".set\tnoreorder\t\t\t# read_lock\n"
	"1:\tll\t%1, %2\n\t"
	"bltz\t%1, 1b\n\t"
	" addu\t%1, 1\n\t"
	"sc\t%1, %0\n\t"
	"beqz\t%1, 1b\n\t"
	" sync\n\t"
	".set\treorder"
	: "=m" (rw->lock), "=&r" (tmp)
	: "m" (rw->lock)
	: "memory");
}
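
/*
 * Rough C rendering of the read-side acquire above, again using GCC
 * __atomic builtins as an approximation: the lock word is a reader
 * count whose sign bit doubles as the writer flag, so a reader spins
 * while the word is negative and otherwise increments it atomically.
 * `read_lock_sketch` is a hypothetical name.
 */
#if 0	/* example only, not compiled */
static inline void read_lock_sketch(rwlock_t *rw)
{
	unsigned int old;

	do {
		old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
	} while ((int)old < 0 ||	/* a writer holds the lock: retry */
		 !__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
					      0, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
}
#endif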
/* Note the use of sub, not subu, which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void read_unlock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	".set\tnoreorder\t\t\t# read_unlock\n"
	"1:\tll\t%1, %2\n\t"
	"sub\t%1, 1\n\t"
	"sc\t%1, %0\n\t"
	"beqz\t%1, 1b\n\t"
	" sync\n\t"
	".set\treorder"
	: "=m" (rw->lock), "=&r" (tmp)
	: "m" (rw->lock)
	: "memory");
}
static inline void write_lock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	".set\tnoreorder\t\t\t# write_lock\n"
	"1:\tll\t%1, %2\n\t"
	"bnez\t%1, 1b\n\t"
	" lui\t%1, 0x8000\n\t"
	"sc\t%1, %0\n\t"
	"beqz\t%1, 1b\n\t"
	" sync\n\t"
	".set\treorder"
	: "=m" (rw->lock), "=&r" (tmp)
	: "m" (rw->lock)
	: "memory");
}
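
/*
 * Rough C rendering of the write-side acquire above (approximation
 * only; `write_lock_sketch` is a hypothetical name): lui 0x8000
 * builds the constant 0x80000000, so a writer waits until the whole
 * word is zero (no readers, no writer) and then stores just the sign
 * bit to lock everyone else out.
 */
#if 0	/* example only, not compiled */
static inline void write_lock_sketch(rwlock_t *rw)
{
	unsigned int expected;

	do {
		expected = 0;	/* wait for no readers and no writer */
	} while (!__atomic_compare_exchange_n(&rw->lock, &expected,
					      0x80000000u, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
}
#endif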
static inline void write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__(
	".set\tnoreorder\t\t\t# write_unlock\n\t"
	"sync\n\t"
	"sw\t$0, %0\n\t"
	".set\treorder"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

#endif /* _ASM_SPINLOCK_H */