semaphore-helper.h
上传用户:lgb322
上传日期:2013-02-24
资源大小:30529k
文件大小:4k
/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * Copyright (C) 1996 Linus Torvalds
 * Copyright (C) 1999 Andrea Arcangeli
 * Copyright (C) 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SEMAPHORE_HELPER_H
#define _ASM_SEMAPHORE_HELPER_H

#include <linux/config.h>

/*
 * Raw, non-atomic accessors for an atomic_t's counter field.  Only safe
 * on the !CONFIG_CPU_HAS_LLSC paths below, where the callers bracket
 * them with save_and_cli()/restore_flags().
 */
#define sem_read(a) ((a)->counter)
#define sem_inc(a) (((a)->counter)++)
#define sem_dec(a) (((a)->counter)--)
/*
 * These two _must_ execute atomically wrt each other.
 *
 * wake_one_more() grants one "waking" token; a sleeper consumes it via
 * waking_non_zero() (or one of its variants below).
 */
static inline void wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}
- #ifdef CONFIG_CPU_HAS_LLSC
- static inline int
- waking_non_zero(struct semaphore *sem)
- {
- int ret, tmp;
- __asm__ __volatile__(
- "1:tllt%1, %2nt"
- "blezt%1, 2fnt"
- "subut%0, %1, 1nt"
- "sct%0, %2nt"
- "beqzt%0, 1bnt"
- "2:"
- : "=r" (ret), "=r" (tmp), "=m" (sem->waking)
- : "0"(0));
- return ret;
- }
- #else /* !CONFIG_CPU_HAS_LLSC */
- /*
- * It doesn't make sense, IMHO, to endlessly turn interrupts off and on again.
- * Do it once and that's it. ll/sc *has* it's advantages. HK
- */
- static inline int waking_non_zero(struct semaphore *sem)
- {
- unsigned long flags;
- int ret = 0;
- save_and_cli(flags);
- if (sem_read(&sem->waking) > 0) {
- sem_dec(&sem->waking);
- ret = 1;
- }
- restore_flags(flags);
- return ret;
- }
- #endif /* !CONFIG_CPU_HAS_LLSC */
- #ifdef CONFIG_CPU_HAS_LLDSCD
- /*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible decrement
- * simultaneously and atomicly with the sem->waking adjustment,
- * otherwise we can race with wake_one_more.
- *
- * This is accomplished by doing a 64-bit ll/sc on the 2 32-bit words.
- *
- * This is crazy. Normally it stricly forbidden to use 64-bit operations
- * in the 32-bit MIPS kernel. In this case it's however ok because if an
- * interrupt has destroyed the upper half of registers sc will fail.
- * Note also that this will not work for MIPS32 CPUS!
- *
- * Pseudocode:
- *
- * If(sem->waking > 0) {
- * Decrement(sem->waking)
- * Return(SUCCESS)
- * } else If(segnal_pending(tsk)) {
- * Increment(sem->count)
- * Return(-EINTR)
- * } else {
- * Return(SLEEP)
- * }
- */
- static inline int
- waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
- {
- long ret, tmp;
- __asm__ __volatile__(
- ".settpushnt"
- ".settmips3nt"
- ".settnoatn"
- "0:tlldt%1, %2nt"
- "lit%0, 0nt"
- "sllt$1, %1, 0nt"
- "blezt$1, 1fnt"
- "daddiut%1, %1, -1nt"
- "lit%0, 1nt"
- "bt2fn"
- "1:tbeqzt%3, 2fnt"
- "lit%0, %4nt"
- "dlit$1, 0x0000000100000000nt"
- "daddut%1, %1, $1n"
- "2:tscdt%1, %2nt"
- "beqzt%1, 0bnt"
- ".settpop"
- : "=&r" (ret), "=&r" (tmp), "=m" (*sem)
- : "r" (signal_pending(tsk)), "i" (-EINTR));
- return ret;
- }
/*
 * waking_non_zero_trylock is unused. we do everything in
 * down_trylock and let non-ll/sc hosts bounce around.
 *
 * Always reports "no token" (0); only performs the magic-number sanity
 * check when waitqueue debugging is enabled.
 */
static inline int
waking_non_zero_trylock(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	return 0;
}
- #else /* !CONFIG_CPU_HAS_LLDSCD */
- static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
- {
- int ret = 0;
- unsigned long flags;
- save_and_cli(flags);
- if (sem_read(&sem->waking) > 0) {
- sem_dec(&sem->waking);
- ret = 1;
- } else if (signal_pending(tsk)) {
- sem_inc(&sem->count);
- ret = -EINTR;
- }
- restore_flags(flags);
- return ret;
- }
- static inline int waking_non_zero_trylock(struct semaphore *sem)
- {
- int ret = 1;
- unsigned long flags;
- save_and_cli(flags);
- if (sem_read(&sem->waking) <= 0)
- sem_inc(&sem->count);
- else {
- sem_dec(&sem->waking);
- ret = 0;
- }
- restore_flags(flags);
- return ret;
- }
- #endif /* !CONFIG_CPU_HAS_LLDSCD */
- #endif /* _ASM_SEMAPHORE_HELPER_H */