rwsem.h
Uploaded by: lgb322
Upload date: 2013-02-24
Archive size: 30529k
File size: 6k
Source category:

Embedded Linux

Development platform:

Unix_Linux

  1. /* $Id: rwsem.h,v 1.5 2001/11/18 00:12:56 davem Exp $
  2.  * rwsem.h: R/W semaphores implemented using CAS
  3.  *
  4.  * Written by David S. Miller (davem@redhat.com), 2001.
  5.  * Derived from asm-i386/rwsem.h
  6.  */
  7. #ifndef _SPARC64_RWSEM_H
  8. #define _SPARC64_RWSEM_H
  9. #ifndef _LINUX_RWSEM_H
  10. #error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
  11. #endif
  12. #ifdef __KERNEL__
  13. #include <linux/list.h>
  14. #include <linux/spinlock.h>
  15. struct rwsem_waiter;
  16. extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
  17. extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
  18. extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
  19. struct rw_semaphore {
  20. signed int count;
  21. #define RWSEM_UNLOCKED_VALUE 0x00000000
  22. #define RWSEM_ACTIVE_BIAS 0x00000001
  23. #define RWSEM_ACTIVE_MASK 0x0000ffff
  24. #define RWSEM_WAITING_BIAS 0xffff0000
  25. #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
  26. #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
  27. spinlock_t wait_lock;
  28. struct list_head wait_list;
  29. };
  30. #define __RWSEM_INITIALIZER(name) 
  31. { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
  32. #define DECLARE_RWSEM(name) 
  33. struct rw_semaphore name = __RWSEM_INITIALIZER(name)
  34. static inline void init_rwsem(struct rw_semaphore *sem)
  35. {
  36. sem->count = RWSEM_UNLOCKED_VALUE;
  37. spin_lock_init(&sem->wait_lock);
  38. INIT_LIST_HEAD(&sem->wait_list);
  39. }
  40. static inline void __down_read(struct rw_semaphore *sem)
  41. {
  42. __asm__ __volatile__(
  43. "! beginning __down_readn"
  44. "1:tlduw [%0], %%g5nt"
  45. "add %%g5, 1, %%g7nt"
  46. "cas [%0], %%g5, %%g7nt"
  47. "cmp %%g5, %%g7nt"
  48. "bne,pn %%icc, 1bnt"
  49. " add %%g7, 1, %%g7nt"
  50. "cmp %%g7, 0nt"
  51. "bl,pn %%icc, 3fnt"
  52. " membar #StoreLoad | #StoreStoren"
  53. "2:nt"
  54. ".subsection 2n"
  55. "3:tmov %0, %%g5nt"
  56. "save %%sp, -160, %%spnt"
  57. "mov %%g1, %%l1nt"
  58. "mov %%g2, %%l2nt"
  59. "mov %%g3, %%l3nt"
  60. "call %1nt"
  61. " mov %%g5, %%o0nt"
  62. "mov %%l1, %%g1nt"
  63. "mov %%l2, %%g2nt"
  64. "ba,pt %%xcc, 2bnt"
  65. " restore %%l3, %%g0, %%g3nt"
  66. ".previousnt"
  67. "! ending __down_read"
  68. : : "r" (sem), "i" (rwsem_down_read_failed)
  69. : "g5", "g7", "memory", "cc");
  70. }
  71. static inline void __down_write(struct rw_semaphore *sem)
  72. {
  73. __asm__ __volatile__(
  74. "! beginning __down_writent"
  75. "sethi %%hi(%2), %%g1nt"
  76. "or %%g1, %%lo(%2), %%g1n"
  77. "1:tlduw [%0], %%g5nt"
  78. "add %%g5, %%g1, %%g7nt"
  79. "cas [%0], %%g5, %%g7nt"
  80. "cmp %%g5, %%g7nt"
  81. "bne,pn %%icc, 1bnt"
  82. " cmp %%g7, 0nt"
  83. "bne,pn %%icc, 3fnt"
  84. " membar #StoreLoad | #StoreStoren"
  85. "2:nt"
  86. ".subsection 2n"
  87. "3:tmov %0, %%g5nt"
  88. "save %%sp, -160, %%spnt"
  89. "mov %%g2, %%l2nt"
  90. "mov %%g3, %%l3nt"
  91. "call %1nt"
  92. " mov %%g5, %%o0nt"
  93. "mov %%l2, %%g2nt"
  94. "ba,pt %%xcc, 2bnt"
  95. " restore %%l3, %%g0, %%g3nt"
  96. ".previousnt"
  97. "! ending __down_write"
  98. : : "r" (sem), "i" (rwsem_down_write_failed),
  99.     "i" (RWSEM_ACTIVE_WRITE_BIAS)
  100. : "g1", "g5", "g7", "memory", "cc");
  101. }
  102. static inline void __up_read(struct rw_semaphore *sem)
  103. {
  104. __asm__ __volatile__(
  105. "! beginning __up_readnt"
  106. "1:tlduw [%0], %%g5nt"
  107. "sub %%g5, 1, %%g7nt"
  108. "cas [%0], %%g5, %%g7nt"
  109. "cmp %%g5, %%g7nt"
  110. "bne,pn %%icc, 1bnt"
  111. " cmp %%g7, 0nt"
  112. "bl,pn %%icc, 3fnt"
  113. " membar #StoreLoad | #StoreStoren"
  114. "2:nt"
  115. ".subsection 2n"
  116. "3:tsethi %%hi(%2), %%g1nt"
  117. "sub %%g7, 1, %%g7nt"
  118. "or %%g1, %%lo(%2), %%g1nt"
  119. "andcc %%g7, %%g1, %%g0nt"
  120. "bne,pn %%icc, 2bnt"
  121. " mov %0, %%g5nt"
  122. "save %%sp, -160, %%spnt"
  123. "mov %%g2, %%l2nt"
  124. "mov %%g3, %%l3nt"
  125. "call %1nt"
  126. " mov %%g5, %%o0nt"
  127. "mov %%l2, %%g2nt"
  128. "ba,pt %%xcc, 2bnt"
  129. " restore %%l3, %%g0, %%g3nt"
  130. ".previousnt"
  131. "! ending __up_read"
  132. : : "r" (sem), "i" (rwsem_wake),
  133.     "i" (RWSEM_ACTIVE_MASK)
  134. : "g1", "g5", "g7", "memory", "cc");
  135. }
  136. static inline void __up_write(struct rw_semaphore *sem)
  137. {
  138. __asm__ __volatile__(
  139. "! beginning __up_writent"
  140. "sethi %%hi(%2), %%g1nt"
  141. "or %%g1, %%lo(%2), %%g1n"
  142. "1:tlduw [%0], %%g5nt"
  143. "sub %%g5, %%g1, %%g7nt"
  144. "cas [%0], %%g5, %%g7nt"
  145. "cmp %%g5, %%g7nt"
  146. "bne,pn %%icc, 1bnt"
  147. " sub %%g7, %%g1, %%g7nt"
  148. "cmp %%g7, 0nt"
  149. "bl,pn %%icc, 3fnt"
  150. " membar #StoreLoad | #StoreStoren"
  151. "2:nt"
  152. ".subsection 2n"
  153. "3:tmov %0, %%g5nt"
  154. "save %%sp, -160, %%spnt"
  155. "mov %%g2, %%l2nt"
  156. "mov %%g3, %%l3nt"
  157. "call %1nt"
  158. " mov %%g5, %%o0nt"
  159. "mov %%l2, %%g2nt"
  160. "ba,pt %%xcc, 2bnt"
  161. " restore %%l3, %%g0, %%g3nt"
  162. ".previousnt"
  163. "! ending __up_write"
  164. : : "r" (sem), "i" (rwsem_wake),
  165.     "i" (RWSEM_ACTIVE_WRITE_BIAS)
  166. : "g1", "g5", "g7", "memory", "cc");
  167. }
  168. static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
  169. {
  170. int tmp = delta;
  171. __asm__ __volatile__(
  172. "1:tlduw [%2], %%g5nt"
  173. "add %%g5, %1, %%g7nt"
  174. "cas [%2], %%g5, %%g7nt"
  175. "cmp %%g5, %%g7nt"
  176. "bne,pn %%icc, 1bnt"
  177. " membar #StoreLoad | #StoreStorent"
  178. "mov %%g7, %0nt"
  179. : "=&r" (tmp)
  180. : "0" (tmp), "r" (sem)
  181. : "g5", "g7", "memory");
  182. return tmp + delta;
  183. }
  184. #define rwsem_atomic_add rwsem_atomic_update
  185. static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new)
  186. {
  187. u32 old = (sem->count & 0xffff0000) | (u32) __old;
  188. u32 new = (old & 0xffff0000) | (u32) __new;
  189. u32 prev;
  190. again:
  191. __asm__ __volatile__("cas [%2], %3, %0nt"
  192.      "membar #StoreLoad | #StoreStore"
  193.      : "=&r" (prev)
  194.      : "0" (new), "r" (sem), "r" (old)
  195.      : "memory");
  196. /* To give the same semantics as x86 cmpxchgw, keep trying
  197.  * if only the upper 16-bits changed.
  198.  */
  199. if (prev != old &&
  200.     ((prev & 0xffff) == (old & 0xffff)))
  201. goto again;
  202. return prev & 0xffff;
  203. }
  204. static inline signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new)
  205. {
  206. return cmpxchg(&sem->count,old,new);
  207. }
  208. #endif /* __KERNEL__ */
  209. #endif /* _SPARC64_RWSEM_H */