rwsem-spinlock.c
Uploaded by: jlfgdled
Upload date: 2013-04-10
Archive size: 33168k
File size: 6k
Source category:

Linux/Unix programming

Development platform:

Unix/Linux
  1. /* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
  2.  *                                   implementation
  3.  *
  4.  * Copyright (c) 2001   David Howells (dhowells@redhat.com).
  5.  * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
  6.  * - Derived also from comments by Linus
  7.  *
  8.  * Trylock by Brian Watson (Brian.J.Watson@compaq.com).
  9.  */
  10. #include <linux/rwsem.h>
  11. #include <linux/sched.h>
  12. #include <linux/module.h>
/*
 * One queued sleeper on a semaphore.  The structure lives on the
 * sleeping task's own stack (see __down_read/__down_write) and is
 * linked into rw_semaphore::wait_list, so it is only valid while
 * flags is non-zero: the releaser zeroes flags to signal "granted",
 * after which the sleeper may return and the storage vanish.
 */
struct rwsem_waiter {
	struct list_head list;		/* link in rw_semaphore::wait_list */
	struct task_struct *task;	/* the task to wake when granted */
	unsigned int flags;		/* what kind of lock is wanted; 0 once granted */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
  20. #if RWSEM_DEBUG
  21. void rwsemtrace(struct rw_semaphore *sem, const char *str)
  22. {
  23. if (sem->debug)
  24. printk("[%d] %s({%d,%d})n",
  25.        current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
  26. }
  27. #endif
  28. /*
  29.  * initialise the semaphore
  30.  */
  31. void init_rwsem(struct rw_semaphore *sem)
  32. {
  33. sem->activity = 0;
  34. spin_lock_init(&sem->wait_lock);
  35. INIT_LIST_HEAD(&sem->wait_list);
  36. #if RWSEM_DEBUG
  37. sem->debug = 0;
  38. #endif
  39. }
  40. /*
  41.  * handle the lock being released whilst there are processes blocked on it that can now run
  42.  * - if we come here, then:
  43.  *   - the 'active count' _reached_ zero
  44.  *   - the 'waiting count' is non-zero
  45.  * - the spinlock must be held by the caller
  46.  * - woken process blocks are discarded from the list after having flags zeroised
  47.  */
  48. static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
  49. {
  50. struct rwsem_waiter *waiter;
  51. int woken;
  52. rwsemtrace(sem,"Entering __rwsem_do_wake");
  53. waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
  54. /* try to grant a single write lock if there's a writer at the front of the queue
  55.  * - we leave the 'waiting count' incremented to signify potential contention
  56.  */
  57. if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
  58. sem->activity = -1;
  59. list_del(&waiter->list);
  60. waiter->flags = 0;
  61. wake_up_process(waiter->task);
  62. goto out;
  63. }
  64. /* grant an infinite number of read locks to the readers at the front of the queue */
  65. woken = 0;
  66. do {
  67. list_del(&waiter->list);
  68. waiter->flags = 0;
  69. wake_up_process(waiter->task);
  70. woken++;
  71. if (list_empty(&sem->wait_list))
  72. break;
  73. waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
  74. } while (waiter->flags&RWSEM_WAITING_FOR_READ);
  75. sem->activity += woken;
  76.  out:
  77. rwsemtrace(sem,"Leaving __rwsem_do_wake");
  78. return sem;
  79. }
  80. /*
  81.  * wake a single writer
  82.  */
  83. static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
  84. {
  85. struct rwsem_waiter *waiter;
  86. sem->activity = -1;
  87. waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
  88. list_del(&waiter->list);
  89. waiter->flags = 0;
  90. wake_up_process(waiter->task);
  91. return sem;
  92. }
  93. /*
  94.  * get a read lock on the semaphore
  95.  */
/*
 * Get a read lock on the semaphore, sleeping uninterruptibly until it
 * can be granted.
 *
 * The lock is granted immediately only if no writer holds it
 * (activity >= 0) AND nobody is already queued — queuing behind
 * existing waiters keeps a stream of readers from starving a waiting
 * writer.  Otherwise we queue a waiter built on our own stack and
 * sleep until a releaser zeroes waiter.flags.
 */
void __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_read");

	spin_lock(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted: just account one more active reader */
		sem->activity++;
		spin_unlock(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	/* go (logically) to sleep before publishing the waiter, so a
	 * wake-up that fires the instant we are queued is not lost */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue — a stack-resident waiter
	 * linked onto the semaphore's FIFO list */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock: the releaser signals the grant by
	 * zeroing waiter.flags (done under wait_lock) */
	for (;;) {
		if (!waiter.flags)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_read");
}
  127. /*
  128.  * trylock for reading -- returns 1 if successful, 0 if contention
  129.  */
  130. int __down_read_trylock(struct rw_semaphore *sem)
  131. {
  132. int ret = 0;
  133. rwsemtrace(sem,"Entering __down_read_trylock");
  134. spin_lock(&sem->wait_lock);
  135. if (sem->activity>=0 && list_empty(&sem->wait_list)) {
  136. /* granted */
  137. sem->activity++;
  138. ret = 1;
  139. }
  140. spin_unlock(&sem->wait_lock);
  141. rwsemtrace(sem,"Leaving __down_read_trylock");
  142. return ret;
  143. }
  144. /*
  145.  * get a write lock on the semaphore
  146.  * - note that we increment the waiting count anyway to indicate an exclusive lock
  147.  */
/*
 * Get a write lock on the semaphore, sleeping uninterruptibly until it
 * can be granted.
 *
 * The lock is granted immediately only if the semaphore is completely
 * idle (activity == 0) and nobody is queued.  Otherwise we queue a
 * waiter built on our own stack and sleep until a releaser zeroes
 * waiter.flags.
 * - note that we queue anyway to indicate an exclusive-lock claim and
 *   block later readers from jumping the queue
 */
void __down_write(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_write");

	spin_lock(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted: mark the semaphore write-locked */
		sem->activity = -1;
		spin_unlock(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	/* go (logically) to sleep before publishing the waiter, so a
	 * wake-up that fires the instant we are queued is not lost */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue — a stack-resident waiter
	 * linked onto the semaphore's FIFO list */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock: the releaser signals the grant by
	 * zeroing waiter.flags (done under wait_lock) */
	for (;;) {
		if (!waiter.flags)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_write");
}
  179. /*
  180.  * trylock for writing -- returns 1 if successful, 0 if contention
  181.  */
  182. int __down_write_trylock(struct rw_semaphore *sem)
  183. {
  184. int ret = 0;
  185. rwsemtrace(sem,"Entering __down_write_trylock");
  186. spin_lock(&sem->wait_lock);
  187. if (sem->activity==0 && list_empty(&sem->wait_list)) {
  188. /* granted */
  189. sem->activity = -1;
  190. ret = 1;
  191. }
  192. spin_unlock(&sem->wait_lock);
  193. rwsemtrace(sem,"Leaving __down_write_trylock");
  194. return ret;
  195. }
  196. /*
  197.  * release a read lock on the semaphore
  198.  */
  199. void __up_read(struct rw_semaphore *sem)
  200. {
  201. rwsemtrace(sem,"Entering __up_read");
  202. spin_lock(&sem->wait_lock);
  203. if (--sem->activity==0 && !list_empty(&sem->wait_list))
  204. sem = __rwsem_wake_one_writer(sem);
  205. spin_unlock(&sem->wait_lock);
  206. rwsemtrace(sem,"Leaving __up_read");
  207. }
  208. /*
  209.  * release a write lock on the semaphore
  210.  */
  211. void __up_write(struct rw_semaphore *sem)
  212. {
  213. rwsemtrace(sem,"Entering __up_write");
  214. spin_lock(&sem->wait_lock);
  215. sem->activity = 0;
  216. if (!list_empty(&sem->wait_list))
  217. sem = __rwsem_do_wake(sem);
  218. spin_unlock(&sem->wait_lock);
  219. rwsemtrace(sem,"Leaving __up_write");
  220. }
  221. EXPORT_SYMBOL(init_rwsem);
  222. EXPORT_SYMBOL(__down_read);
  223. EXPORT_SYMBOL(__down_write);
  224. EXPORT_SYMBOL(__up_read);
  225. EXPORT_SYMBOL(__up_write);
  226. #if RWSEM_DEBUG
  227. EXPORT_SYMBOL(rwsemtrace);
  228. #endif