mlock.c
上传用户:jlfgdled
上传日期:2013-04-10
资源大小:33168k
文件大小:7k
源码类别:

Linux/Unix编程

开发平台:

Unix_Linux

/*
 * linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
  13. static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
  14. {
  15. spin_lock(&vma->vm_mm->page_table_lock);
  16. vma->vm_flags = newflags;
  17. spin_unlock(&vma->vm_mm->page_table_lock);
  18. return 0;
  19. }
  20. static inline int mlock_fixup_start(struct vm_area_struct * vma,
  21. unsigned long end, int newflags)
  22. {
  23. struct vm_area_struct * n;
  24. n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  25. if (!n)
  26. return -EAGAIN;
  27. *n = *vma;
  28. n->vm_end = end;
  29. n->vm_flags = newflags;
  30. n->vm_raend = 0;
  31. if (n->vm_file)
  32. get_file(n->vm_file);
  33. if (n->vm_ops && n->vm_ops->open)
  34. n->vm_ops->open(n);
  35. vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
  36. lock_vma_mappings(vma);
  37. spin_lock(&vma->vm_mm->page_table_lock);
  38. vma->vm_start = end;
  39. __insert_vm_struct(current->mm, n);
  40. spin_unlock(&vma->vm_mm->page_table_lock);
  41. unlock_vma_mappings(vma);
  42. return 0;
  43. }
  44. static inline int mlock_fixup_end(struct vm_area_struct * vma,
  45. unsigned long start, int newflags)
  46. {
  47. struct vm_area_struct * n;
  48. n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  49. if (!n)
  50. return -EAGAIN;
  51. *n = *vma;
  52. n->vm_start = start;
  53. n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
  54. n->vm_flags = newflags;
  55. n->vm_raend = 0;
  56. if (n->vm_file)
  57. get_file(n->vm_file);
  58. if (n->vm_ops && n->vm_ops->open)
  59. n->vm_ops->open(n);
  60. lock_vma_mappings(vma);
  61. spin_lock(&vma->vm_mm->page_table_lock);
  62. vma->vm_end = start;
  63. __insert_vm_struct(current->mm, n);
  64. spin_unlock(&vma->vm_mm->page_table_lock);
  65. unlock_vma_mappings(vma);
  66. return 0;
  67. }
  68. static inline int mlock_fixup_middle(struct vm_area_struct * vma,
  69. unsigned long start, unsigned long end, int newflags)
  70. {
  71. struct vm_area_struct * left, * right;
  72. left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  73. if (!left)
  74. return -EAGAIN;
  75. right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  76. if (!right) {
  77. kmem_cache_free(vm_area_cachep, left);
  78. return -EAGAIN;
  79. }
  80. *left = *vma;
  81. *right = *vma;
  82. left->vm_end = start;
  83. right->vm_start = end;
  84. right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
  85. vma->vm_flags = newflags;
  86. left->vm_raend = 0;
  87. right->vm_raend = 0;
  88. if (vma->vm_file)
  89. atomic_add(2, &vma->vm_file->f_count);
  90. if (vma->vm_ops && vma->vm_ops->open) {
  91. vma->vm_ops->open(left);
  92. vma->vm_ops->open(right);
  93. }
  94. vma->vm_raend = 0;
  95. vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
  96. lock_vma_mappings(vma);
  97. spin_lock(&vma->vm_mm->page_table_lock);
  98. vma->vm_start = start;
  99. vma->vm_end = end;
  100. vma->vm_flags = newflags;
  101. __insert_vm_struct(current->mm, left);
  102. __insert_vm_struct(current->mm, right);
  103. spin_unlock(&vma->vm_mm->page_table_lock);
  104. unlock_vma_mappings(vma);
  105. return 0;
  106. }
  107. static int mlock_fixup(struct vm_area_struct * vma, 
  108. unsigned long start, unsigned long end, unsigned int newflags)
  109. {
  110. int pages, retval;
  111. if (newflags == vma->vm_flags)
  112. return 0;
  113. if (start == vma->vm_start) {
  114. if (end == vma->vm_end)
  115. retval = mlock_fixup_all(vma, newflags);
  116. else
  117. retval = mlock_fixup_start(vma, end, newflags);
  118. } else {
  119. if (end == vma->vm_end)
  120. retval = mlock_fixup_end(vma, start, newflags);
  121. else
  122. retval = mlock_fixup_middle(vma, start, end, newflags);
  123. }
  124. if (!retval) {
  125. /* keep track of amount of locked VM */
  126. pages = (end - start) >> PAGE_SHIFT;
  127. if (newflags & VM_LOCKED) {
  128. pages = -pages;
  129. make_pages_present(start, end);
  130. }
  131. vma->vm_mm->locked_vm -= pages;
  132. }
  133. return retval;
  134. }
  135. static int do_mlock(unsigned long start, size_t len, int on)
  136. {
  137. unsigned long nstart, end, tmp;
  138. struct vm_area_struct * vma, * next;
  139. int error;
  140. if (on && !capable(CAP_IPC_LOCK))
  141. return -EPERM;
  142. len = PAGE_ALIGN(len);
  143. end = start + len;
  144. if (end < start)
  145. return -EINVAL;
  146. if (end == start)
  147. return 0;
  148. vma = find_vma(current->mm, start);
  149. if (!vma || vma->vm_start > start)
  150. return -ENOMEM;
  151. for (nstart = start ; ; ) {
  152. unsigned int newflags;
  153. /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
  154. newflags = vma->vm_flags | VM_LOCKED;
  155. if (!on)
  156. newflags &= ~VM_LOCKED;
  157. if (vma->vm_end >= end) {
  158. error = mlock_fixup(vma, nstart, end, newflags);
  159. break;
  160. }
  161. tmp = vma->vm_end;
  162. next = vma->vm_next;
  163. error = mlock_fixup(vma, nstart, tmp, newflags);
  164. if (error)
  165. break;
  166. nstart = tmp;
  167. vma = next;
  168. if (!vma || vma->vm_start != nstart) {
  169. error = -ENOMEM;
  170. break;
  171. }
  172. }
  173. return error;
  174. }
  175. asmlinkage long sys_mlock(unsigned long start, size_t len)
  176. {
  177. unsigned long locked;
  178. unsigned long lock_limit;
  179. int error = -ENOMEM;
  180. down_write(&current->mm->mmap_sem);
  181. len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
  182. start &= PAGE_MASK;
  183. locked = len >> PAGE_SHIFT;
  184. locked += current->mm->locked_vm;
  185. lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
  186. lock_limit >>= PAGE_SHIFT;
  187. /* check against resource limits */
  188. if (locked > lock_limit)
  189. goto out;
  190. /* we may lock at most half of physical memory... */
  191. /* (this check is pretty bogus, but doesn't hurt) */
  192. if (locked > num_physpages/2)
  193. goto out;
  194. error = do_mlock(start, len, 1);
  195. out:
  196. up_write(&current->mm->mmap_sem);
  197. return error;
  198. }
  199. asmlinkage long sys_munlock(unsigned long start, size_t len)
  200. {
  201. int ret;
  202. down_write(&current->mm->mmap_sem);
  203. len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
  204. start &= PAGE_MASK;
  205. ret = do_mlock(start, len, 0);
  206. up_write(&current->mm->mmap_sem);
  207. return ret;
  208. }
  209. static int do_mlockall(int flags)
  210. {
  211. int error;
  212. unsigned int def_flags;
  213. struct vm_area_struct * vma;
  214. if (!capable(CAP_IPC_LOCK))
  215. return -EPERM;
  216. def_flags = 0;
  217. if (flags & MCL_FUTURE)
  218. def_flags = VM_LOCKED;
  219. current->mm->def_flags = def_flags;
  220. error = 0;
  221. for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
  222. unsigned int newflags;
  223. newflags = vma->vm_flags | VM_LOCKED;
  224. if (!(flags & MCL_CURRENT))
  225. newflags &= ~VM_LOCKED;
  226. error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
  227. if (error)
  228. break;
  229. }
  230. return error;
  231. }
  232. asmlinkage long sys_mlockall(int flags)
  233. {
  234. unsigned long lock_limit;
  235. int ret = -EINVAL;
  236. down_write(&current->mm->mmap_sem);
  237. if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
  238. goto out;
  239. lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
  240. lock_limit >>= PAGE_SHIFT;
  241. ret = -ENOMEM;
  242. if (current->mm->total_vm > lock_limit)
  243. goto out;
  244. /* we may lock at most half of physical memory... */
  245. /* (this check is pretty bogus, but doesn't hurt) */
  246. if (current->mm->total_vm > num_physpages/2)
  247. goto out;
  248. ret = do_mlockall(flags);
  249. out:
  250. up_write(&current->mm->mmap_sem);
  251. return ret;
  252. }
  253. asmlinkage long sys_munlockall(void)
  254. {
  255. int ret;
  256. down_write(&current->mm->mmap_sem);
  257. ret = do_mlockall(0);
  258. up_write(&current->mm->mmap_sem);
  259. return ret;
  260. }