vm.c
Uploaded by: lgb322
Upload date: 2013-02-24
Archive size: 30529 KB; file size: 11 KB
Source category: Embedded Linux
Development platform: Unix/Linux

/* vm.c -- Memory mapping for DRM -*- linux-c -*-
 * Created: Mon Jan  4 08:58:31 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */
#define __NO_VERSION__
#include "drmP.h"

struct vm_operations_struct drm_vm_ops = {
        nopage:  drm_vm_nopage,
        open:    drm_vm_open,
        close:   drm_vm_close,
};

struct vm_operations_struct drm_vm_shm_ops = {
        nopage:  drm_vm_shm_nopage,
        open:    drm_vm_open,
        close:   drm_vm_close,
};

struct vm_operations_struct drm_vm_shm_lock_ops = {
        nopage:  drm_vm_shm_nopage_lock,
        open:    drm_vm_open,
        close:   drm_vm_close,
};

struct vm_operations_struct drm_vm_dma_ops = {
        nopage:  drm_vm_dma_nopage,
        open:    drm_vm_open,
        close:   drm_vm_close,
};
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_nopage(struct vm_area_struct *vma,
                            unsigned long address,
                            int write_access)
#else
                                /* Return type changed in 2.3.23 */
struct page *drm_vm_nopage(struct vm_area_struct *vma,
                           unsigned long address,
                           int write_access)
#endif
{
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}

#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
                                unsigned long address,
                                int write_access)
#else
                                /* Return type changed in 2.3.23 */
struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int write_access)
#endif
{
#if LINUX_VERSION_CODE >= 0x020300
        drm_map_t       *map = (drm_map_t *)vma->vm_private_data;
#else
        drm_map_t       *map = (drm_map_t *)vma->vm_pte;
#endif
        unsigned long   physical;
        unsigned long   offset;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!map)                  return NOPAGE_OOM;    /* Nothing allocated */

        offset   = address - vma->vm_start;
        physical = (unsigned long)map->handle + offset;
        atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

        DRM_DEBUG("0x%08lx => 0x%08lx\n", address, physical);
#if LINUX_VERSION_CODE < 0x020317
        return physical;
#else
        return virt_to_page(physical);
#endif
}
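
/*
 * Note on the nopage handlers above and below: the return convention changed
 * in Linux 2.3.23 (hence the LINUX_VERSION_CODE checks), from the kernel
 * virtual address of the page to a struct page pointer.  In both cases the
 * handler takes an extra reference on the page (the atomic_inc() on the page
 * count), because the generic fault path drops one reference after installing
 * the PTE -- that is what the "Dec. by kernel" comments refer to.
 */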
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_shm_nopage_lock(struct vm_area_struct *vma,
                                     unsigned long address,
                                     int write_access)
#else
                                /* Return type changed in 2.3.23 */
struct page *drm_vm_shm_nopage_lock(struct vm_area_struct *vma,
                                    unsigned long address,
                                    int write_access)
#endif
{
        drm_file_t      *priv = vma->vm_file->private_data;
        drm_device_t    *dev  = priv->dev;
        unsigned long   physical;
        unsigned long   offset;
        unsigned long   page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dev->lock.hw_lock)    return NOPAGE_OOM;    /* Nothing allocated */

        offset   = address - vma->vm_start;
        page     = offset >> PAGE_SHIFT;
        physical = (unsigned long)dev->lock.hw_lock + offset;
        atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

        DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
        return physical;
#else
        return virt_to_page(physical);
#endif
}
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
                                unsigned long address,
                                int write_access)
#else
                                /* Return type changed in 2.3.23 */
struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int write_access)
#endif
{
        drm_file_t       *priv = vma->vm_file->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        unsigned long    physical;
        unsigned long    offset;
        unsigned long    page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM;    /* Nothing allocated */

        offset   = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page     = offset >> PAGE_SHIFT;
        physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
        atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

        DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
        return physical;
#else
        return virt_to_page(physical);
#endif
}
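
/*
 * Worked example of the offset arithmetic above, assuming PAGE_SIZE == 4096
 * (PAGE_SHIFT == 12): a fault at vm_start + 0x3a10 gives offset 0x3a10, page
 * index 0x3a10 >> 12 == 3, and in-page offset 0x3a10 & ~PAGE_MASK == 0xa10,
 * so the address handed back is dma->pagelist[3] + 0xa10.
 */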
void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t      *priv = vma->vm_file->private_data;
        drm_device_t    *dev  = priv->dev;
#if DRM_DEBUG_CODE
        drm_vma_entry_t *vma_entry;
#endif

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);
#if LINUX_VERSION_CODE < 0x020333
                                /* The map can exist after the fd is closed. */
        MOD_INC_USE_COUNT;      /* Needed before Linux 2.3.51 */
#endif

#if DRM_DEBUG_CODE
        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
#endif
}

void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t      *priv = vma->vm_file->private_data;
        drm_device_t    *dev  = priv->dev;
#if DRM_DEBUG_CODE
        drm_vma_entry_t *pt, *prev;
#endif

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
#if LINUX_VERSION_CODE < 0x020333
        MOD_DEC_USE_COUNT;      /* Needed before Linux 2.3.51 */
#endif
        atomic_dec(&dev->vma_count);

#if DRM_DEBUG_CODE
        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
#endif
}
int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev;
        drm_device_dma_t *dma;
        unsigned long    length = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev = priv->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

                                /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops    = &drm_vm_dma_ops;
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
                                /* In Linux 2.2.3 and above, this is
                                   handled in do_mmap() in mm/mmap.c. */
        ++filp->f_count;
#endif
        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_map_t    *map  = NULL;
        int          i;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!VM_OFFSET(vma)) return drm_mmap_dma(filp, vma);

                                /* A sequential search of a linked list is
                                   fine here because: 1) there will only be
                                   about 5-10 entries in the list and, 2) a
                                   DRI client only has to do this mapping
                                   once, so it doesn't have to be optimized
                                   for performance, even if the list was a
                                   bit longer. */
        for (i = 0; i < dev->map_count; i++) {
                map = dev->maplist[i];
                if (map->offset == VM_OFFSET(vma)) break;
        }

        if (i >= dev->map_count) return -EINVAL;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

                                /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); /* Read-only map: drop write permission */
#if defined(__i386__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                                /* Ye gads this is ugly.  With more thought
                                   we could move this up higher and use
                                   `protection_map' instead.  */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
        case _DRM_AGP:
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#elif defined(__ia64__)
                        if (map->type != _DRM_AGP)
                                vma->vm_page_prot =
                                        pgprot_writecombine(vma->vm_page_prot);
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
                if (remap_page_range(vma->vm_start,
                                     VM_OFFSET(vma),
                                     vma->vm_end - vma->vm_start,
                                     vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma));
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_SHM:
                if (map->flags & _DRM_CONTAINS_LOCK)
                        vma->vm_ops = &drm_vm_shm_lock_ops;
                else {
                        vma->vm_ops = &drm_vm_shm_ops;
#if LINUX_VERSION_CODE >= 0x020300
                        vma->vm_private_data = (void *)map;
#else
                        vma->vm_pte = (unsigned long)map;
#endif
                }
                                /* Don't let this area swap.  Change when
                                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_LOCKED;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }

        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
                                /* In Linux 2.2.3 and above, this is
                                   handled in do_mmap() in mm/mmap.c. */
        ++filp->f_count;
#endif
        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}
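
For orientation, here is a minimal, purely illustrative sketch of the user-space side that drm_mmap() above services. The device path and the mapping length are assumptions for this example; in a real DRI client the map offset and size are obtained from the driver/server (e.g. via libdrm), and an offset of 0 selects the DMA-buffer mapping handled by drm_mmap_dma().

/* mmap_drm_map.c -- illustrative sketch only; the device path, offset and
 * length below are assumptions, not values defined by this driver file. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        const char *dev_path = "/dev/dri/card0"; /* assumed device node */
        off_t       offset   = 0;                /* 0 => DMA buffers (drm_mmap_dma) */
        size_t      length   = 4096;             /* must match the map size / page count */
        int         fd       = open(dev_path, O_RDWR);
        void       *ptr;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* MAP_SHARED plus the file offset selects which drm_map_t (or the
         * DMA buffer list, for offset 0) the kernel-side drm_mmap() binds. */
        ptr = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset);
        if (ptr == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }

        /* ... use the mapping; page faults in it are serviced by the
         * nopage handlers in vm.c ... */

        munmap(ptr, length);
        close(fd);
        return 0;
}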