drm_vm.h

/* drm_vm.h -- Memory mapping for DRM -*- linux-c -*-
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
struct vm_operations_struct DRM(vm_ops) = {
        nopage: DRM(vm_nopage),
        open:   DRM(vm_open),
        close:  DRM(vm_close),
};

struct vm_operations_struct DRM(vm_shm_ops) = {
        nopage: DRM(vm_shm_nopage),
        open:   DRM(vm_open),
        close:  DRM(vm_shm_close),
};

struct vm_operations_struct DRM(vm_dma_ops) = {
        nopage: DRM(vm_dma_nopage),
        open:   DRM(vm_open),
        close:  DRM(vm_close),
};

struct vm_operations_struct DRM(vm_sg_ops) = {
        nopage: DRM(vm_sg_nopage),
        open:   DRM(vm_open),
        close:  DRM(vm_close),
};
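/*
 * Usage sketch: this header is a template that each DRM driver compiles
 * into its own module.  The DRM() macro mangles the shared names with a
 * per-driver prefix, so a hypothetical "foo" driver would define, before
 * including this file:
 *
 *     #define DRM(x) foo_##x
 *     #include "drm_vm.h"
 *
 * which turns the tables above into foo_vm_ops, foo_vm_shm_ops, etc.,
 * and the handlers below into foo_vm_nopage(), foo_vm_open(), and so on.
 */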
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                            unsigned long address,
                            int unused)
{
#if __REALLY_HAVE_AGP
        drm_file_t *priv        = vma->vm_file->private_data;
        drm_device_t *dev       = priv->dev;
        drm_map_t *map          = NULL;
        drm_map_list_t *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */
        if (!dev->agp->cant_use_aperture) goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = (drm_map_list_t *)list;
                map = r_list->map;
                if (!map) continue;
                if (map->offset == VM_OFFSET(vma)) break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = VM_OFFSET(vma) + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#if __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem) goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                agpmem->memory->memory[offset] &= dev->agp->page_mask;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset);
                return page;
        }
vm_nopage_error:
#endif /* __REALLY_HAVE_AGP */

        return NOPAGE_SIGBUS;           /* Disallow mremap */
}
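/*
 * Worked example of the AGP lookup above, with hypothetical numbers and
 * 4 KB pages: if an AGP block is bound at aperture offset 0x1000000 and
 * a fault resolves to baddr = 0x1003200, that block's range test matches
 * and
 *
 *     offset = (0x1003200 - 0x1000000) >> PAGE_SHIFT = 3
 *
 * so agpmem->memory->memory[3] (masked by page_mask) is the physical
 * address of the real page backing that part of the aperture.
 */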
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
                                unsigned long address,
                                int write_access)
{
        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!map)                  return NOPAGE_OOM;    /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);

#if 0 /* XXX page_to_bus is not a portable interface available on all platforms. */
        DRM_DEBUG("0x%08lx => 0x%08llx\n", address, (u64)page_to_bus(page));
#endif
        return page;
}
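/*
 * Note on the arithmetic above: an _DRM_SHM map's handle is a vmalloc()
 * address, which is virtually but not physically contiguous, so the
 * faulting offset cannot simply be added to a physical base.  Instead
 * vmalloc_to_page() walks the kernel page tables for handle + offset to
 * find the struct page that happens to back that virtual page.
 */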
/* Special close routine which deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
void DRM(vm_shm_close)(struct vm_area_struct *vma)
{
        drm_file_t *priv        = vma->vm_file->private_data;
        drm_device_t *dev       = priv->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map) found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 &&
            map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list = &dev->maplist->head;
                list_for_each(list, &dev->maplist->head) {
                        r_list = (drm_map_list_t *)list;
                        if (r_list->map == map) found_maps++;
                }

                if (!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                DRM(ioremapfree)(map->handle, map->size);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        }
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        up(&dev->struct_sem);
}
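/*
 * Note on found_maps above: the walk counts every remaining VMA that
 * still references this map, including the one being closed (whose list
 * entry is unlinked in the same pass).  found_maps == 1 therefore means
 * "we were the last mapping", and only then, and only if the map is
 * _DRM_REMOVABLE and absent from the global maplist, is its backing
 * storage released.
 */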
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
                                unsigned long address,
                                int write_access)
{
        drm_file_t *priv        = vma->vm_file->private_data;
        drm_device_t *dev       = priv->dev;
        drm_device_dma_t *dma   = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM;    /* Nothing allocated */

        offset  = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] +
                             (offset & (~PAGE_MASK))));
        get_page(page);

#if 0 /* XXX page_to_bus is not a portable interface available on all platforms. */
        DRM_DEBUG("0x%08lx (page %lu) => 0x%08llx\n", address, page_nr,
                  (u64)page_to_bus(page));
#endif
        return page;
}
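#if 0
/* Illustrative only (hypothetical values, never compiled): the
 * fault-to-page math used above, assuming 4 KB pages (PAGE_SHIFT == 12).
 * A fault at vm_start + 0x5123 yields:
 */
unsigned long offset  = 0x5123;
unsigned long page_nr = offset >> PAGE_SHIFT;   /* 5: index into pagelist */
unsigned long in_page = offset & (~PAGE_MASK);  /* 0x123: byte within page */
/* dma->pagelist[] holds kernel virtual addresses of the DMA buffers, so
 * virt_to_page() can translate pagelist[page_nr] + in_page directly. */
#endif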
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
                               unsigned long address,
                               int write_access)
{
        drm_map_t *map          = (drm_map_t *)vma->vm_private_data;
        drm_file_t *priv        = vma->vm_file->private_data;
        drm_device_t *dev       = priv->dev;
        drm_sg_mem_t *entry     = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)                return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!entry->pagelist)      return NOPAGE_OOM;    /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - dev->sg->handle;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}
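/*
 * Note on map_offset above: for a scatter-gather map, map->offset is not
 * zero-based; it is biased by dev->sg->handle when the map is created.
 * Subtracting the handle recovers the map's true offset into the SG
 * area, and adding the fault offset (both in pages) indexes pagelist[].
 * Unlike the DMA case, no virt_to_page() call is needed here: the SG
 * code already stores struct page pointers.
 */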
void DRM(vm_open)(struct vm_area_struct *vma)
{
        drm_file_t *priv        = vma->vm_file->private_data;
        drm_device_t *dev       = priv->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
}
void DRM(vm_close)(struct vm_area_struct *vma)
{
        drm_file_t *priv        = vma->vm_file->private_data;
        drm_device_t *dev       = priv->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
}
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv        = filp->private_data;
        drm_device_t *dev;
        drm_device_dma_t *dma;
        unsigned long length    = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev = priv->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops   = &DRM(vm_dma_ops);
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;            /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
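/*
 * Userspace-side sketch (illustrative only; fd and sizes hypothetical):
 * mapping the DMA buffers requires offset 0 -- which is what routes the
 * mmap() into DRM(mmap_dma) from DRM(mmap) below -- and a length that
 * exactly equals page_count << PAGE_SHIFT, e.g. for 16 pages of 4 KB:
 *
 *     void *bufs = mmap(NULL, 16 * 4096, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *
 * Any other length fails with -EINVAL.
 */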
#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS()    (map->offset)
#endif

#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS()    (dev->hose->dense_mem_base - \
                                 dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS()    0
#endif
#endif
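/*
 * A driver can override these hooks before including this template; they
 * expand inside DRM(mmap) below, where "map" and "dev" are in scope.  For
 * example, a hypothetical driver keeping bus-relative map offsets (the
 * bus_base field is invented for illustration) could define:
 *
 *     #define DRIVER_GET_MAP_OFS()  (map->offset - dev->bus_base)
 *
 * The defaults return the map offset unchanged and, on Alpha, rebase
 * register mappings from the CPU's dense memory window to bus space.
 */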
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv        = filp->private_data;
        drm_device_t *dev       = priv->dev;
        drm_map_t *map          = NULL;
        drm_map_list_t *r_list;
        unsigned long offset    = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!priv->authenticated) return -EACCES;

        if (!VM_OFFSET(vma)) return DRM(mmap_dma)(filp, vma);

        /* A sequential search of a linked list is
           fine here because: 1) there will only be
           about 5-10 entries in the list and, 2) a
           DRI client only has to do this mapping
           once, so it doesn't have to be optimized
           for performance, even if the list was a
           bit longer. */
        list_for_each(list, &dev->maplist->head) {
                unsigned long off;

                r_list = (drm_map_list_t *)list;
                map = r_list->map;
                if (!map) continue;
                off = DRIVER_GET_MAP_OFS();
                if (off == VM_OFFSET(vma)) break;
        }

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }
        switch (map->type) {
        case _DRM_AGP:
#if defined(__alpha__)
                /*
                 * On Alpha we can't talk to bus dma address from the
                 * CPU, so for memory of type DRM_AGP, we'll deal with
                 * sorting out the real physical pages and mappings
                 * in nopage()
                 */
                vma->vm_ops = &DRM(vm_ops);
                break;
#endif
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__)
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#elif defined(__ia64__)
                        if (map->type != _DRM_AGP)
                                vma->vm_page_prot =
                                        pgprot_writecombine(vma->vm_page_prot);
#elif defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
                offset = DRIVER_GET_REG_OFS();
#ifdef __sparc__
                if (io_remap_page_range(vma->vm_start,
                                        VM_OFFSET(vma) + offset,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot, 0))
#else
                if (remap_page_range(vma->vm_start,
                                     VM_OFFSET(vma) + offset,
                                     vma->vm_end - vma->vm_start,
                                     vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
                vma->vm_ops = &DRM(vm_ops);
                break;
        case _DRM_SHM:
                vma->vm_ops = &DRM(vm_shm_ops);
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &DRM(vm_sg_ops);
                vma->vm_private_data = (void *)map;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;            /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
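/*
 * Userspace-side sketch of the path into DRM(mmap) above (illustrative
 * only; the device path, size and offset are hypothetical -- real values
 * come from the X server / DRM map ioctls):
 *
 *     int fd = open("/dev/dri/card0", O_RDWR);
 *     ... authenticate with the X server (DRI protocol) ...
 *     void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map_offset);
 *
 * An unauthenticated client gets -EACCES, a length that does not match
 * map->size gets -EINVAL, and offset 0 is routed to DRM(mmap_dma)().
 */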