raw1394.c
Uploaded by: jlfgdled
Upload date: 2013-04-10
Archive size: 33168k
File size: 34k
Source category: Linux/Unix programming
Development platform: Unix/Linux

  1. /*
  2.  * IEEE 1394 for Linux
  3.  *
  4.  * Raw interface to the bus
  5.  *
  6.  * Copyright (C) 1999, 2000 Andreas E. Bombe
  7.  *
  8.  * This code is licensed under the GPL.  See the file COPYING in the root
  9.  * directory of the kernel sources for details.
  10.  */
  11. #include <linux/kernel.h>
  12. #include <linux/list.h>
  13. #include <linux/string.h>
  14. #include <linux/slab.h>
  15. #include <linux/fs.h>
  16. #include <linux/poll.h>
  17. #include <linux/module.h>
  18. #include <linux/init.h>
  19. #include <linux/version.h>
  20. #include <linux/smp_lock.h>
  21. #include <asm/uaccess.h>
  22. #include <asm/atomic.h>
  23. #include <linux/devfs_fs_kernel.h>
  24. #include "ieee1394.h"
  25. #include "ieee1394_types.h"
  26. #include "ieee1394_core.h"
  27. #include "hosts.h"
  28. #include "highlevel.h"
  29. #include "ieee1394_transactions.h"
  30. #include "raw1394.h"
  31. #if BITS_PER_LONG == 64
  32. #define int2ptr(x) ((void *)x)
  33. #define ptr2int(x) ((u64)x)
  34. #else
  35. #define int2ptr(x) ((void *)(u32)x)
  36. #define ptr2int(x) ((u64)(u32)x)
  37. #endif
/* devfs directory handle for the raw1394 device node. */
static devfs_handle_t devfs_handle;

/* All known host adapters (struct host_info); host_count mirrors the list
 * length.  Both are guarded by host_info_lock. */
static LIST_HEAD(host_info_list);
static int host_count;
static spinlock_t host_info_lock = SPIN_LOCK_UNLOCKED;

/* Bumped whenever a host is added or removed so user space can detect a
 * stale card list (compared in state_initialized()). */
static atomic_t internal_generation = ATOMIC_INIT(0);

static struct hpsb_highlevel *hl_handle;

/* Total bytes currently buffered for iso/FCP delivery; capped at
 * iso_buffer_max so a slow reader cannot exhaust kernel memory. */
static atomic_t iso_buffer_size;
static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */

static void queue_complete_cb(struct pending_request *req);
  47. static struct pending_request *__alloc_pending_request(int flags)
  48. {
  49.         struct pending_request *req;
  50.         req = (struct pending_request *)kmalloc(sizeof(struct pending_request),
  51.                                                 flags);
  52.         if (req != NULL) {
  53.                 memset(req, 0, sizeof(struct pending_request));
  54.                 INIT_LIST_HEAD(&req->list);
  55. INIT_TQUEUE(&req->tq, (void(*)(void*))queue_complete_cb, NULL);
  56.         }
  57.         return req;
  58. }
  59. static inline struct pending_request *alloc_pending_request(void)
  60. {
  61.         return __alloc_pending_request(SLAB_KERNEL);
  62. }
  63. static void free_pending_request(struct pending_request *req)
  64. {
  65.         if (req->ibs) {
  66.                 if (atomic_dec_and_test(&req->ibs->refcount)) {
  67.                         atomic_sub(req->ibs->data_size, &iso_buffer_size);
  68.                         kfree(req->ibs);
  69.                 }
  70.         } else if (req->free_data) {
  71.                 kfree(req->data);
  72.         }
  73.         free_hpsb_packet(req->packet);
  74.         kfree(req);
  75. }
/* Move a request from its current list (normally req_pending) onto its
 * file's req_complete list and wake readers/pollers.  Callable from
 * interrupt context, hence the irqsave locking. */
static void queue_complete_req(struct pending_request *req)
{
        unsigned long flags;
        struct file_info *fi = req->file_info;

        spin_lock_irqsave(&fi->reqlists_lock, flags);
        list_del(&req->list);
        list_add_tail(&req->list, &fi->req_complete);
        spin_unlock_irqrestore(&fi->reqlists_lock, flags);

        /* One up() per completed request; raw1394_read() down()s it to
         * consume exactly one entry. */
        up(&fi->complete_sem);
        wake_up_interruptible(&fi->poll_wait_complete);
}
/* Packet completion callback (runs from the packet's task queue).
 * Translates the 1394 ack and response codes into a raw1394 error word
 * and hands the request back to user space. */
static void queue_complete_cb(struct pending_request *req)
{
        struct hpsb_packet *packet = req->packet;
        /* Response code: bits 12..15 of the second header quadlet. */
        int rcode = (packet->header[1] >> 12) & 0xf;

        switch (packet->ack_code) {
        case ACKX_NONE:
        case ACKX_SEND_ERROR:
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                break;
        case ACKX_ABORTED:
                req->req.error = RAW1394_ERROR_ABORTED;
                break;
        case ACKX_TIMEOUT:
                req->req.error = RAW1394_ERROR_TIMEOUT;
                break;
        default:
                /* Pack ack code (high half) and rcode (low half) into a
                 * single error value for user space to decode. */
                req->req.error = (packet->ack_code << 16) | rcode;
                break;
        }

        /* Anything other than a clean split-transaction completion means
         * there is no payload to copy back. */
        if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
                req->req.length = 0;
        }

        /* The transaction is over either way; release its tlabel. */
        free_tlabel(packet->host, packet->node_id, packet->tlabel);
        queue_complete_req(req);
}
/* hpsb highlevel callback: a new 1394 host adapter was registered. */
static void add_host(struct hpsb_host *host)
{
        struct host_info *hi;

        hi = (struct host_info *)kmalloc(sizeof(struct host_info), SLAB_KERNEL);
        if (hi != NULL) {
                INIT_LIST_HEAD(&hi->list);
                hi->host = host;
                INIT_LIST_HEAD(&hi->file_info_list);

                spin_lock_irq(&host_info_lock);
                list_add_tail(&hi->list, &host_info_list);
                host_count++;
                spin_unlock_irq(&host_info_lock);
        }

        /* NOTE(review): the generation is bumped even when the allocation
         * failed and no host was recorded; it only invalidates cached card
         * lists so this looks harmless, but worth confirming. */
        atomic_inc(&internal_generation);
}
  127. static struct host_info *find_host_info(struct hpsb_host *host)
  128. {
  129.         struct list_head *lh;
  130.         struct host_info *hi;
  131.         list_for_each(lh, &host_info_list) {
  132.                 hi = list_entry(lh, struct host_info, list);
  133.                 if (hi->host == host) {
  134.                         return hi;
  135.                 }
  136.         }
  137.         return NULL;
  138. }
  139. static void remove_host(struct hpsb_host *host)
  140. {
  141.         struct host_info *hi;
  142.         spin_lock_irq(&host_info_lock);
  143.         hi = find_host_info(host);
  144.         if (hi != NULL) {
  145.                 list_del(&hi->list);
  146.                 host_count--;
  147.         }
  148.         spin_unlock_irq(&host_info_lock);
  149.         if (hi == NULL) {
  150.                 printk(KERN_ERR "raw1394: attempt to remove unknown host "
  151.                        "0x%pn", host);
  152.                 return;
  153.         }
  154.         kfree(hi);
  155.         atomic_inc(&internal_generation);
  156. }
/* hpsb highlevel callback: a bus reset occurred on @host.  Queue one
 * RAW1394_REQ_BUS_RESET event to every file attached to that host. */
static void host_reset(struct hpsb_host *host)
{
        unsigned long flags;
        struct list_head *lh;
        struct host_info *hi;
        struct file_info *fi;
        struct pending_request *req;

        spin_lock_irqsave(&host_info_lock, flags);
        hi = find_host_info(host);
        if (hi != NULL) {
                list_for_each(lh, &hi->file_info_list) {
                        fi = list_entry(lh, struct file_info, list);
                        /* Atomic allocation: we hold a spinlock and may be
                         * in interrupt context.  On failure the event is
                         * silently dropped for this file. */
                        req = __alloc_pending_request(SLAB_ATOMIC);
                        if (req != NULL) {
                                req->file_info = fi;
                                req->req.type = RAW1394_REQ_BUS_RESET;
                                req->req.generation = get_hpsb_generation(host);
                                /* misc layout: local node id in the high 16
                                 * bits, node count in the low bits... */
                                req->req.misc = (host->node_id << 16)
                                        | host->node_count;
                                if (fi->protocol_version > 3) {
                                        /* ...plus the IRM id for newer
                                         * user-space protocol versions. */
                                        req->req.misc |= ((host->irm_id
                                                           & NODE_MASK) << 8);
                                }
                                queue_complete_req(req);
                        }
                }
        }
        spin_unlock_irqrestore(&host_info_lock, flags);
}
/* hpsb highlevel callback: an iso packet arrived on @channel.  Fan the
 * payload out to every file listening on that channel; the data is stored
 * once in a refcounted iso_block_store shared by all queued requests. */
static void iso_receive(struct hpsb_host *host, int channel, quadlet_t *data,
                        unsigned int length)
{
        unsigned long flags;
        struct list_head *lh;
        struct host_info *hi;
        struct file_info *fi;
        struct pending_request *req;
        struct iso_block_store *ibs = NULL;
        LIST_HEAD(reqs);

        /* Enforce the global buffering cap: drop rather than exhaust
         * kernel memory when readers fall behind. */
        if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
                HPSB_INFO("dropped iso packet");
                return;
        }

        spin_lock_irqsave(&host_info_lock, flags);
        hi = find_host_info(host);
        if (hi != NULL) {
                list_for_each(lh, &hi->file_info_list) {
                        fi = list_entry(lh, struct file_info, list);
                        /* listen_channels is a 64-bit channel bitmask. */
                        if (!(fi->listen_channels & (1ULL << channel))) {
                                continue;
                        }

                        req = __alloc_pending_request(SLAB_ATOMIC);
                        if (!req) break;

                        /* Allocate the shared data block lazily, on the
                         * first interested listener. */
                        if (!ibs) {
                                ibs = kmalloc(sizeof(struct iso_block_store)
                                              + length, SLAB_ATOMIC);
                                if (!ibs) {
                                        kfree(req);
                                        break;
                                }

                                atomic_add(length, &iso_buffer_size);
                                atomic_set(&ibs->refcount, 0);
                                ibs->data_size = length;
                                memcpy(ibs->data, data, length);
                        }

                        /* One reference per queued request; released in
                         * free_pending_request(). */
                        atomic_inc(&ibs->refcount);

                        req->file_info = fi;
                        req->ibs = ibs;
                        req->data = ibs->data;
                        req->req.type = RAW1394_REQ_ISO_RECEIVE;
                        req->req.generation = get_hpsb_generation(host);
                        req->req.misc = 0;
                        req->req.recvb = ptr2int(fi->iso_buffer);
                        req->req.length = MIN(length, fi->iso_buffer_length);

                        list_add_tail(&req->list, &reqs);
                }
        }
        spin_unlock_irqrestore(&host_info_lock, flags);

        /* Complete outside host_info_lock: queue_complete_req() takes
         * per-file locks and wakes sleepers. */
        lh = reqs.next;
        while (lh != &reqs) {
                req = list_entry(lh, struct pending_request, list);
                lh = lh->next;
                queue_complete_req(req);
        }
}
/* hpsb highlevel callback: an FCP command/response frame arrived.  Deliver
 * it to every file with a registered FCP buffer, sharing the payload
 * through one refcounted iso_block_store (same scheme as iso_receive). */
static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
                        int cts, u8 *data, unsigned int length)
{
        unsigned long flags;
        struct list_head *lh;
        struct host_info *hi;
        struct file_info *fi;
        struct pending_request *req;
        struct iso_block_store *ibs = NULL;
        LIST_HEAD(reqs);

        /* Same global buffering cap as iso reception. */
        if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
                HPSB_INFO("dropped fcp request");
                return;
        }

        spin_lock_irqsave(&host_info_lock, flags);
        hi = find_host_info(host);
        if (hi != NULL) {
                list_for_each(lh, &hi->file_info_list) {
                        fi = list_entry(lh, struct file_info, list);
                        if (!fi->fcp_buffer) {
                                continue;
                        }

                        req = __alloc_pending_request(SLAB_ATOMIC);
                        if (!req) break;

                        /* First interested file allocates the shared block. */
                        if (!ibs) {
                                ibs = kmalloc(sizeof(struct iso_block_store)
                                              + length, SLAB_ATOMIC);
                                if (!ibs) {
                                        kfree(req);
                                        break;
                                }

                                atomic_add(length, &iso_buffer_size);
                                atomic_set(&ibs->refcount, 0);
                                ibs->data_size = length;
                                memcpy(ibs->data, data, length);
                        }

                        atomic_inc(&ibs->refcount);

                        req->file_info = fi;
                        req->ibs = ibs;
                        req->data = ibs->data;
                        req->req.type = RAW1394_REQ_FCP_REQUEST;
                        req->req.generation = get_hpsb_generation(host);
                        /* misc: sender node id plus direction flag. */
                        req->req.misc = nodeid | (direction << 16);
                        req->req.recvb = ptr2int(fi->fcp_buffer);
                        req->req.length = length;

                        list_add_tail(&req->list, &reqs);
                }
        }
        spin_unlock_irqrestore(&host_info_lock, flags);

        /* Complete outside host_info_lock. */
        lh = reqs.next;
        while (lh != &reqs) {
                req = list_entry(lh, struct pending_request, list);
                lh = lh->next;
                queue_complete_req(req);
        }
}
/* read(2) handler: wait (unless O_NONBLOCK) for a completed request, copy
 * its payload to the user buffer the original request designated, then
 * copy the raw1394_request header to @buffer.  Returns the size of one
 * raw1394_request on success. */
static ssize_t raw1394_read(struct file *file, char *buffer, size_t count,
                    loff_t *offset_is_ignored)
{
        struct file_info *fi = (struct file_info *)file->private_data;
        struct list_head *lh;
        struct pending_request *req;

        /* User space must read exactly one request structure per call. */
        if (count != sizeof(struct raw1394_request)) {
                return -EINVAL;
        }
        if (!access_ok(VERIFY_WRITE, buffer, count)) {
                return -EFAULT;
        }

        /* complete_sem counts completed requests; acquiring it guarantees
         * req_complete is non-empty below. */
        if (file->f_flags & O_NONBLOCK) {
                if (down_trylock(&fi->complete_sem)) {
                        return -EAGAIN;
                }
        } else {
                if (down_interruptible(&fi->complete_sem)) {
                        return -ERESTARTSYS;
                }
        }

        spin_lock_irq(&fi->reqlists_lock);
        lh = fi->req_complete.next;
        list_del(lh);
        spin_unlock_irq(&fi->reqlists_lock);

        req = list_entry(lh, struct pending_request, list);

        /* A fault while delivering the payload is reported via req.error,
         * not as a failed read. */
        if (req->req.length) {
                if (copy_to_user(int2ptr(req->req.recvb), req->data,
                                 req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                }
        }
        /* access_ok() was verified above, hence __copy_to_user.
         * NOTE(review): its return value is ignored; a fault here loses
         * the header silently. */
        __copy_to_user(buffer, &req->req, sizeof(req->req));

        free_pending_request(req);
        return sizeof(struct raw1394_request);
}
  336. static int state_opened(struct file_info *fi, struct pending_request *req)
  337. {
  338.         if (req->req.type == RAW1394_REQ_INITIALIZE) {
  339.                 switch (req->req.misc) {
  340.                 case RAW1394_KERNELAPI_VERSION:
  341.                 case 3:
  342.                         fi->state = initialized;
  343.                         fi->protocol_version = req->req.misc;
  344.                         req->req.error = RAW1394_ERROR_NONE;
  345.                         req->req.generation = atomic_read(&internal_generation);
  346.                         break;
  347.                 default:
  348.                         req->req.error = RAW1394_ERROR_COMPAT;
  349.                         req->req.misc = RAW1394_KERNELAPI_VERSION;
  350.                 }
  351.         } else {
  352.                 req->req.error = RAW1394_ERROR_STATE_ORDER;
  353.         }
  354.         req->req.length = 0;
  355.         queue_complete_req(req);
  356.         return sizeof(struct raw1394_request);
  357. }
/* Handle requests in the "initialized" state: protocol version agreed but
 * not yet bound to a host adapter.  Valid requests are LIST_CARDS and
 * SET_CARD; the latter transitions to the connected state. */
static int state_initialized(struct file_info *fi, struct pending_request *req)
{
        struct list_head *lh;
        struct host_info *hi;
        struct raw1394_khost_list *khl;

        /* The card list the caller saw must still be current. */
        if (req->req.generation != atomic_read(&internal_generation)) {
                req->req.error = RAW1394_ERROR_GENERATION;
                req->req.generation = atomic_read(&internal_generation);
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        switch (req->req.type) {
        case RAW1394_REQ_LIST_CARDS:
                spin_lock_irq(&host_info_lock);
                /* Atomic allocation: host_info_lock is held. */
                khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
                              SLAB_ATOMIC);
                if (khl != NULL) {
                        req->req.misc = host_count;
                        req->data = (quadlet_t *)khl;

                        list_for_each(lh, &host_info_list) {
                                hi = list_entry(lh, struct host_info, list);
                                khl->nodes = hi->host->node_count;
                                strcpy(khl->name, hi->host->driver->name);
                                khl++;
                        }
                }
                spin_unlock_irq(&host_info_lock);

                if (khl != NULL) {
                        req->req.error = RAW1394_ERROR_NONE;
                        /* Truncate to what the caller's buffer can hold. */
                        req->req.length = MIN(req->req.length,
                                              sizeof(struct raw1394_khost_list)
                                              * req->req.misc);
                        /* req->data is kmalloc'ed; freed with the request. */
                        req->free_data = 1;
                } else {
                        return -ENOMEM;
                }
                break;

        case RAW1394_REQ_SET_CARD:
                lh = NULL;
                spin_lock_irq(&host_info_lock);
                if (req->req.misc < host_count) {
                        /* misc is the zero-based card index; walk to it. */
                        lh = host_info_list.next;
                        while (req->req.misc--) {
                                lh = lh->next;
                        }
                        hi = list_entry(lh, struct host_info, list);
                        hpsb_ref_host(hi->host);
                        list_add_tail(&fi->list, &hi->file_info_list);
                        fi->host = hi->host;
                        fi->state = connected;
                }
                spin_unlock_irq(&host_info_lock);

                if (lh != NULL) {
                        req->req.error = RAW1394_ERROR_NONE;
                        req->req.generation = get_hpsb_generation(fi->host);
                        /* Same misc layout as the bus-reset event: node id,
                         * node count, and (protocol >3) the IRM id. */
                        req->req.misc = (fi->host->node_id << 16) 
                                | fi->host->node_count;
                        if (fi->protocol_version > 3) {
                                req->req.misc |=
                                        (fi->host->irm_id & NODE_MASK) << 8;
                        }
                } else {
                        req->req.error = RAW1394_ERROR_INVALID_ARG;
                }
                req->req.length = 0;
                break;

        default:
                req->req.error = RAW1394_ERROR_STATE_ORDER;
                req->req.length = 0;
                break;
        }

        queue_complete_req(req);
        return sizeof(struct raw1394_request);
}
/* Subscribe to or unsubscribe from an iso channel.  req.misc carries the
 * channel: 0..63 subscribes; a negative value is the one's complement of
 * the channel to unsubscribe. */
static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
{
        int channel = req->req.misc;

        /* NOTE(review): plain spin_lock here, while other paths take
         * host_info_lock with irqsave -- confirm interrupt-context users
         * of this lock cannot interrupt us here. */
        spin_lock(&host_info_lock);
        if ((channel > 63) || (channel < -64)) {
                req->req.error = RAW1394_ERROR_INVALID_ARG;
        } else if (channel >= 0) {
                /* allocate channel req.misc */
                if (fi->listen_channels & (1ULL << channel)) {
                        req->req.error = RAW1394_ERROR_ALREADY;
                } else {
                        fi->listen_channels |= 1ULL << channel;
                        hpsb_listen_channel(hl_handle, fi->host, channel);
                        /* Remember where received iso data should land. */
                        fi->iso_buffer = int2ptr(req->req.recvb);
                        fi->iso_buffer_length = req->req.length;
                }
        } else {
                /* deallocate channel (one's complement neg) req.misc */
                channel = ~channel;
                if (fi->listen_channels & (1ULL << channel)) {
                        hpsb_unlisten_channel(hl_handle, fi->host, channel);
                        fi->listen_channels &= ~(1ULL << channel);
                } else {
                        req->req.error = RAW1394_ERROR_INVALID_ARG;
                }
        }
        req->req.length = 0;
        queue_complete_req(req);
        spin_unlock(&host_info_lock);
}
  464. static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
  465. {
  466.         if (req->req.misc) {
  467.                 if (fi->fcp_buffer) {
  468.                         req->req.error = RAW1394_ERROR_ALREADY;
  469.                 } else {
  470.                         fi->fcp_buffer = (u8 *)int2ptr(req->req.recvb);
  471.                 }
  472.         } else {
  473.                 if (!fi->fcp_buffer) {
  474.                         req->req.error = RAW1394_ERROR_ALREADY;
  475.                 } else {
  476.                         fi->fcp_buffer = NULL;
  477.                 }
  478.         }
  479.         req->req.length = 0;
  480.         queue_complete_req(req);
  481. }
/* Execute an async request addressed to our own node by calling the
 * highlevel routines directly rather than sending a packet on the wire.
 * Completes the request and returns sizeof(struct raw1394_request), or
 * -ENOMEM if the bounce buffer cannot be allocated. */
static int handle_local_request(struct file_info *fi,
                                struct pending_request *req, int node)
{
        /* Low 48 bits of the 1394 address; the node id sits above them. */
        u64 addr = req->req.address & 0xffffffffffffULL;

        req->data = kmalloc(req->req.length, SLAB_KERNEL);
        if (!req->data) return -ENOMEM;
        req->free_data = 1;

        switch (req->req.type) {
        case RAW1394_REQ_ASYNC_READ:
                req->req.error = highlevel_read(fi->host, node, req->data, addr,
                                                req->req.length);
                break;

        case RAW1394_REQ_ASYNC_WRITE:
                if (copy_from_user(req->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }
                req->req.error = highlevel_write(fi->host, node, node, req->data,
                                                 addr, req->req.length);
                /* Writes return no payload. */
                req->req.length = 0;
                break;

        case RAW1394_REQ_LOCK:
                /* FETCH_ADD/LITTLE_ADD take one quadlet; the other lock
                 * extcodes take an argument plus data (8 bytes). */
                if ((req->req.misc == EXTCODE_FETCH_ADD)
                    || (req->req.misc == EXTCODE_LITTLE_ADD)) {
                        if (req->req.length != 4) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                } else {
                        if (req->req.length != 8) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                }

                if (copy_from_user(req->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }

                if (req->req.length == 8) {
                        req->req.error = highlevel_lock(fi->host, node,
                                                        req->data, addr,
                                                        req->data[1],
                                                        req->data[0],
                                                        req->req.misc);
                        /* A lock always returns a single quadlet. */
                        req->req.length = 4;
                } else {
                        req->req.error = highlevel_lock(fi->host, node,
                                                        req->data, addr,
                                                        req->data[0], 0,
                                                        req->req.misc);
                }
                break;

        case RAW1394_REQ_LOCK64:
        default:
                /* LOCK64 is not implemented for the local node. */
                req->req.error = RAW1394_ERROR_STATE_ORDER;
        }

        if (req->req.error)
                req->req.length = 0;
        /* Make the error word look like a completed packet (ack code in
         * the high half) so user space decodes it uniformly. */
        if (req->req.error >= 0)
                req->req.error |= ACK_PENDING << 16;

        queue_complete_req(req);
        return sizeof(struct raw1394_request);
}
/* Build and send an async packet for a request addressed to another node.
 * On success the request is parked on req_pending and completed later by
 * queue_complete_cb(); validation failures complete immediately. */
static int handle_remote_request(struct file_info *fi,
                                 struct pending_request *req, int node)
{
        struct hpsb_packet *packet = NULL;
        /* Low 48 bits of the 1394 address. */
        u64 addr = req->req.address & 0xffffffffffffULL;

        switch (req->req.type) {
        case RAW1394_REQ_ASYNC_READ:
                if (req->req.length == 4) {
                        /* Quadlet read: the result arrives in the response
                         * header, not in a separate data block. */
                        packet = hpsb_make_readqpacket(fi->host, node, addr);
                        if (!packet) return -ENOMEM;

                        req->data = &packet->header[3];
                } else {
                        packet = hpsb_make_readbpacket(fi->host, node, addr,
                                                       req->req.length);
                        if (!packet) return -ENOMEM;

                        req->data = packet->data;
                }
                break;

        case RAW1394_REQ_ASYNC_WRITE:
                if (req->req.length == 4) {
                        quadlet_t x;

                        /* NOTE(review): on a fault the error is recorded
                         * but execution continues; the request completes
                         * via req.error below without being sent. */
                        if (copy_from_user(&x, int2ptr(req->req.sendb), 4)) {
                                req->req.error = RAW1394_ERROR_MEMFAULT;
                        }

                        packet = hpsb_make_writeqpacket(fi->host, node, addr,
                                                        x);
                        if (!packet) return -ENOMEM;
                } else {
                        packet = hpsb_make_writebpacket(fi->host, node, addr,
                                                        req->req.length);
                        if (!packet) return -ENOMEM;

                        if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                                           req->req.length)) {
                                req->req.error = RAW1394_ERROR_MEMFAULT;
                        }
                }
                /* Writes return no payload. */
                req->req.length = 0;
                break;

        case RAW1394_REQ_LOCK:
                /* Same length rules as the local case: add-style extcodes
                 * take 4 bytes, compare-swap style take 8. */
                if ((req->req.misc == EXTCODE_FETCH_ADD)
                    || (req->req.misc == EXTCODE_LITTLE_ADD)) {
                        if (req->req.length != 4) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                } else {
                        if (req->req.length != 8) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                }

                packet = hpsb_make_lockpacket(fi->host, node, addr,
                                              req->req.misc);
                if (!packet) return -ENOMEM;

                if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }

                req->data = packet->data;
                /* A lock response is always a single quadlet. */
                req->req.length = 4;
                break;

        case RAW1394_REQ_LOCK64:
        default:
                req->req.error = RAW1394_ERROR_STATE_ORDER;
        }

        req->packet = packet;

        /* Validation or copy failure: complete immediately, never send.
         * The packet (if any) is freed with the request. */
        if (req->req.error) {
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        /* Arrange for queue_complete_cb() to run on packet completion. */
        req->tq.data = req;
        hpsb_add_packet_complete_task(packet, &req->tq);

        spin_lock_irq(&fi->reqlists_lock);
        list_add_tail(&req->list, &fi->req_pending);
        spin_unlock_irq(&fi->reqlists_lock);

        packet->generation = req->req.generation;

        if (!hpsb_send_packet(packet)) {
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                req->req.length = 0;
                /* The transaction never hit the wire; reclaim its tlabel. */
                free_tlabel(packet->host, packet->node_id, packet->tlabel);
                queue_complete_req(req);
        }
        return sizeof(struct raw1394_request);
}
/* Send one iso packet on @channel.  Completion (via the packet's task
 * queue) requeues the request directly for user space. */
static int handle_iso_send(struct file_info *fi, struct pending_request *req,
                           int channel)
{
        struct hpsb_packet *packet;

        packet = alloc_hpsb_packet(req->req.length);
        if (!packet) return -ENOMEM;
        req->packet = packet;

        /* misc carries the tag (bits 16-17) and sync (low 4 bits). */
        fill_iso_packet(packet, req->req.length, channel & 0x3f,
                        (req->req.misc >> 16) & 0x3, req->req.misc & 0xf);
        packet->type = hpsb_iso;
        /* Low two address bits select the speed code. */
        packet->speed_code = req->req.address & 0x3;
        packet->host = fi->host;

        if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                           req->req.length)) {
                req->req.error = RAW1394_ERROR_MEMFAULT;
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        /* Iso sends bypass queue_complete_cb(): there is no ack/rcode to
         * decode, so completion goes straight to queue_complete_req(). */
        req->tq.data = req;
        req->tq.routine = (void (*)(void*))queue_complete_req;
        req->req.length = 0;

        hpsb_add_packet_complete_task(packet, &req->tq);

        spin_lock_irq(&fi->reqlists_lock);
        list_add_tail(&req->list, &fi->req_pending);
        spin_unlock_irq(&fi->reqlists_lock);

        /* Update the generation of the packet just before sending. */
        packet->generation = get_hpsb_generation(fi->host);

        if (!hpsb_send_packet(packet)) {
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                queue_complete_req(req);
        }
        return sizeof(struct raw1394_request);
}
  667. static int state_connected(struct file_info *fi, struct pending_request *req)
  668. {
  669.         int node = req->req.address >> 48;
  670.         req->req.error = RAW1394_ERROR_NONE;
  671.         if (req->req.type ==  RAW1394_REQ_ISO_SEND) {
  672.                 return handle_iso_send(fi, req, node);
  673.         }
  674.         if (req->req.generation != get_hpsb_generation(fi->host)) {
  675.                 req->req.error = RAW1394_ERROR_GENERATION;
  676.                 req->req.generation = get_hpsb_generation(fi->host);
  677.                 req->req.length = 0;
  678.                 queue_complete_req(req);
  679.                 return sizeof(struct raw1394_request);
  680.         }
  681.         switch (req->req.type) {
  682.         case RAW1394_REQ_ISO_LISTEN:
  683.                 handle_iso_listen(fi, req);
  684.                 return sizeof(struct raw1394_request);
  685.         case RAW1394_REQ_FCP_LISTEN:
  686.                 handle_fcp_listen(fi, req);
  687.                 return sizeof(struct raw1394_request);
  688.         case RAW1394_REQ_RESET_BUS:
  689.                 hpsb_reset_bus(fi->host, LONG_RESET);
  690.                 return sizeof(struct raw1394_request);
  691.         }
  692.         if (req->req.length == 0) {
  693.                 req->req.error = RAW1394_ERROR_INVALID_ARG;
  694.                 queue_complete_req(req);
  695.                 return sizeof(struct raw1394_request);
  696.         }
  697.         if (fi->host->node_id == node) {
  698.                 return handle_local_request(fi, req, node);
  699.         }
  700.         return handle_remote_request(fi, req, node);
  701. }
  702. static ssize_t raw1394_write(struct file *file, const char *buffer, size_t count,
  703.                      loff_t *offset_is_ignored)
  704. {
  705.         struct file_info *fi = (struct file_info *)file->private_data;
  706.         struct pending_request *req;
  707.         ssize_t retval = 0;
  708.         if (count != sizeof(struct raw1394_request)) {
  709.                 return -EINVAL;
  710.         }
  711.         req = alloc_pending_request();
  712.         if (req == NULL) {
  713.                 return -ENOMEM;
  714.         }
  715.         req->file_info = fi;
  716.         if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
  717.                 free_pending_request(req);
  718.                 return -EFAULT;
  719.         }
  720.         switch (fi->state) {
  721.         case opened:
  722.                 retval = state_opened(fi, req);
  723.                 break;
  724.         case initialized:
  725.                 retval = state_initialized(fi, req);
  726.                 break;
  727.         case connected:
  728.                 retval = state_connected(fi, req);
  729.                 break;
  730.         }
  731.         if (retval < 0) {
  732.                 free_pending_request(req);
  733.         }
  734.         return retval;
  735. }
  736. static unsigned int raw1394_poll(struct file *file, poll_table *pt)
  737. {
  738.         struct file_info *fi = file->private_data;
  739.         unsigned int mask = POLLOUT | POLLWRNORM;
  740.         poll_wait(file, &fi->poll_wait_complete, pt);
  741.         spin_lock_irq(&fi->reqlists_lock);
  742.         if (!list_empty(&fi->req_complete)) {
  743.                 mask |= POLLIN | POLLRDNORM;
  744.         }
  745.         spin_unlock_irq(&fi->reqlists_lock);
  746.         return mask;
  747. }
  748. static int raw1394_open(struct inode *inode, struct file *file)
  749. {
  750.         struct file_info *fi;
  751.         if (ieee1394_file_to_instance(file) > 0) {
  752.                 return -ENXIO;
  753.         }
  754.         fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
  755.         if (fi == NULL)
  756.                 return -ENOMEM;
  757.         
  758.         memset(fi, 0, sizeof(struct file_info));
  759.         INIT_LIST_HEAD(&fi->list);
  760.         fi->state = opened;
  761.         INIT_LIST_HEAD(&fi->req_pending);
  762.         INIT_LIST_HEAD(&fi->req_complete);
  763.         sema_init(&fi->complete_sem, 0);
  764.         spin_lock_init(&fi->reqlists_lock);
  765.         init_waitqueue_head(&fi->poll_wait_complete);
  766.         file->private_data = fi;
  767.         return 0;
  768. }
/* release() entry point: stop iso listening, drain completed requests,
 * wait for all pending requests to finish, then drop the host reference
 * and free the per-handle state.
 */
static int raw1394_release(struct inode *inode, struct file *file)
{
        struct file_info *fi = file->private_data;
        struct list_head *lh;
        struct pending_request *req;
        int done = 0, i;
        /* Unsubscribe from every iso channel this handle listened on. */
        for (i = 0; i < 64; i++) {
                if (fi->listen_channels & (1ULL << i)) {
                        hpsb_unlisten_channel(hl_handle, fi->host, i);
                }
        }
        /* NOTE(review): plain spin_lock here, but the same lock is taken
         * with spin_lock_irq below — confirm it is never taken from
         * interrupt context, otherwise this can deadlock. */
        spin_lock(&host_info_lock);
        fi->listen_channels = 0;
        spin_unlock(&host_info_lock);
        /* Free completed requests; pending ones must complete before the
         * file_info can be freed, so sleep on complete_sem until the
         * pending list empties. */
        while (!done) {
                spin_lock_irq(&fi->reqlists_lock);
                while (!list_empty(&fi->req_complete)) {
                        lh = fi->req_complete.next;
                        list_del(lh);
                        req = list_entry(lh, struct pending_request, list);
                        free_pending_request(req);
                }
                if (list_empty(&fi->req_pending)) done = 1;
                spin_unlock_irq(&fi->reqlists_lock);
                /* NOTE(review): return value of down_interruptible() is
                 * ignored — after a signal this loop degrades into a busy
                 * re-check; consider down() since we must wait anyway. */
                if (!done) down_interruptible(&fi->complete_sem);
        }
        /* A connected handle is on host_info_list and holds a host ref. */
        if (fi->state == connected) {
                spin_lock_irq(&host_info_lock);
                list_del(&fi->list);
                spin_unlock_irq(&host_info_lock);
                hpsb_unref_host(fi->host);
        }
        kfree(fi);
        return 0;
}
/* Callbacks registered with the ieee1394 highlevel layer in
 * init_raw1394(); the handlers are defined earlier in this file. */
static struct hpsb_highlevel_ops hl_ops = {
        .add_host =    add_host,
        .remove_host = remove_host,
        .host_reset =  host_reset,
        .iso_receive = iso_receive,
        .fcp_request = fcp_request,
};
/* Character-device entry points for /dev/raw1394 (no ioctl/mmap;
 * all control traffic goes through read/write). */
static struct file_operations file_ops = {
        .owner = THIS_MODULE,
        .read = raw1394_read,
        .write = raw1394_write,
        .poll = raw1394_poll,
        .open = raw1394_open,
        .release = raw1394_release,
};
  819. static int __init init_raw1394(void)
  820. {
  821.         hl_handle = hpsb_register_highlevel(RAW1394_DEVICE_NAME, &hl_ops);
  822.         if (hl_handle == NULL) {
  823.                 HPSB_ERR("raw1394 failed to register with ieee1394 highlevel");
  824.                 return -ENOMEM;
  825.         }
  826. devfs_handle = devfs_register(NULL,
  827.       RAW1394_DEVICE_NAME, DEVFS_FL_NONE,
  828.                                       IEEE1394_MAJOR,
  829.       IEEE1394_MINOR_BLOCK_RAW1394 * 16,
  830.                                       S_IFCHR | S_IRUSR | S_IWUSR, &file_ops,
  831.                                       NULL);
  832.         if (ieee1394_register_chardev(IEEE1394_MINOR_BLOCK_RAW1394,
  833.       THIS_MODULE, &file_ops)) {
  834.                 HPSB_ERR("raw1394 failed to register minor device block");
  835. devfs_unregister(devfs_handle);
  836. hpsb_unregister_highlevel(hl_handle);
  837.                 return -EBUSY;
  838.         }
  839. printk(KERN_INFO "raw1394: /dev/%s device initializedn", RAW1394_DEVICE_NAME);
  840.         return 0;
  841. }
/* Module exit: unregister in reverse order of init_raw1394() —
 * chardev minor block, devfs node, then the highlevel registration. */
static void __exit cleanup_raw1394(void)
{
        ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_RAW1394);
        devfs_unregister(devfs_handle);
        hpsb_unregister_highlevel(hl_handle);
}
/* Module entry/exit points and license declaration. */
module_init(init_raw1394);
module_exit(cleanup_raw1394);
MODULE_LICENSE("GPL");