raw1394.c
Uploaded by: lgb322
Upload date: 2013-02-24
Package size: 30529 KB
File size: 34 KB
Source category:

Embedded Linux

Development platform:

Unix / Linux

  1. /*
  2.  * IEEE 1394 for Linux
  3.  *
  4.  * Raw interface to the bus
  5.  *
  6.  * Copyright (C) 1999, 2000 Andreas E. Bombe
  7.  *
  8.  * This code is licensed under the GPL.  See the file COPYING in the root
  9.  * directory of the kernel sources for details.
  10.  */
  11. #include <linux/kernel.h>
  12. #include <linux/list.h>
  13. #include <linux/string.h>
  14. #include <linux/slab.h>
  15. #include <linux/fs.h>
  16. #include <linux/poll.h>
  17. #include <linux/module.h>
  18. #include <linux/init.h>
  19. #include <linux/version.h>
  20. #include <linux/smp_lock.h>
  21. #include <asm/uaccess.h>
  22. #include <asm/atomic.h>
  23. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
  24. #include <linux/devfs_fs_kernel.h>
  25. #endif
  26. #include "ieee1394.h"
  27. #include "ieee1394_types.h"
  28. #include "ieee1394_core.h"
  29. #include "hosts.h"
  30. #include "highlevel.h"
  31. #include "ieee1394_transactions.h"
  32. #include "raw1394.h"
/* Convert between the u64 buffer handles exchanged with userspace and
 * kernel pointers.  On 32-bit the pointer is narrowed/widened through u32
 * so the upper half of the u64 stays zero. */
#if BITS_PER_LONG == 64
#define int2ptr(x) ((void *)x)
#define ptr2int(x) ((u64)x)
#else
#define int2ptr(x) ((void *)(u32)x)
#define ptr2int(x) ((u64)(u32)x)
#endif

/* devfs directory handle for this driver (2.4 devfs support). */
static devfs_handle_t devfs_handle;

/* All registered adapters (struct host_info entries) and their count;
 * both are guarded by host_info_lock. */
static LIST_HEAD(host_info_list);
static int host_count;
static spinlock_t host_info_lock = SPIN_LOCK_UNLOCKED;

/* Bumped whenever the set of hosts changes (add/remove/bus reset) so
 * userspace can detect a stale card list via the generation field. */
static atomic_t internal_generation = ATOMIC_INIT(0);

static struct hpsb_highlevel *hl_handle;

/* Bytes currently buffered for iso/FCP delivery, capped at iso_buffer_max
 * so slow readers cannot exhaust kernel memory. */
static atomic_t iso_buffer_size;
static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */

static void queue_complete_cb(struct pending_request *req);
  49. static struct pending_request *__alloc_pending_request(int flags)
  50. {
  51.         struct pending_request *req;
  52.         req = (struct pending_request *)kmalloc(sizeof(struct pending_request),
  53.                                                 flags);
  54.         if (req != NULL) {
  55.                 memset(req, 0, sizeof(struct pending_request));
  56.                 INIT_LIST_HEAD(&req->list);
  57. INIT_TQUEUE(&req->tq, (void(*)(void*))queue_complete_cb, NULL);
  58.         }
  59.         return req;
  60. }
/* Process-context allocation wrapper: SLAB_KERNEL may sleep. */
static inline struct pending_request *alloc_pending_request(void)
{
        return __alloc_pending_request(SLAB_KERNEL);
}
/* Release a request and whatever payload it owns: either a reference on a
 * shared iso block store or a privately allocated data buffer. */
static void free_pending_request(struct pending_request *req)
{
        if (req->ibs) {
                /* Shared iso/FCP payload: drop our reference; the last
                 * holder frees it and credits the global accounting. */
                if (atomic_dec_and_test(&req->ibs->refcount)) {
                        atomic_sub(req->ibs->data_size, &iso_buffer_size);
                        kfree(req->ibs);
                }
        } else if (req->free_data) {
                kfree(req->data);
        }
        /* NOTE(review): req->packet may be NULL for requests that never
         * built a packet — assumes free_hpsb_packet(NULL) is a no-op. */
        free_hpsb_packet(req->packet);
        kfree(req);
}
/* Move @req from whatever list it is on (typically req_pending) to the
 * owning file's completed list and wake blocked read()/poll() callers.
 * Uses irqsave locking, so it is safe from any context. */
static void queue_complete_req(struct pending_request *req)
{
        unsigned long flags;
        struct file_info *fi = req->file_info;

        spin_lock_irqsave(&fi->reqlists_lock, flags);
        list_del(&req->list);
        list_add_tail(&req->list, &fi->req_complete);
        spin_unlock_irqrestore(&fi->reqlists_lock, flags);

        /* complete_sem counts entries available to raw1394_read(). */
        up(&fi->complete_sem);
        wake_up_interruptible(&fi->poll_wait_complete);
}
/* Packet completion callback (run off the packet's task queue): translate
 * the 1394 ack and response codes into a raw1394 error value, release the
 * transaction label, and hand the request to the reader. */
static void queue_complete_cb(struct pending_request *req)
{
        struct hpsb_packet *packet = req->packet;
        /* Response code sits in bits 15:12 of the second header quadlet. */
        int rcode = (packet->header[1] >> 12) & 0xf;

        switch (packet->ack_code) {
        case ACKX_NONE:
        case ACKX_SEND_ERROR:
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                break;
        case ACKX_ABORTED:
                req->req.error = RAW1394_ERROR_ABORTED;
                break;
        case ACKX_TIMEOUT:
                req->req.error = RAW1394_ERROR_TIMEOUT;
                break;
        default:
                /* Pack raw ack code (high 16 bits) and rcode together. */
                req->req.error = (packet->ack_code << 16) | rcode;
                break;
        }

        /* Only a clean split-transaction completion carries payload back
         * to userspace. */
        if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
                req->req.length = 0;
        }

        free_tlabel(packet->host, packet->node_id, packet->tlabel);
        queue_complete_req(req);
}
/* Highlevel callback: a new 1394 adapter appeared.  Register it on
 * host_info_list so files can bind to it via RAW1394_REQ_SET_CARD. */
static void add_host(struct hpsb_host *host)
{
        struct host_info *hi;

        hi = (struct host_info *)kmalloc(sizeof(struct host_info), SLAB_KERNEL);
        if (hi != NULL) {
                INIT_LIST_HEAD(&hi->list);
                hi->host = host;
                INIT_LIST_HEAD(&hi->file_info_list);

                spin_lock_irq(&host_info_lock);
                list_add_tail(&hi->list, &host_info_list);
                host_count++;
                spin_unlock_irq(&host_info_lock);
        }

        /* Bump the generation even on allocation failure so userspace
         * re-queries the card list. */
        atomic_inc(&internal_generation);
}
  129. static struct host_info *find_host_info(struct hpsb_host *host)
  130. {
  131.         struct list_head *lh;
  132.         struct host_info *hi;
  133.         list_for_each(lh, &host_info_list) {
  134.                 hi = list_entry(lh, struct host_info, list);
  135.                 if (hi->host == host) {
  136.                         return hi;
  137.                 }
  138.         }
  139.         return NULL;
  140. }
  141. static void remove_host(struct hpsb_host *host)
  142. {
  143.         struct host_info *hi;
  144.         spin_lock_irq(&host_info_lock);
  145.         hi = find_host_info(host);
  146.         if (hi != NULL) {
  147.                 list_del(&hi->list);
  148.                 host_count--;
  149.         }
  150.         spin_unlock_irq(&host_info_lock);
  151.         if (hi == NULL) {
  152.                 printk(KERN_ERR "raw1394: attempt to remove unknown host "
  153.                        "0x%pn", host);
  154.                 return;
  155.         }
  156.         kfree(hi);
  157. }
/* Highlevel callback: the bus behind @host was reset.  Queue one
 * RAW1394_REQ_BUS_RESET event per file attached to that host. */
static void host_reset(struct hpsb_host *host)
{
        unsigned long flags;
        struct list_head *lh;
        struct host_info *hi;
        struct file_info *fi;
        struct pending_request *req;

        spin_lock_irqsave(&host_info_lock, flags);
        hi = find_host_info(host);
        if (hi != NULL) {
                list_for_each(lh, &hi->file_info_list) {
                        fi = list_entry(lh, struct file_info, list);
                        /* Atomic allocation: we hold a spinlock.  On
                         * failure this file silently misses the event. */
                        req = __alloc_pending_request(SLAB_ATOMIC);
                        if (req != NULL) {
                                req->file_info = fi;
                                req->req.type = RAW1394_REQ_BUS_RESET;
                                req->req.generation = get_hpsb_generation(host);
                                /* misc: local node id in the high 16 bits,
                                 * node count in the low bits. */
                                req->req.misc = (host->node_id << 16)
                                        | host->node_count;
                                if (fi->protocol_version > 3) {
                                        /* API version >= 4 also gets the
                                         * IRM node id. */
                                        req->req.misc |= ((host->irm_id
                                                           & NODE_MASK) << 8);
                                }
                                queue_complete_req(req);
                        }
                }
        }
        spin_unlock_irqrestore(&host_info_lock, flags);

        atomic_inc(&internal_generation);
}
/* Highlevel callback: an isochronous packet arrived on @channel.  A single
 * refcounted copy of the payload (iso_block_store) is shared by all files
 * listening on that channel; one pending request per listener is queued. */
static void iso_receive(struct hpsb_host *host, int channel, quadlet_t *data,
                        unsigned int length)
{
        unsigned long flags;
        struct list_head *lh;
        struct host_info *hi;
        struct file_info *fi;
        struct pending_request *req;
        struct iso_block_store *ibs = NULL;
        LIST_HEAD(reqs);

        /* Hard cap on buffered iso data: drop the packet rather than let a
         * slow reader exhaust kernel memory. */
        if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
                HPSB_INFO("dropped iso packet");
                return;
        }

        spin_lock_irqsave(&host_info_lock, flags);
        hi = find_host_info(host);
        if (hi != NULL) {
                list_for_each(lh, &hi->file_info_list) {
                        fi = list_entry(lh, struct file_info, list);
                        /* listen_channels is a 64-bit channel bitmask. */
                        if (!(fi->listen_channels & (1ULL << channel))) {
                                continue;
                        }
                        req = __alloc_pending_request(SLAB_ATOMIC);
                        if (!req) break;
                        if (!ibs) {
                                /* Copy the payload once, lazily, only when
                                 * at least one listener exists. */
                                ibs = kmalloc(sizeof(struct iso_block_store)
                                              + length, SLAB_ATOMIC);
                                if (!ibs) {
                                        kfree(req);
                                        break;
                                }
                                atomic_add(length, &iso_buffer_size);
                                atomic_set(&ibs->refcount, 0);
                                ibs->data_size = length;
                                memcpy(ibs->data, data, length);
                        }
                        atomic_inc(&ibs->refcount);
                        req->file_info = fi;
                        req->ibs = ibs;
                        req->data = ibs->data;
                        req->req.type = RAW1394_REQ_ISO_RECEIVE;
                        req->req.generation = get_hpsb_generation(host);
                        req->req.misc = 0;
                        req->req.recvb = ptr2int(fi->iso_buffer);
                        req->req.length = MIN(length, fi->iso_buffer_length);

                        /* Collect locally; completion happens after the
                         * host_info_lock is dropped. */
                        list_add_tail(&req->list, &reqs);
                }
        }
        spin_unlock_irqrestore(&host_info_lock, flags);

        /* queue_complete_req() relinks each entry onto its file's complete
         * list, so walk with a saved next pointer. */
        lh = reqs.next;
        while (lh != &reqs) {
                req = list_entry(lh, struct pending_request, list);
                lh = lh->next;
                queue_complete_req(req);
        }
}
/* Highlevel callback: an FCP command/response frame arrived.  Mirrors the
 * structure of iso_receive(): one shared refcounted payload copy, one
 * queued request per file that registered an FCP buffer. */
static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
                        int cts, u8 *data, unsigned int length)
{
        unsigned long flags;
        struct list_head *lh;
        struct host_info *hi;
        struct file_info *fi;
        struct pending_request *req;
        struct iso_block_store *ibs = NULL;
        LIST_HEAD(reqs);

        /* Same buffered-data cap as iso reception. */
        if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
                HPSB_INFO("dropped fcp request");
                return;
        }

        spin_lock_irqsave(&host_info_lock, flags);
        hi = find_host_info(host);
        if (hi != NULL) {
                list_for_each(lh, &hi->file_info_list) {
                        fi = list_entry(lh, struct file_info, list);
                        /* Only files that enabled FCP listening. */
                        if (!fi->fcp_buffer) {
                                continue;
                        }
                        req = __alloc_pending_request(SLAB_ATOMIC);
                        if (!req) break;
                        if (!ibs) {
                                /* Copy the payload once, on first use. */
                                ibs = kmalloc(sizeof(struct iso_block_store)
                                              + length, SLAB_ATOMIC);
                                if (!ibs) {
                                        kfree(req);
                                        break;
                                }
                                atomic_add(length, &iso_buffer_size);
                                atomic_set(&ibs->refcount, 0);
                                ibs->data_size = length;
                                memcpy(ibs->data, data, length);
                        }
                        atomic_inc(&ibs->refcount);
                        req->file_info = fi;
                        req->ibs = ibs;
                        req->data = ibs->data;
                        req->req.type = RAW1394_REQ_FCP_REQUEST;
                        req->req.generation = get_hpsb_generation(host);
                        /* misc: sender node id plus direction flag. */
                        req->req.misc = nodeid | (direction << 16);
                        req->req.recvb = ptr2int(fi->fcp_buffer);
                        req->req.length = length;

                        list_add_tail(&req->list, &reqs);
                }
        }
        spin_unlock_irqrestore(&host_info_lock, flags);

        /* Complete outside the lock; walk with a saved next pointer since
         * queue_complete_req() relinks each entry. */
        lh = reqs.next;
        while (lh != &reqs) {
                req = list_entry(lh, struct pending_request, list);
                lh = lh->next;
                queue_complete_req(req);
        }
}
/* read() hands userspace exactly one completed raw1394_request per call.
 * Any payload is first copied to the user buffer the request registered
 * (recvb); then the request header itself goes to @buffer. */
static ssize_t raw1394_read(struct file *file, char *buffer, size_t count,
                    loff_t *offset_is_ignored)
{
        struct file_info *fi = (struct file_info *)file->private_data;
        struct list_head *lh;
        struct pending_request *req;

        if (count != sizeof(struct raw1394_request)) {
                return -EINVAL;
        }
        if (!access_ok(VERIFY_WRITE, buffer, count)) {
                return -EFAULT;
        }

        /* complete_sem counts entries on req_complete, so after a
         * successful down() the list is guaranteed non-empty. */
        if (file->f_flags & O_NONBLOCK) {
                if (down_trylock(&fi->complete_sem)) {
                        return -EAGAIN;
                }
        } else {
                if (down_interruptible(&fi->complete_sem)) {
                        return -ERESTARTSYS;
                }
        }

        spin_lock_irq(&fi->reqlists_lock);
        lh = fi->req_complete.next;
        list_del(lh);
        spin_unlock_irq(&fi->reqlists_lock);

        req = list_entry(lh, struct pending_request, list);

        if (req->req.length) {
                if (copy_to_user(int2ptr(req->req.recvb), req->data,
                                 req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                }
        }

        /* Range was validated by access_ok() above.  NOTE(review): the
         * return value of __copy_to_user() is still ignored here. */
        __copy_to_user(buffer, &req->req, sizeof(req->req));

        free_pending_request(req);
        return sizeof(struct raw1394_request);
}
  338. static int state_opened(struct file_info *fi, struct pending_request *req)
  339. {
  340.         if (req->req.type == RAW1394_REQ_INITIALIZE) {
  341.                 switch (req->req.misc) {
  342.                 case RAW1394_KERNELAPI_VERSION:
  343.                 case 3:
  344.                         fi->state = initialized;
  345.                         fi->protocol_version = req->req.misc;
  346.                         req->req.error = RAW1394_ERROR_NONE;
  347.                         req->req.generation = atomic_read(&internal_generation);
  348.                         break;
  349.                 default:
  350.                         req->req.error = RAW1394_ERROR_COMPAT;
  351.                         req->req.misc = RAW1394_KERNELAPI_VERSION;
  352.                 }
  353.         } else {
  354.                 req->req.error = RAW1394_ERROR_STATE_ORDER;
  355.         }
  356.         req->req.length = 0;
  357.         queue_complete_req(req);
  358.         return sizeof(struct raw1394_request);
  359. }
/* Request dispatch for a file that has negotiated an API version but is
 * not yet bound to an adapter: list the cards or select one. */
static int state_initialized(struct file_info *fi, struct pending_request *req)
{
        struct list_head *lh;
        struct host_info *hi;
        struct raw1394_khost_list *khl;

        /* Reject requests made against a stale card-list generation. */
        if (req->req.generation != atomic_read(&internal_generation)) {
                req->req.error = RAW1394_ERROR_GENERATION;
                req->req.generation = atomic_read(&internal_generation);
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        switch (req->req.type) {
        case RAW1394_REQ_LIST_CARDS:
                spin_lock_irq(&host_info_lock);
                /* Atomic allocation: host_info_lock is held. */
                khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
                              SLAB_ATOMIC);
                if (khl != NULL) {
                        req->req.misc = host_count;
                        req->data = (quadlet_t *)khl;

                        list_for_each(lh, &host_info_list) {
                                hi = list_entry(lh, struct host_info, list);
                                khl->nodes = hi->host->node_count;
                                strcpy(khl->name, hi->host->template->name);
                                khl++;
                        }
                }
                spin_unlock_irq(&host_info_lock);

                if (khl != NULL) {
                        req->req.error = RAW1394_ERROR_NONE;
                        /* Truncate to what the caller's buffer can hold. */
                        req->req.length = MIN(req->req.length,
                                              sizeof(struct raw1394_khost_list)
                                              * req->req.misc);
                        /* khl is freed via req->data when the request is
                         * released. */
                        req->free_data = 1;
                } else {
                        return -ENOMEM;
                }
                break;

        case RAW1394_REQ_SET_CARD:
                lh = NULL;
                spin_lock_irq(&host_info_lock);
                if (req->req.misc < host_count) {
                        /* misc is the zero-based card index; walk to it. */
                        lh = host_info_list.next;
                        while (req->req.misc--) {
                                lh = lh->next;
                        }
                        hi = list_entry(lh, struct host_info, list);
                        hpsb_inc_host_usage(hi->host);
                        list_add_tail(&fi->list, &hi->file_info_list);
                        fi->host = hi->host;
                        fi->state = connected;
                }
                spin_unlock_irq(&host_info_lock);

                if (lh != NULL) {
                        req->req.error = RAW1394_ERROR_NONE;
                        req->req.generation = get_hpsb_generation(fi->host);
                        /* Same encoding as the bus reset event: node id,
                         * node count, and (API >= 4) the IRM id. */
                        req->req.misc = (fi->host->node_id << 16)
                                | fi->host->node_count;
                        if (fi->protocol_version > 3) {
                                req->req.misc |=
                                        (fi->host->irm_id & NODE_MASK) << 8;
                        }
                } else {
                        req->req.error = RAW1394_ERROR_INVALID_ARG;
                }
                req->req.length = 0;
                break;

        default:
                req->req.error = RAW1394_ERROR_STATE_ORDER;
                req->req.length = 0;
                break;
        }

        queue_complete_req(req);
        return sizeof(struct raw1394_request);
}
/* Start or stop listening on an iso channel.  misc in [0,63] allocates
 * that channel; a negative misc deallocates channel ~misc (one's
 * complement encoding). */
static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
{
        int channel = req->req.misc;

        spin_lock(&host_info_lock);
        if ((channel > 63) || (channel < -64)) {
                req->req.error = RAW1394_ERROR_INVALID_ARG;
        } else if (channel >= 0) {
                /* allocate channel req.misc */
                if (fi->listen_channels & (1ULL << channel)) {
                        req->req.error = RAW1394_ERROR_ALREADY;
                } else {
                        fi->listen_channels |= 1ULL << channel;
                        hpsb_listen_channel(hl_handle, fi->host, channel);
                        /* iso_receive() copies payloads to this userspace
                         * buffer; its length bounds each copy. */
                        fi->iso_buffer = int2ptr(req->req.recvb);
                        fi->iso_buffer_length = req->req.length;
                }
        } else {
                /* deallocate channel (one's complement neg) req.misc */
                channel = ~channel;
                if (fi->listen_channels & (1ULL << channel)) {
                        hpsb_unlisten_channel(hl_handle, fi->host, channel);
                        fi->listen_channels &= ~(1ULL << channel);
                } else {
                        req->req.error = RAW1394_ERROR_INVALID_ARG;
                }
        }

        req->req.length = 0;
        queue_complete_req(req);
        spin_unlock(&host_info_lock);
}
  466. static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
  467. {
  468.         if (req->req.misc) {
  469.                 if (fi->fcp_buffer) {
  470.                         req->req.error = RAW1394_ERROR_ALREADY;
  471.                 } else {
  472.                         fi->fcp_buffer = (u8 *)int2ptr(req->req.recvb);
  473.                 }
  474.         } else {
  475.                 if (!fi->fcp_buffer) {
  476.                         req->req.error = RAW1394_ERROR_ALREADY;
  477.                 } else {
  478.                         fi->fcp_buffer = NULL;
  479.                 }
  480.         }
  481.         req->req.length = 0;
  482.         queue_complete_req(req);
  483. }
/* Serve a request addressed to our own node by calling straight into the
 * highlevel address-space handlers instead of sending a bus packet. */
static int handle_local_request(struct file_info *fi,
                                struct pending_request *req, int node)
{
        /* Low 48 bits form the 1394 address; the node id sits above. */
        u64 addr = req->req.address & 0xffffffffffffULL;

        req->data = kmalloc(req->req.length, SLAB_KERNEL);
        if (!req->data) return -ENOMEM;
        req->free_data = 1;

        switch (req->req.type) {
        case RAW1394_REQ_ASYNC_READ:
                req->req.error = highlevel_read(fi->host, node, req->data, addr,
                                                req->req.length);
                break;

        case RAW1394_REQ_ASYNC_WRITE:
                if (copy_from_user(req->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }
                req->req.error = highlevel_write(fi->host, node, node, req->data,
                                                 addr, req->req.length);
                req->req.length = 0;
                break;

        case RAW1394_REQ_LOCK:
                /* Single-operand extended codes take one quadlet; the
                 * compare/swap style codes take two. */
                if ((req->req.misc == EXTCODE_FETCH_ADD)
                    || (req->req.misc == EXTCODE_LITTLE_ADD)) {
                        if (req->req.length != 4) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                } else {
                        if (req->req.length != 8) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                }
                if (copy_from_user(req->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }
                if (req->req.length == 8) {
                        req->req.error = highlevel_lock(fi->host, node,
                                                        req->data, addr,
                                                        req->data[1],
                                                        req->data[0],
                                                        req->req.misc);
                        /* Lock responses are always one quadlet. */
                        req->req.length = 4;
                } else {
                        req->req.error = highlevel_lock(fi->host, node,
                                                        req->data, addr,
                                                        req->data[0], 0,
                                                        req->req.misc);
                }
                break;

        case RAW1394_REQ_LOCK64:
        default:
                req->req.error = RAW1394_ERROR_STATE_ORDER;
        }

        if (req->req.error)
                req->req.length = 0;
        /* Make local completions look like an acked transaction to
         * userspace (same encoding as queue_complete_cb). */
        if (req->req.error >= 0)
                req->req.error |= ACK_PENDING << 16;

        queue_complete_req(req);
        return sizeof(struct raw1394_request);
}
/* Build and send an async packet for a read/write/lock request to another
 * node.  On success the request parks on req_pending until the packet's
 * completion task queue runs queue_complete_cb(). */
static int handle_remote_request(struct file_info *fi,
                                 struct pending_request *req, int node)
{
        struct hpsb_packet *packet = NULL;
        /* Low 48 bits form the 1394 address; the node id sits above. */
        u64 addr = req->req.address & 0xffffffffffffULL;

        switch (req->req.type) {
        case RAW1394_REQ_ASYNC_READ:
                if (req->req.length == 4) {
                        /* Quadlet read: response data arrives inside the
                         * packet header itself. */
                        packet = hpsb_make_readqpacket(fi->host, node, addr);
                        if (!packet) return -ENOMEM;
                        req->data = &packet->header[3];
                } else {
                        packet = hpsb_make_readbpacket(fi->host, node, addr,
                                                       req->req.length);
                        if (!packet) return -ENOMEM;
                        req->data = packet->data;
                }
                break;

        case RAW1394_REQ_ASYNC_WRITE:
                if (req->req.length == 4) {
                        quadlet_t x;
                        if (copy_from_user(&x, int2ptr(req->req.sendb), 4)) {
                                req->req.error = RAW1394_ERROR_MEMFAULT;
                        }
                        packet = hpsb_make_writeqpacket(fi->host, node, addr,
                                                        x);
                        if (!packet) return -ENOMEM;
                } else {
                        packet = hpsb_make_writebpacket(fi->host, node, addr,
                                                        req->req.length);
                        if (!packet) return -ENOMEM;
                        if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                                           req->req.length)) {
                                req->req.error = RAW1394_ERROR_MEMFAULT;
                        }
                }
                req->req.length = 0;
                break;

        case RAW1394_REQ_LOCK:
                /* Single-operand extended codes take one quadlet; the
                 * compare/swap style codes take two. */
                if ((req->req.misc == EXTCODE_FETCH_ADD)
                    || (req->req.misc == EXTCODE_LITTLE_ADD)) {
                        if (req->req.length != 4) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                } else {
                        if (req->req.length != 8) {
                                req->req.error = RAW1394_ERROR_INVALID_ARG;
                                break;
                        }
                }
                packet = hpsb_make_lockpacket(fi->host, node, addr,
                                              req->req.misc);
                if (!packet) return -ENOMEM;
                if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                                   req->req.length)) {
                        req->req.error = RAW1394_ERROR_MEMFAULT;
                        break;
                }
                req->data = packet->data;
                /* Lock responses are always one quadlet. */
                req->req.length = 4;
                break;

        case RAW1394_REQ_LOCK64:
        default:
                req->req.error = RAW1394_ERROR_STATE_ORDER;
        }

        req->packet = packet;

        if (req->req.error) {
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        /* Completion path: packet->complete_tq runs queue_complete_cb(req)
         * (routine installed by __alloc_pending_request). */
        req->tq.data = req;
        queue_task(&req->tq, &packet->complete_tq);

        spin_lock_irq(&fi->reqlists_lock);
        list_add_tail(&req->list, &fi->req_pending);
        spin_unlock_irq(&fi->reqlists_lock);

        packet->generation = req->req.generation;

        if (!hpsb_send_packet(packet)) {
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                req->req.length = 0;
                /* The completion callback will not run, so release the
                 * transaction label here. */
                free_tlabel(packet->host, packet->node_id, packet->tlabel);
                queue_complete_req(req);
        }
        return sizeof(struct raw1394_request);
}
/* Queue an isochronous packet for transmission on @channel.  The request
 * completes when the packet's completion task queue runs. */
static int handle_iso_send(struct file_info *fi, struct pending_request *req,
                           int channel)
{
        struct hpsb_packet *packet;

        packet = alloc_hpsb_packet(req->req.length);
        if (!packet) return -ENOMEM;
        req->packet = packet;

        /* misc encodes the tag field (bits 17:16) and sync code (3:0). */
        fill_iso_packet(packet, req->req.length, channel & 0x3f,
                        (req->req.misc >> 16) & 0x3, req->req.misc & 0xf);
        packet->type = hpsb_iso;
        /* Low two address bits select the speed code. */
        packet->speed_code = req->req.address & 0x3;
        packet->host = fi->host;

        if (copy_from_user(packet->data, int2ptr(req->req.sendb),
                           req->req.length)) {
                req->req.error = RAW1394_ERROR_MEMFAULT;
                req->req.length = 0;
                queue_complete_req(req);
                return sizeof(struct raw1394_request);
        }

        /* Iso sends bypass queue_complete_cb(): there is no ack/response
         * to translate, so complete the request directly. */
        req->tq.data = req;
        req->tq.routine = (void (*)(void*))queue_complete_req;
        req->req.length = 0;
        queue_task(&req->tq, &packet->complete_tq);

        spin_lock_irq(&fi->reqlists_lock);
        list_add_tail(&req->list, &fi->req_pending);
        spin_unlock_irq(&fi->reqlists_lock);

        /* Update the generation of the packet just before sending. */
        packet->generation = get_hpsb_generation(fi->host);

        if (!hpsb_send_packet(packet)) {
                req->req.error = RAW1394_ERROR_SEND_ERROR;
                queue_complete_req(req);
        }
        return sizeof(struct raw1394_request);
}
  669. static int state_connected(struct file_info *fi, struct pending_request *req)
  670. {
  671.         int node = req->req.address >> 48;
  672.         req->req.error = RAW1394_ERROR_NONE;
  673.         if (req->req.type ==  RAW1394_REQ_ISO_SEND) {
  674.                 return handle_iso_send(fi, req, node);
  675.         }
  676.         if (req->req.generation != get_hpsb_generation(fi->host)) {
  677.                 req->req.error = RAW1394_ERROR_GENERATION;
  678.                 req->req.generation = get_hpsb_generation(fi->host);
  679.                 req->req.length = 0;
  680.                 queue_complete_req(req);
  681.                 return sizeof(struct raw1394_request);
  682.         }
  683.         switch (req->req.type) {
  684.         case RAW1394_REQ_ISO_LISTEN:
  685.                 handle_iso_listen(fi, req);
  686.                 return sizeof(struct raw1394_request);
  687.         case RAW1394_REQ_FCP_LISTEN:
  688.                 handle_fcp_listen(fi, req);
  689.                 return sizeof(struct raw1394_request);
  690.         case RAW1394_REQ_RESET_BUS:
  691.                 hpsb_reset_bus(fi->host, LONG_RESET);
  692.                 return sizeof(struct raw1394_request);
  693.         }
  694.         if (req->req.length == 0) {
  695.                 req->req.error = RAW1394_ERROR_INVALID_ARG;
  696.                 queue_complete_req(req);
  697.                 return sizeof(struct raw1394_request);
  698.         }
  699.         if (fi->host->node_id == node) {
  700.                 return handle_local_request(fi, req, node);
  701.         }
  702.         return handle_remote_request(fi, req, node);
  703. }
  704. static ssize_t raw1394_write(struct file *file, const char *buffer, size_t count,
  705.                      loff_t *offset_is_ignored)
  706. {
  707.         struct file_info *fi = (struct file_info *)file->private_data;
  708.         struct pending_request *req;
  709.         ssize_t retval = 0;
  710.         if (count != sizeof(struct raw1394_request)) {
  711.                 return -EINVAL;
  712.         }
  713.         req = alloc_pending_request();
  714.         if (req == NULL) {
  715.                 return -ENOMEM;
  716.         }
  717.         req->file_info = fi;
  718.         if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
  719.                 free_pending_request(req);
  720.                 return -EFAULT;
  721.         }
  722.         switch (fi->state) {
  723.         case opened:
  724.                 retval = state_opened(fi, req);
  725.                 break;
  726.         case initialized:
  727.                 retval = state_initialized(fi, req);
  728.                 break;
  729.         case connected:
  730.                 retval = state_connected(fi, req);
  731.                 break;
  732.         }
  733.         if (retval < 0) {
  734.                 free_pending_request(req);
  735.         }
  736.         return retval;
  737. }
  738. static unsigned int raw1394_poll(struct file *file, poll_table *pt)
  739. {
  740.         struct file_info *fi = file->private_data;
  741.         unsigned int mask = POLLOUT | POLLWRNORM;
  742.         poll_wait(file, &fi->poll_wait_complete, pt);
  743.         spin_lock_irq(&fi->reqlists_lock);
  744.         if (!list_empty(&fi->req_complete)) {
  745.                 mask |= POLLIN | POLLRDNORM;
  746.         }
  747.         spin_unlock_irq(&fi->reqlists_lock);
  748.         return mask;
  749. }
  750. static int raw1394_open(struct inode *inode, struct file *file)
  751. {
  752.         struct file_info *fi;
  753.         if (MINOR(inode->i_rdev)) {
  754.                 return -ENXIO;
  755.         }
  756.         V22_COMPAT_MOD_INC_USE_COUNT;
  757.         fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
  758.         if (fi == NULL) {
  759.                 V22_COMPAT_MOD_DEC_USE_COUNT;
  760.                 return -ENOMEM;
  761.         }
  762.         
  763.         memset(fi, 0, sizeof(struct file_info));
  764.         INIT_LIST_HEAD(&fi->list);
  765.         fi->state = opened;
  766.         INIT_LIST_HEAD(&fi->req_pending);
  767.         INIT_LIST_HEAD(&fi->req_complete);
  768.         sema_init(&fi->complete_sem, 0);
  769.         spin_lock_init(&fi->reqlists_lock);
  770.         init_waitqueue_head(&fi->poll_wait_complete);
  771.         file->private_data = fi;
  772.         return 0;
  773. }
/* release() entry point: stop iso listening, drain all outstanding
 * requests, detach from the host and free the per-fd state. */
static int raw1394_release(struct inode *inode, struct file *file)
{
        struct file_info *fi = file->private_data;
        struct list_head *lh;
        struct pending_request *req;
        int done = 0, i;

        lock_kernel();

        /* Drop every iso channel this fd had claimed (one bit per
           channel in the 64-bit mask). */
        for (i = 0; i < 64; i++) {
                if (fi->listen_channels & (1ULL << i)) {
                        hpsb_unlisten_channel(hl_handle, fi->host, i);
                }
        }

        spin_lock(&host_info_lock);
        fi->listen_channels = 0;
        spin_unlock(&host_info_lock);

        /* Free completed requests and wait until nothing is pending;
         * each completion ups complete_sem, waking this loop.
         * NOTE(review): down_interruptible()'s return value is ignored,
         * so a pending signal turns this into a busy loop -- confirm
         * that is intended. */
        while (!done) {
                spin_lock_irq(&fi->reqlists_lock);
                while (!list_empty(&fi->req_complete)) {
                        lh = fi->req_complete.next;
                        list_del(lh);
                        req = list_entry(lh, struct pending_request, list);
                        free_pending_request(req);
                }
                if (list_empty(&fi->req_pending)) done = 1;
                spin_unlock_irq(&fi->reqlists_lock);
                if (!done) down_interruptible(&fi->complete_sem);
        }

        /* Only a connected fd is on the host's file_info list and holds
           a host usage reference. */
        if (fi->state == connected) {
                /* NOTE(review): host_info_lock is taken with spin_lock()
                   above but spin_lock_irq() here -- verify which variant
                   is correct for this lock. */
                spin_lock_irq(&host_info_lock);
                list_del(&fi->list);
                spin_unlock_irq(&host_info_lock);

                hpsb_dec_host_usage(fi->host);
        }

        kfree(fi);
        V22_COMPAT_MOD_DEC_USE_COUNT;
        unlock_kernel();
        return 0;
}
/* Callbacks handed to the ieee1394 highlevel layer at module init. */
static struct hpsb_highlevel_ops hl_ops = {
        add_host:     add_host,
        remove_host:  remove_host,
        host_reset:   host_reset,
        iso_receive:  iso_receive,
        fcp_request:  fcp_request,
};
/* File operations for the /dev/raw1394 character device. */
static struct file_operations file_ops = {
        OWNER_THIS_MODULE
        read:     raw1394_read,
        write:    raw1394_write,
        poll:     raw1394_poll,
        open:     raw1394_open,
        release:  raw1394_release,
};
  827. static int __init init_raw1394(void)
  828. {
  829.         hl_handle = hpsb_register_highlevel(RAW1394_DEVICE_NAME, &hl_ops);
  830.         if (hl_handle == NULL) {
  831.                 HPSB_ERR("raw1394 failed to register with ieee1394 highlevel");
  832.                 return -ENOMEM;
  833.         }
  834. devfs_handle = devfs_register(NULL, RAW1394_DEVICE_NAME, DEVFS_FL_NONE,
  835.                                       RAW1394_DEVICE_MAJOR, 0,
  836.                                       S_IFCHR | S_IRUSR | S_IWUSR, &file_ops,
  837.                                       NULL);
  838.         if (devfs_register_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME, 
  839.                                   &file_ops)) {
  840.                 HPSB_ERR("raw1394 failed to register /dev/raw1394 device");
  841.                 return -EBUSY;
  842.         }
  843. printk(KERN_INFO "raw1394: /dev/%s device initializedn", RAW1394_DEVICE_NAME);
  844.         return 0;
  845. }
/* Module exit: tear down in reverse order of init_raw1394(). */
static void __exit cleanup_raw1394(void)
{
        devfs_unregister_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME);
        devfs_unregister(devfs_handle);
        hpsb_unregister_highlevel(hl_handle);
}
/* Module entry/exit points; GPL license keeps the kernel untainted. */
module_init(init_raw1394);
module_exit(cleanup_raw1394);
MODULE_LICENSE("GPL");