ieee1394_core.c
Uploaded by: jlfgdled
Upload date: 2013-04-10
Archive size: 33168k
File size: 35k
Source category:

Linux/Unix Programming

Development platform:

Unix_Linux

  1. /*
  2.  * IEEE 1394 for Linux
  3.  *
  4.  * Core support: hpsb_packet management, packet handling and forwarding to
  5.  *               highlevel or lowlevel code
  6.  *
  7.  * Copyright (C) 1999, 2000 Andreas E. Bombe
  8.  *
  9.  * This code is licensed under the GPL.  See the file COPYING in the root
  10.  * directory of the kernel sources for details.
  11.  */
  12. #include <linux/config.h>
  13. #include <linux/kernel.h>
  14. #include <linux/list.h>
  15. #include <linux/string.h>
  16. #include <linux/init.h>
  17. #include <linux/slab.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/module.h>
  20. #include <linux/proc_fs.h>
  21. #include <linux/tqueue.h>
  22. #include <asm/bitops.h>
  23. #include <asm/byteorder.h>
  24. #include <asm/semaphore.h>
  25. #include "ieee1394_types.h"
  26. #include "ieee1394.h"
  27. #include "hosts.h"
  28. #include "ieee1394_core.h"
  29. #include "highlevel.h"
  30. #include "ieee1394_transactions.h"
  31. #include "csr.h"
  32. #include "nodemgr.h"
  33. #include "ieee1394_hotplug.h"
  34. /*
  35.  * Disable the nodemgr detection and config rom reading functionality.
  36.  */
  37. MODULE_PARM(disable_nodemgr, "i");
  38. MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
  39. static int disable_nodemgr = 0;
  40. MODULE_PARM(disable_hotplug, "i");
  41. MODULE_PARM_DESC(disable_hotplug, "Disable hotplug for detected nodes.");
  42. static int disable_hotplug = 0;
  43. /* We are GPL, so treat us special */
  44. MODULE_LICENSE("GPL");
  45. static kmem_cache_t *hpsb_packet_cache;
  46. /* Some globals used */
  47. const char *hpsb_speedto_str[] = { "S100", "S200", "S400" };
  48. static void dump_packet(const char *text, quadlet_t *data, int size)
  49. {
  50.         int i;
  51.         size /= 4;
  52.         size = (size > 4 ? 4 : size);
  53.         printk(KERN_DEBUG "ieee1394: %s", text);
  54.         for (i = 0; i < size; i++) {
  55.                 printk(" %8.8x", data[i]);
  56.         }
  57.         printk("n");
  58. }
  59. static void process_complete_tasks(struct hpsb_packet *packet)
  60. {
  61. struct list_head *lh, *next;
  62. list_for_each_safe(lh, next, &packet->complete_tq) {
  63. struct tq_struct *tq = list_entry(lh, struct tq_struct, list);
  64. list_del(&tq->list);
  65. schedule_task(tq);
  66. }
  67. return;
  68. }
/**
 * hpsb_add_packet_complete_task - add a new task for when a packet completes
 * @packet: the packet whose completion we want the task added to
 * @tq: the tq_struct describing the task to add
 *
 * The task is appended to the packet's completion queue and will be run via
 * schedule_task() (see process_complete_tasks()) once the packet completes.
 */
void hpsb_add_packet_complete_task(struct hpsb_packet *packet, struct tq_struct *tq)
{
        list_add_tail(&tq->list, &packet->complete_tq);
        return;
}
  79. /**
  80.  * alloc_hpsb_packet - allocate new packet structure
  81.  * @data_size: size of the data block to be allocated
  82.  *
  83.  * This function allocates, initializes and returns a new &struct hpsb_packet.
  84.  * It can be used in interrupt context.  A header block is always included, its
  85.  * size is big enough to contain all possible 1394 headers.  The data block is
  86.  * only allocated when @data_size is not zero.
  87.  *
  88.  * For packets for which responses will be received the @data_size has to be big
  89.  * enough to contain the response's data block since no further allocation
  90.  * occurs at response matching time.
  91.  *
  92.  * The packet's generation value will be set to the current generation number
  93.  * for ease of use.  Remember to overwrite it with your own recorded generation
  94.  * number if you can not be sure that your code will not race with a bus reset.
  95.  *
  96.  * Return value: A pointer to a &struct hpsb_packet or NULL on allocation
  97.  * failure.
  98.  */
  99. struct hpsb_packet *alloc_hpsb_packet(size_t data_size)
  100. {
  101.         struct hpsb_packet *packet = NULL;
  102.         void *data = NULL;
  103.         int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
  104.         packet = kmem_cache_alloc(hpsb_packet_cache, kmflags);
  105.         if (packet == NULL)
  106.                 return NULL;
  107.         memset(packet, 0, sizeof(struct hpsb_packet));
  108.         packet->header = packet->embedded_header;
  109.         if (data_size) {
  110.                 data = kmalloc(data_size + 8, kmflags);
  111.                 if (data == NULL) {
  112. kmem_cache_free(hpsb_packet_cache, packet);
  113.                         return NULL;
  114.                 }
  115.                 packet->data = data;
  116.                 packet->data_size = data_size;
  117.         }
  118.         INIT_LIST_HEAD(&packet->complete_tq);
  119.         INIT_LIST_HEAD(&packet->list);
  120.         sema_init(&packet->state_change, 0);
  121.         packet->state = hpsb_unused;
  122.         packet->generation = -1;
  123.         packet->data_be = 1;
  124.         return packet;
  125. }
  126. /**
  127.  * free_hpsb_packet - free packet and data associated with it
  128.  * @packet: packet to free (is NULL safe)
  129.  *
  130.  * This function will free packet->data, packet->header and finally the packet
  131.  * itself.
  132.  */
  133. void free_hpsb_packet(struct hpsb_packet *packet)
  134. {
  135.         if (!packet) return;
  136.         kfree(packet->data);
  137.         kmem_cache_free(hpsb_packet_cache, packet);
  138. }
  139. int hpsb_reset_bus(struct hpsb_host *host, int type)
  140. {
  141.         if (!host->in_bus_reset) {
  142.                 host->driver->devctl(host, RESET_BUS, type);
  143.                 return 0;
  144.         } else {
  145.                 return 1;
  146.         }
  147. }
/*
 * Driver notification that a bus reset has begun: aborts all outstanding
 * requests and clears the host's per-bus bookkeeping.  Returns 1 (and does
 * nothing further) if a reset is already being processed, 0 otherwise.
 */
int hpsb_bus_reset(struct hpsb_host *host)
{
        if (host->in_bus_reset) {
                HPSB_NOTICE("%s called while bus reset already in progress",
                            __FUNCTION__);
                return 1;
        }

        abort_requests(host);
        host->in_bus_reset = 1;

        /* forget previous IRM / bus manager / topology information; it is
         * rebuilt during the SelfID phase */
        host->irm_id = -1;
        host->is_irm = 0;
        host->busmgr_id = -1;
        host->is_busmgr = 0;
        host->is_cycmst = 0;
        host->node_count = 0;
        host->selfid_count = 0;

        return 0;
}
/*
 * Verify num_of_selfids SelfIDs and return number of nodes.  Return zero in
 * case verification failed.
 *
 * Walks host->topology_map checking that plain SelfIDs carry strictly
 * ascending phy_ids and that extended SelfIDs carry consecutive sequence
 * numbers.  Side effects: updates host->nodes_active, host->irm_id (set to
 * the last link-active contender seen) and host->node_count.
 */
static int check_selfids(struct hpsb_host *host)
{
        int nodeid = -1;
        int rest_of_selfids = host->selfid_count;
        struct selfid *sid = (struct selfid *)host->topology_map;
        struct ext_selfid *esid;
        int esid_seq = 23;  /* guarantees a mismatch if the first SelfID is extended */

        host->nodes_active = 0;

        while (rest_of_selfids--) {
                if (!sid->extended) {
                        nodeid++;
                        esid_seq = 0;

                        /* phy_ids must appear in ascending order, one per node */
                        if (sid->phy_id != nodeid) {
                                HPSB_INFO("SelfIDs failed monotony check with "
                                          "%d", sid->phy_id);
                                return 0;
                        }

                        if (sid->link_active) {
                                host->nodes_active++;
                                /* the last active contender becomes the IRM */
                                if (sid->contender)
                                        host->irm_id = LOCAL_BUS | sid->phy_id;
                        }
                } else {
                        esid = (struct ext_selfid *)sid;

                        /* extended SelfIDs continue the current node and must
                         * be numbered consecutively */
                        if ((esid->phy_id != nodeid)
                            || (esid->seq_nr != esid_seq)) {
                                HPSB_INFO("SelfIDs failed monotony check with "
                                          "%d/%d", esid->phy_id, esid->seq_nr);
                                return 0;
                        }
                        esid_seq++;
                }
                sid++;
        }

        /* The last SelfID belongs to the root node, which must not report a
         * parent connection (port status 0x2) on any port -- cf.
         * build_speed_map() treating 0x3 as a child connection. */
        esid = (struct ext_selfid *)(sid - 1);
        while (esid->extended) {
                if ((esid->porta == 0x2) || (esid->portb == 0x2)
                    || (esid->portc == 0x2) || (esid->portd == 0x2)
                    || (esid->porte == 0x2) || (esid->portf == 0x2)
                    || (esid->portg == 0x2) || (esid->porth == 0x2)) {
                                HPSB_INFO("SelfIDs failed root check on "
                                          "extended SelfID");
                                return 0;
                }
                esid--;
        }

        sid = (struct selfid *)esid;
        if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
                        HPSB_INFO("SelfIDs failed root check");
                        return 0;
        }

        host->node_count = nodeid + 1;
        return 1;
}
/*
 * Build host->speed_map from the SelfID data: map[64*i + j] ends up holding
 * the highest usable speed between nodes i and j, i.e. the minimum link
 * speed capability along the tree path between them.
 */
static void build_speed_map(struct hpsb_host *host, int nodecount)
{
        /* NOTE(review): variable-length arrays on the kernel stack; the bus
         * limits nodecount to 63 nodes, so these stay small */
        char speedcap[nodecount];   /* per-node link speed capability */
        char cldcnt[nodecount];     /* per-node child count (direct, then total) */
        u8 *map = host->speed_map;
        struct selfid *sid;
        struct ext_selfid *esid;
        int i, j, n;

        /* start out assuming the maximum speed for every node pair */
        for (i = 0; i < (nodecount * 64); i += 64) {
                for (j = 0; j < nodecount; j++) {
                        map[i+j] = SPEED_400;
                }
        }

        for (i = 0; i < nodecount; i++) {
                cldcnt[i] = 0;
        }

        /* find direct children count and speed */
        /* SelfIDs are walked backwards (last SelfID = highest-numbered
         * node); port status 0x3 marks a connection to a child node */
        for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
                     n = nodecount - 1;
             (void *)sid >= (void *)host->topology_map; sid--) {
                if (sid->extended) {
                        esid = (struct ext_selfid *)sid;
                        if (esid->porta == 0x3) cldcnt[n]++;
                        if (esid->portb == 0x3) cldcnt[n]++;
                        if (esid->portc == 0x3) cldcnt[n]++;
                        if (esid->portd == 0x3) cldcnt[n]++;
                        if (esid->porte == 0x3) cldcnt[n]++;
                        if (esid->portf == 0x3) cldcnt[n]++;
                        if (esid->portg == 0x3) cldcnt[n]++;
                        if (esid->porth == 0x3) cldcnt[n]++;
                } else {
                        if (sid->port0 == 0x3) cldcnt[n]++;
                        if (sid->port1 == 0x3) cldcnt[n]++;
                        if (sid->port2 == 0x3) cldcnt[n]++;
                        speedcap[n] = sid->speed;
                        n--;
                }
        }

        /* set self mapping */
        for (i = 0; i < nodecount; i++) {
                map[64*i + i] = speedcap[i];
        }

        /* fix up direct children count to total children count;
         * also fix up speedcaps for sibling and parent communication */
        for (i = 1; i < nodecount; i++) {
                for (j = cldcnt[i], n = i - 1; j > 0; j--) {
                        cldcnt[i] += cldcnt[n];
                        speedcap[n] = MIN(speedcap[n], speedcap[i]);
                        n -= cldcnt[n] + 1;
                }
        }

        /* clamp every pair's speed by the bottleneck node n introduces at
         * the boundary of its subtree [n - cldcnt[n], n] */
        for (n = 0; n < nodecount; n++) {
                for (i = n - cldcnt[n]; i <= n; i++) {
                        for (j = 0; j < (n - cldcnt[n]); j++) {
                                map[j*64 + i] = map[i*64 + j] =
                                        MIN(map[i*64 + j], speedcap[n]);
                        }
                        for (j = n + 1; j < nodecount; j++) {
                                map[j*64 + i] = map[i*64 + j] =
                                        MIN(map[i*64 + j], speedcap[n]);
                        }
                }
        }
}
/*
 * Called by the low-level driver for each SelfID quadlet received during the
 * SelfID phase of a bus reset.  The quadlet is appended to the topology map;
 * SelfIDs arriving outside of a bus reset are logged and dropped.
 */
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
{
        if (host->in_bus_reset) {
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
                HPSB_INFO("Including SelfID 0x%x", sid);
#endif
                host->topology_map[host->selfid_count++] = sid;
        } else {
                HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
                            sid, (host->node_id & BUS_MASK) >> 6);
        }
}
/*
 * Called by the low-level driver when the SelfID phase has finished.
 * Validates the collected SelfIDs (retrying the bus reset up to 20 times on
 * failure), rebuilds the speed map, claims the IRM/bus-manager roles when
 * this node won IRM arbitration, and publishes the new bus generation.
 */
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
{
        if (!host->in_bus_reset)
                HPSB_NOTICE("SelfID completion called outside of bus reset!");

        host->node_id = LOCAL_BUS | phyid;
        host->is_root = isroot;

        if (!check_selfids(host)) {
                if (host->reset_retries++ < 20) {
                        /* selfid stage did not complete without error */
                        HPSB_NOTICE("Error in SelfID stage, resetting");
                        /* clear the flag so hpsb_reset_bus() actually resets */
                        host->in_bus_reset = 0;
                        hpsb_reset_bus(host, LONG_RESET);
                        return;
                } else {
                        HPSB_NOTICE("Stopping out-of-control reset loop");
                        HPSB_NOTICE("Warning - topology map and speed map will not be valid");
                }
        } else {
                build_speed_map(host, host->node_count);
        }

        /* irm_id is kept up to date by check_selfids() */
        if (host->irm_id == host->node_id) {
                host->is_irm = 1;
                host->is_busmgr = 1;
                host->busmgr_id = host->node_id;
                host->csr.bus_manager_id = host->node_id;
        }

        host->reset_retries = 0;
        if (isroot) {
                /* the root node provides the cycle master function */
                host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
                host->is_cycmst = 1;
        }

        /* bump the generation before clearing in_bus_reset so senders that
         * check both see a consistent state */
        atomic_inc(&host->generation);
        host->in_bus_reset = 0;
        highlevel_host_reset(host);
}
/*
 * Called by the low-level driver once a packet has been transmitted and the
 * ack code is known.  The packet completes immediately unless a response is
 * still expected (ACK_PENDING + expect_response), in which case it is queued
 * on the host's pending list for matching by handle_packet_response() and
 * timing out by abort_timedouts().
 */
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
                      int ackcode)
{
        unsigned long flags;

        packet->ack_code = ackcode;

        if (packet->no_waiter) {
                /* must not have a tlabel allocated */
                free_hpsb_packet(packet);
                return;
        }

        if (ackcode != ACK_PENDING || !packet->expect_response) {
                packet->state = hpsb_complete;
                /* NOTE(review): state_change is up()ed twice here, apparently
                 * covering both the queued->pending and pending->complete
                 * transitions for a waiter that down()s once per transition
                 * -- TODO confirm against the waiting callers. */
                up(&packet->state_change);
                up(&packet->state_change);
                process_complete_tasks(packet);
                return;
        }

        packet->state = hpsb_pending;
        packet->sendtime = jiffies;  /* reference point for abort_timedouts() */

        spin_lock_irqsave(&host->pending_pkt_lock, flags);
        list_add_tail(&packet->list, &host->pending_packets);
        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        up(&packet->state_change);
        /* arm the split-timeout handler */
        schedule_task(&host->timeout_tq);
}
/**
 * hpsb_send_packet - transmit a packet on the bus
 * @packet: packet to send
 *
 * The packet is sent through the host specified in the packet->host field.
 * Before sending, the packet's transmit speed is automatically determined using
 * the local speed map when it is an async, non-broadcast packet.
 *
 * Possibilities for failure are that host is either not initialized, in bus
 * reset, the packet's generation number doesn't match the current generation
 * number or the host reports a transmit error.
 *
 * Return value: False (0) on failure, true (1) otherwise.
 */
int hpsb_send_packet(struct hpsb_packet *packet)
{
        struct hpsb_host *host = packet->host;

        /* refuse to send stale packets across a bus reset boundary */
        if (host->is_shutdown || host->in_bus_reset
            || (packet->generation != get_hpsb_generation(host))) {
                return 0;
        }

        packet->state = hpsb_queued;

        /* async unicast: look up the usable speed toward the destination in
         * the speed map built from the SelfIDs (64 columns per node) */
        if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
                packet->speed_code =
                        host->speed_map[(host->node_id & NODE_MASK) * 64
                                       + (packet->node_id & NODE_MASK)];
        }

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
        switch (packet->speed_code) {
        case 2:
                dump_packet("send packet 400:", packet->header,
                            packet->header_size);
                break;
        case 1:
                dump_packet("send packet 200:", packet->header,
                            packet->header_size);
                break;
        default:
                dump_packet("send packet 100:", packet->header,
                            packet->header_size);
        }
#endif

        return host->driver->transmit_packet(host, packet);
}
  408. static void send_packet_nocare(struct hpsb_packet *packet)
  409. {
  410.         if (!hpsb_send_packet(packet)) {
  411.                 free_hpsb_packet(packet);
  412.         }
  413. }
/*
 * Match a received response against the host's pending request list (by
 * transaction label and source node id), verify the response tcode fits the
 * request tcode, copy the response header/payload into the original request
 * packet and complete it.  Unmatched responses are logged and dropped.
 */
void handle_packet_response(struct hpsb_host *host, int tcode, quadlet_t *data,
                            size_t size)
{
        struct hpsb_packet *packet = NULL;
        struct list_head *lh;
        int tcode_match = 0;
        int tlabel;
        unsigned long flags;

        /* transaction label from the response header */
        tlabel = (data[0] >> 10) & 0x3f;

        spin_lock_irqsave(&host->pending_pkt_lock, flags);

        list_for_each(lh, &host->pending_packets) {
                packet = list_entry(lh, struct hpsb_packet, list);
                if ((packet->tlabel == tlabel)
                    && (packet->node_id == (data[1] >> 16))){
                        break;
                }
        }

        /* loop ran off the end: no pending request matches */
        if (lh == &host->pending_packets) {
                HPSB_DEBUG("unsolicited response packet received - np");
                dump_packet("contents:", data, 16);
                spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
                return;
        }

        /* the response tcode must correspond to the request tcode */
        switch (packet->tcode) {
        case TCODE_WRITEQ:
        case TCODE_WRITEB:
                if (tcode == TCODE_WRITE_RESPONSE) tcode_match = 1;
                break;
        case TCODE_READQ:
                if (tcode == TCODE_READQ_RESPONSE) tcode_match = 1;
                break;
        case TCODE_READB:
                if (tcode == TCODE_READB_RESPONSE) tcode_match = 1;
                break;
        case TCODE_LOCK_REQUEST:
                if (tcode == TCODE_LOCK_RESPONSE) tcode_match = 1;
                break;
        }

        if (!tcode_match || (packet->tlabel != tlabel)
            || (packet->node_id != (data[1] >> 16))) {
                HPSB_INFO("unsolicited response packet received");
                dump_packet("contents:", data, 16);
                spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
                return;
        }

        list_del(&packet->list);
        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        /* FIXME - update size fields? */
        switch (tcode) {
        case TCODE_WRITE_RESPONSE:
                memcpy(packet->header, data, 12);
                break;
        case TCODE_READQ_RESPONSE:
                memcpy(packet->header, data, 16);
                break;
        case TCODE_READB_RESPONSE:
                memcpy(packet->header, data, 16);
                /* payload follows the 16-byte response header */
                memcpy(packet->data, data + 4, size - 16);
                break;
        case TCODE_LOCK_RESPONSE:
                memcpy(packet->header, data, 16);
                /* lock responses carry at most 8 bytes of payload */
                memcpy(packet->data, data + 4, (size - 16) > 8 ? 8 : size - 16);
                break;
        }

        packet->state = hpsb_complete;
        up(&packet->state_change);
        process_complete_tasks(packet);
}
  482. static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
  483.        quadlet_t *data, size_t dsize)
  484. {
  485.         struct hpsb_packet *p;
  486.         dsize += (dsize % 4 ? 4 - (dsize % 4) : 0);
  487.         p = alloc_hpsb_packet(dsize);
  488.         if (p == NULL) {
  489.                 /* FIXME - send data_error response */
  490.                 return NULL;
  491.         }
  492.         p->type = hpsb_async;
  493.         p->state = hpsb_unused;
  494.         p->host = host;
  495.         p->node_id = data[1] >> 16;
  496.         p->tlabel = (data[0] >> 10) & 0x3f;
  497.         p->no_waiter = 1;
  498. p->generation = get_hpsb_generation(host);
  499.         if (dsize % 4) {
  500.                 p->data[dsize / 4] = 0;
  501.         }
  502.         return p;
  503. }
  504. #define PREP_REPLY_PACKET(length) 
  505.                 packet = create_reply_packet(host, data, length); 
  506.                 if (packet == NULL) break
  507. static void handle_incoming_packet(struct hpsb_host *host, int tcode,
  508.    quadlet_t *data, size_t size, int write_acked)
  509. {
  510.         struct hpsb_packet *packet;
  511.         int length, rcode, extcode;
  512.         nodeid_t source = data[1] >> 16;
  513. nodeid_t dest = data[0] >> 16;
  514.         u64 addr;
  515.         /* big FIXME - no error checking is done for an out of bounds length */
  516.         switch (tcode) {
  517.         case TCODE_WRITEQ:
  518.                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
  519.                 rcode = highlevel_write(host, source, dest, data+3,
  520. addr, 4);
  521.                 if (!write_acked
  522.                     && ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
  523.                         /* not a broadcast write, reply */
  524.                         PREP_REPLY_PACKET(0);
  525.                         fill_async_write_resp(packet, rcode);
  526.                         send_packet_nocare(packet);
  527.                 }
  528.                 break;
  529.         case TCODE_WRITEB:
  530.                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
  531.                 rcode = highlevel_write(host, source, dest, data+4,
  532. addr, data[3]>>16);
  533.                 if (!write_acked
  534.                     && ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
  535.                         /* not a broadcast write, reply */
  536.                         PREP_REPLY_PACKET(0);
  537.                         fill_async_write_resp(packet, rcode);
  538.                         send_packet_nocare(packet);
  539.                 }
  540.                 break;
  541.         case TCODE_READQ:
  542.                 PREP_REPLY_PACKET(0);
  543.                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
  544.                 rcode = highlevel_read(host, source, data, addr, 4);
  545.                 fill_async_readquad_resp(packet, rcode, *data);
  546.                 send_packet_nocare(packet);
  547.                 break;
  548.         case TCODE_READB:
  549.                 length = data[3] >> 16;
  550.                 PREP_REPLY_PACKET(length);
  551.                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
  552.                 rcode = highlevel_read(host, source, packet->data, addr,
  553.                                        length);
  554.                 fill_async_readblock_resp(packet, rcode, length);
  555.                 send_packet_nocare(packet);
  556.                 break;
  557.         case TCODE_LOCK_REQUEST:
  558.                 length = data[3] >> 16;
  559.                 extcode = data[3] & 0xffff;
  560.                 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
  561.                 PREP_REPLY_PACKET(8);
  562.                 if ((extcode == 0) || (extcode >= 7)) {
  563.                         /* let switch default handle error */
  564.                         length = 0;
  565.                 }
  566.                 switch (length) {
  567.                 case 4:
  568.                         rcode = highlevel_lock(host, source, packet->data, addr,
  569.                                                data[4], 0, extcode);
  570.                         fill_async_lock_resp(packet, rcode, extcode, 4);
  571.                         break;
  572.                 case 8:
  573.                         if ((extcode != EXTCODE_FETCH_ADD) 
  574.                             && (extcode != EXTCODE_LITTLE_ADD)) {
  575.                                 rcode = highlevel_lock(host, source,
  576.                                                        packet->data, addr,
  577.                                                        data[5], data[4], 
  578.                                                        extcode);
  579.                                 fill_async_lock_resp(packet, rcode, extcode, 4);
  580.                         } else {
  581.                                 rcode = highlevel_lock64(host, source,
  582.                                              (octlet_t *)packet->data, addr,
  583.                                              *(octlet_t *)(data + 4), 0ULL,
  584.                                              extcode);
  585.                                 fill_async_lock_resp(packet, rcode, extcode, 8);
  586.                         }
  587.                         break;
  588.                 case 16:
  589.                         rcode = highlevel_lock64(host, source,
  590.                                                  (octlet_t *)packet->data, addr,
  591.                                                  *(octlet_t *)(data + 6),
  592.                                                  *(octlet_t *)(data + 4), 
  593.                                                  extcode);
  594.                         fill_async_lock_resp(packet, rcode, extcode, 8);
  595.                         break;
  596.                 default:
  597.                         fill_async_lock_resp(packet, RCODE_TYPE_ERROR,
  598.                                              extcode, 0);
  599.                 }
  600.                 send_packet_nocare(packet);
  601.                 break;
  602.         }
  603. }
  604. #undef PREP_REPLY_PACKET
  605. void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
  606.                           int write_acked)
  607. {
  608.         int tcode;
  609.         if (host->in_bus_reset) {
  610.                 HPSB_INFO("received packet during reset; ignoring");
  611.                 return;
  612.         }
  613. #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
  614.         dump_packet("received packet:", data, size);
  615. #endif
  616.         tcode = (data[0] >> 4) & 0xf;
  617.         switch (tcode) {
  618.         case TCODE_WRITE_RESPONSE:
  619.         case TCODE_READQ_RESPONSE:
  620.         case TCODE_READB_RESPONSE:
  621.         case TCODE_LOCK_RESPONSE:
  622.                 handle_packet_response(host, tcode, data, size);
  623.                 break;
  624.         case TCODE_WRITEQ:
  625.         case TCODE_WRITEB:
  626.         case TCODE_READQ:
  627.         case TCODE_READB:
  628.         case TCODE_LOCK_REQUEST:
  629.                 handle_incoming_packet(host, tcode, data, size, write_acked);
  630.                 break;
  631.         case TCODE_ISO_DATA:
  632.                 highlevel_iso_receive(host, data, size);
  633.                 break;
  634.         case TCODE_CYCLE_START:
  635.                 /* simply ignore this packet if it is passed on */
  636.                 break;
  637.         default:
  638.                 HPSB_NOTICE("received packet with bogus transaction code %d", 
  639.                             tcode);
  640.                 break;
  641.         }
  642. }
  643. void abort_requests(struct hpsb_host *host)
  644. {
  645.         unsigned long flags;
  646.         struct hpsb_packet *packet;
  647.         struct list_head *lh;
  648.         LIST_HEAD(llist);
  649.         host->driver->devctl(host, CANCEL_REQUESTS, 0);
  650.         spin_lock_irqsave(&host->pending_pkt_lock, flags);
  651.         list_splice(&host->pending_packets, &llist);
  652.         INIT_LIST_HEAD(&host->pending_packets);
  653.         spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
  654.         list_for_each(lh, &llist) {
  655.                 packet = list_entry(lh, struct hpsb_packet, list);
  656.                 packet->state = hpsb_complete;
  657.                 packet->ack_code = ACKX_ABORTED;
  658.                 up(&packet->state_change);
  659. process_complete_tasks(packet);
  660.         }
  661. }
/*
 * Complete (with ACKX_TIMEOUT) every pending packet whose split-transaction
 * timeout has expired.  Re-arms itself via host->timeout_tq while packets
 * remain pending.
 */
void abort_timedouts(struct hpsb_host *host)
{
        unsigned long flags;
        struct hpsb_packet *packet;
        unsigned long expire;
        struct list_head *lh, *next;
        LIST_HEAD(expiredlist);

        spin_lock_irqsave(&host->csr.lock, flags);
        /* convert the CSR SPLIT_TIMEOUT registers into jiffies; hi appears
         * to count seconds and lo's top bits 1/8000 s units -- TODO confirm
         * the register layout against the CSR code */
        expire = (host->csr.split_timeout_hi * 8000
                  + (host->csr.split_timeout_lo >> 19))
                * HZ / 8000;
        /* Avoid shortening of timeout due to rounding errors: */
        expire++;
        spin_unlock_irqrestore(&host->csr.lock, flags);

        spin_lock_irqsave(&host->pending_pkt_lock, flags);

        /* move expired packets onto a private list; iteration caches ->next
         * because entries are unlinked while walking */
        for (lh = host->pending_packets.next; lh != &host->pending_packets; lh = next) {
                packet = list_entry(lh, struct hpsb_packet, list);
                next = lh->next;
                if (time_before(packet->sendtime + expire, jiffies)) {
                        list_del(&packet->list);
                        list_add(&packet->list, &expiredlist);
                }
        }

        /* still-pending packets need another timeout pass later */
        if (!list_empty(&host->pending_packets))
                schedule_task(&host->timeout_tq);

        spin_unlock_irqrestore(&host->pending_pkt_lock, flags);

        /* complete the expired packets outside the pending-list lock */
        list_for_each(lh, &expiredlist) {
                packet = list_entry(lh, struct hpsb_packet, list);
                packet->state = hpsb_complete;
                packet->ack_code = ACKX_TIMEOUT;
                up(&packet->state_change);
                process_complete_tasks(packet);
        }
}
/*
 * character device dispatching (see ieee1394_core.h)
 * Dan Maas <dmaas@dcine.com>
 */

/* One entry per block of minor numbers; a NULL file_ops marks a free
 * block. */
static struct {
        struct file_operations *file_ops;
        struct module *module;
} ieee1394_chardevs[16];

/* protects ieee1394_chardevs[] */
static rwlock_t ieee1394_chardevs_lock = RW_LOCK_UNLOCKED;

static int ieee1394_dispatch_open(struct inode *inode, struct file *file);

/* top-level fops: open() re-dispatches to the registered driver's fops */
static struct file_operations ieee1394_chardev_ops = {
        .owner =THIS_MODULE,
        .open = ieee1394_dispatch_open,
};

devfs_handle_t ieee1394_devfs_handle;
  711. /* claim a block of minor numbers */
  712. int ieee1394_register_chardev(int blocknum,
  713.       struct module *module,
  714.       struct file_operations *file_ops)
  715. {
  716. int retval;
  717. if( (blocknum < 0) || (blocknum > 15) )
  718. return -EINVAL;
  719. write_lock(&ieee1394_chardevs_lock);
  720. if(ieee1394_chardevs[blocknum].file_ops == NULL) {
  721. /* grab the minor block */
  722. ieee1394_chardevs[blocknum].file_ops = file_ops;
  723. ieee1394_chardevs[blocknum].module = module;
  724. retval = 0;
  725. } else {
  726. /* block already taken */
  727. retval = -EBUSY;
  728. }
  729. write_unlock(&ieee1394_chardevs_lock);
  730. return retval;
  731. }
  732. /* release a block of minor numbers */
  733. void ieee1394_unregister_chardev(int blocknum)
  734. {
  735. if( (blocknum < 0) || (blocknum > 15) )
  736. return;
  737. write_lock(&ieee1394_chardevs_lock);
  738. if(ieee1394_chardevs[blocknum].file_ops) {
  739. ieee1394_chardevs[blocknum].file_ops = NULL;
  740. ieee1394_chardevs[blocknum].module = NULL;
  741. }
  742. write_unlock(&ieee1394_chardevs_lock);
  743. }
  744. /*
  745.   ieee1394_get_chardev() - look up and acquire a character device
  746.   driver that has previously registered using ieee1394_register_chardev()
  747.   
  748.   On success, returns 1 and sets module and file_ops to the driver.
  749.   The module will have an incremented reference count.
  750.    
  751.   On failure, returns 0.
  752.   The module will NOT have an incremented reference count.
  753. */
  754. static int ieee1394_get_chardev(int blocknum,
  755. struct module **module,
  756. struct file_operations **file_ops)
  757. {
  758. int ret = 0;
  759.        
  760. if( (blocknum < 0) || (blocknum > 15) )
  761. return ret;
  762. read_lock(&ieee1394_chardevs_lock);
  763. *module = ieee1394_chardevs[blocknum].module;
  764. *file_ops = ieee1394_chardevs[blocknum].file_ops;
  765. if(*file_ops == NULL)
  766. goto out;
  767. /* don't need try_inc_mod_count if the driver is non-modular */
  768. if(*module && (try_inc_mod_count(*module) == 0))
  769. goto out;
  770. /* success! */
  771. ret = 1;
  772. out:
  773. read_unlock(&ieee1394_chardevs_lock);
  774. return ret;
  775. }
/* the point of entry for open() on any ieee1394 character device */
static int ieee1394_dispatch_open(struct inode *inode, struct file *file)
{
	struct file_operations *file_ops;
	struct module *module;
	int blocknum;
	int retval;

	/*
	  Maintaining correct module reference counts is tricky here!

	  The key thing to remember is that the VFS increments the
	  reference count of ieee1394 before it calls
	  ieee1394_dispatch_open().

	  If the open() succeeds, then we need to transfer this extra
	  reference to the task-specific driver module (e.g. raw1394).
	  The VFS will deref the driver module automatically when the
	  file is later released.

	  If the open() fails, then the VFS will drop the
	  reference count of whatever module file->f_op->owner points
	  to, immediately after this function returns.
	*/

	/* shift away lower four bits of the minor
	   to get the index of the ieee1394_driver
	   we want */
	blocknum = (minor(inode->i_rdev) >> 4) & 0xF;

	/* look up the driver; on success ieee1394_get_chardev() has
	   already taken a reference on the driver's module */
	if (ieee1394_get_chardev(blocknum, &module, &file_ops) == 0)
		return -ENODEV;

	/* redirect all subsequent requests to the driver's
	   own file_operations */
	file->f_op = file_ops;

	/* at this point BOTH ieee1394 and the task-specific driver have
	   an extra reference */

	/* follow through with the open() */
	retval = file_ops->open(inode, file);

	if (retval == 0) {
		/* If the open() succeeded, then ieee1394 will be left
		   with an extra module reference, so we discard it here.

		   The task-specific driver still has the extra
		   reference given to it by ieee1394_get_chardev().
		   This extra reference prevents the module from
		   unloading while the file is open, and will be
		   dropped by the VFS when the file is released.
		*/

		if (THIS_MODULE)
			__MOD_DEC_USE_COUNT((struct module*) THIS_MODULE);

		/* note that if ieee1394 is compiled into the kernel,
		   THIS_MODULE will be (void*) NULL, hence the if and
		   the cast are necessary */
	} else {
		/* if the open() failed, then we need to drop the
		   extra reference we gave to the task-specific
		   driver */

		if (module)
			__MOD_DEC_USE_COUNT(module);

		/* point the file's f_ops back to ieee1394. The VFS will then
		   decrement ieee1394's reference count immediately after this
		   function returns. */

		file->f_op = &ieee1394_chardev_ops;
	}

	return retval;
}
  837. struct proc_dir_entry *ieee1394_procfs_entry;
  838. static int __init ieee1394_init(void)
  839. {
  840. hpsb_packet_cache = kmem_cache_create("hpsb_packet", sizeof(struct hpsb_packet),
  841.       0, 0, NULL, NULL);
  842. ieee1394_devfs_handle = devfs_mk_dir(NULL, "ieee1394", NULL);
  843. if (register_chrdev(IEEE1394_MAJOR, "ieee1394", &ieee1394_chardev_ops)) {
  844. HPSB_ERR("unable to register character device major %d!n", IEEE1394_MAJOR);
  845. devfs_unregister(ieee1394_devfs_handle);
  846. return -ENODEV;
  847. }
  848. #ifdef CONFIG_PROC_FS
  849. /* Must be done before we start everything else, since the drivers
  850.  * may use it.  */
  851. ieee1394_procfs_entry = proc_mkdir( "ieee1394", proc_bus);
  852. if (ieee1394_procfs_entry == NULL) {
  853. HPSB_ERR("unable to create /proc/bus/ieee1394n");
  854. unregister_chrdev(IEEE1394_MAJOR, "ieee1394");
  855. devfs_unregister(ieee1394_devfs_handle);
  856. return -ENOMEM;
  857. }
  858. ieee1394_procfs_entry->owner = THIS_MODULE;
  859. #endif
  860. init_hpsb_highlevel();
  861. init_csr();
  862. if (!disable_nodemgr)
  863. init_ieee1394_nodemgr(disable_hotplug);
  864. else
  865. HPSB_INFO("nodemgr functionality disabled");
  866. return 0;
  867. }
/*
 * Module teardown.  NOTE(review): the sequence mirrors ieee1394_init() in
 * reverse and looks order-dependent (nodemgr/CSR are stopped before the
 * packet cache they may still be using is destroyed) - keep the order if
 * modifying this function.
 */
static void __exit ieee1394_cleanup(void)
{
	/* nodemgr was only started when not disabled at load time */
	if (!disable_nodemgr)
		cleanup_ieee1394_nodemgr();

	cleanup_csr();

	kmem_cache_destroy(hpsb_packet_cache);

	unregister_chrdev(IEEE1394_MAJOR, "ieee1394");

	/* it's ok to pass a NULL devfs_handle to devfs_unregister */
	devfs_unregister(ieee1394_devfs_handle);

	/* no-op stub when CONFIG_PROC_FS is not set */
	remove_proc_entry("ieee1394", proc_bus);
}
  879. module_init(ieee1394_init);
  880. module_exit(ieee1394_cleanup);
/* Exported symbols */

/* host controller management */
EXPORT_SYMBOL(hpsb_alloc_host);
EXPORT_SYMBOL(hpsb_add_host);
EXPORT_SYMBOL(hpsb_remove_host);
EXPORT_SYMBOL(hpsb_ref_host);
EXPORT_SYMBOL(hpsb_unref_host);
EXPORT_SYMBOL(hpsb_speedto_str);

/* packet allocation, sending and completion */
EXPORT_SYMBOL(hpsb_add_packet_complete_task);
EXPORT_SYMBOL(alloc_hpsb_packet);
EXPORT_SYMBOL(free_hpsb_packet);
EXPORT_SYMBOL(hpsb_send_packet);

/* bus reset / self-ID handling */
EXPORT_SYMBOL(hpsb_reset_bus);
EXPORT_SYMBOL(hpsb_bus_reset);
EXPORT_SYMBOL(hpsb_selfid_received);
EXPORT_SYMBOL(hpsb_selfid_complete);
EXPORT_SYMBOL(hpsb_packet_sent);
EXPORT_SYMBOL(hpsb_packet_received);

/* transaction labels and packet construction */
EXPORT_SYMBOL(get_tlabel);
EXPORT_SYMBOL(free_tlabel);
EXPORT_SYMBOL(fill_async_readquad);
EXPORT_SYMBOL(fill_async_readquad_resp);
EXPORT_SYMBOL(fill_async_readblock);
EXPORT_SYMBOL(fill_async_readblock_resp);
EXPORT_SYMBOL(fill_async_writequad);
EXPORT_SYMBOL(fill_async_writeblock);
EXPORT_SYMBOL(fill_async_write_resp);
EXPORT_SYMBOL(fill_async_lock);
EXPORT_SYMBOL(fill_async_lock_resp);
EXPORT_SYMBOL(fill_iso_packet);
EXPORT_SYMBOL(fill_phy_packet);
EXPORT_SYMBOL(hpsb_make_readqpacket);
EXPORT_SYMBOL(hpsb_make_readbpacket);
EXPORT_SYMBOL(hpsb_make_writeqpacket);
EXPORT_SYMBOL(hpsb_make_writebpacket);
EXPORT_SYMBOL(hpsb_make_lockpacket);
EXPORT_SYMBOL(hpsb_make_phypacket);
EXPORT_SYMBOL(hpsb_packet_success);
EXPORT_SYMBOL(hpsb_make_packet);

/* blocking read/write/lock transactions */
EXPORT_SYMBOL(hpsb_read);
EXPORT_SYMBOL(hpsb_write);
EXPORT_SYMBOL(hpsb_lock);

/* highlevel driver interface */
EXPORT_SYMBOL(hpsb_register_highlevel);
EXPORT_SYMBOL(hpsb_unregister_highlevel);
EXPORT_SYMBOL(hpsb_register_addrspace);
EXPORT_SYMBOL(hpsb_listen_channel);
EXPORT_SYMBOL(hpsb_unlisten_channel);
EXPORT_SYMBOL(highlevel_read);
EXPORT_SYMBOL(highlevel_write);
EXPORT_SYMBOL(highlevel_lock);
EXPORT_SYMBOL(highlevel_lock64);
EXPORT_SYMBOL(highlevel_add_host);
EXPORT_SYMBOL(highlevel_remove_host);
EXPORT_SYMBOL(highlevel_host_reset);

/* nodemgr interface */
EXPORT_SYMBOL(hpsb_guid_get_entry);
EXPORT_SYMBOL(hpsb_nodeid_get_entry);
EXPORT_SYMBOL(hpsb_node_fill_packet);
EXPORT_SYMBOL(hpsb_node_read);
EXPORT_SYMBOL(hpsb_node_write);
EXPORT_SYMBOL(hpsb_node_lock);
EXPORT_SYMBOL(hpsb_register_protocol);
EXPORT_SYMBOL(hpsb_unregister_protocol);
EXPORT_SYMBOL(hpsb_release_unit_directory);

/* character device minor-block dispatching */
EXPORT_SYMBOL(ieee1394_register_chardev);
EXPORT_SYMBOL(ieee1394_unregister_chardev);
EXPORT_SYMBOL(ieee1394_devfs_handle);
EXPORT_SYMBOL(ieee1394_procfs_entry);