2 * ASIX AX8817x USB 2.0 10/100/HomePNA Ethernet controller driver
4 * $Id: ax8817x.c,v 1.11 2003/06/15 19:00:02 dhollis Exp $
6 * Copyright (c) 2002-2003 TiVo Inc.
8 * This software may be used and distributed according to the terms
9 * of the GNU General Public License, incorporated herein by reference.
13 * 2003-06-15 - Dave Hollis <dhollis@davehollis.com> 2.0.0
14 * * Remove crc32 inline function, use core kernel instead
15 * * Set sane defaults for rx_buffers
16 * * Fix ethtool GETDRVINFO bits - use strlcpy and
19 * 2003-06-05 - Dave Hollis <dhollis@davehollis.com> 0.10.0
20 * * Port to 2.5 series kernels
21 * * Remove #if 0 blocks that are confirmed
23 * * Re-did tx routines based off pegasus driver.
24 * This resolved hard crashes and greatly simplified
26 * * Redo mii/ethtool routines
28 * 2003-05-31 - Dave Hollis <dhollis@davehollis.com> 0.9.8
29 * * Don't stop/start the queue in start_xmit
30 * * Swallow URB status upon hard removal
31 * * Cleanup remaining comments (kill // style)
33 * 2003-05-29 - Dave Hollis <dhollis@davehollis.com> 0.9.7
35 * * Follow-up on suggestions from David Brownell &
36 * Oliver Neukum which should help with robustness
37 * * Use ether_crc from stock kernel if available
39 * 2003-05-28 - Dave Hollis <dhollis@davehollis.com> 0.9.6
40 * * Added basic ethtool & mii support
42 * 2003-05-28 - Dave Hollis <dhollis@davehollis.com> 0.9.5
43 * * Workout devrequest change to usb_ctrlrequest structure
44 * * Replace FILL_BULK_URB macros to non-deprecated
45 * usb_fill_bulk_urb macros
46 * * Replace printks with equivalent macros
47 * * Use defines for module description, version, author to
48 * simplify future changes
53 * Fix mii/ethtool output
56 #include <linux/slab.h>
58 #include <linux/init.h>
59 #include <linux/module.h>
60 #include <linux/usb.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/ethtool.h>
65 #include <linux/skbuff.h>
66 #include <linux/mii.h>
67 #include <linux/crc32.h>
68 #include <asm/uaccess.h>
69 #include <linux/version.h>
71 /* Version Information */
72 #define DRIVER_VERSION "v2.0.0"
73 #define DRIVER_AUTHOR "TiVo, Inc."
74 #define DRIVER_DESC "ASIX AX8817x USB Ethernet driver"
75 #define DRIVER_NAME "ax8817x"
77 MODULE_DESCRIPTION(DRIVER_DESC);
78 MODULE_AUTHOR(DRIVER_AUTHOR);
79 MODULE_LICENSE("GPL");
/* bmRequestType values for the chip's vendor-specific control requests */
81 #define AX_REQ_READ ( USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE )
82 #define AX_REQ_WRITE ( USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE )
/* Vendor command codes (bRequest) understood by the AX8817x */
84 #define AX_CMD_SET_SW_MII 0x06
85 #define AX_CMD_READ_MII_REG 0x07
86 #define AX_CMD_WRITE_MII_REG 0x08
87 #define AX_CMD_SET_HW_MII 0x0a
88 #define AX_CMD_WRITE_RX_CTL 0x10
89 #define AX_CMD_WRITE_MULTI_FILTER 0x16
90 #define AX_CMD_READ_NODE_ID 0x17
91 #define AX_CMD_READ_PHY_ID 0x19
92 #define AX_CMD_WRITE_MEDIUM_MODE 0x1b
93 #define AX_CMD_WRITE_GPIOS 0x1f
/* Buffer sizing and timeouts (jiffies) */
95 #define AX_RX_MAX ETH_FRAME_LEN
96 #define AX_TIMEOUT_CMD ( HZ / 10 )
97 #define AX_TIMEOUT_TX ( HZ * 2 )
98 #define AX_MAX_MCAST 64
/* Overall driver lifecycle states (ax8817x_info.drv_state) */
100 #define AX_DRV_STATE_INITIALIZING 0x00
101 #define AX_DRV_STATE_RUNNING 0x01
102 #define AX_DRV_STATE_EXITING 0x02
/* PHY link-monitoring state machine, driven by ax_int_callback() and
   ax_phy_cmd_callback().  POLLING_1..4 step through: switch to software
   MII, read BMSR, read LPA, switch back to hardware MII. */
104 #define AX_PHY_STATE_INITIALIZING 0x00
105 #define AX_PHY_STATE_NO_LINK 0x01
106 #define AX_PHY_STATE_POLLING_1 0x02
107 #define AX_PHY_STATE_POLLING_2 0x03
108 #define AX_PHY_STATE_POLLING_3 0x04
109 #define AX_PHY_STATE_POLLING_4 0x05
110 #define AX_PHY_STATE_SETTING_MAC 0x06
111 #define AX_PHY_STATE_LINK 0x07
112 #define AX_PHY_STATE_ABORT_POLL 0x08
113 #define AX_PHY_STATE_ABORTING 0x09
/* Max BMSR re-reads while waiting for autonegotiation to complete */
115 #define AX_MAX_PHY_RETRY 50
117 #define AX_RX_URBS_DEFAULT 2
/* Number of concurrently-queued rx URBs; module parameter, default 2 */
119 static int n_rx_urbs = AX_RX_URBS_DEFAULT;
121 MODULE_PARM(n_rx_urbs, "i");
122 MODULE_PARM_DESC(n_rx_urbs,
123 "Number of rx buffers to queue at once (def 2)");
/* Completion callback for a queued control request; a non-zero return
   asks ax_run_ctl_queue() to requeue the request. */
127 typedef int (*ax_cmd_callback_t) (struct ax8817x_info *,
128 struct ax_cmd_req *);
/* Per-request bookkeeping for the serialized control-message queue.
   NOTE(review): the 'struct ax_cmd_req {' opening line and several
   members used elsewhere (status, priv, data, data_size, timeout) are
   missing from this extract -- confirm against the full source. */
131 struct list_head list;
132 ax_cmd_callback_t cmd_callback;
138 struct usb_ctrlrequest devreq;
/* Per-adapter driver state; allocated in ax8817x_bind() with the
   rx_urbs pointer array appended directly after the struct. */
141 struct ax8817x_info {
142 struct usb_device *usb;
143 struct net_device *net;
144 struct net_device_stats stats;
145 struct mii_if_info mii;
/* points at the n_rx_urbs-element array that follows this struct */
146 struct urb **rx_urbs;
/* pending ax_cmd_req entries; manipulated under ctl_lock */
151 struct list_head ctl_queue;
/* count of rx URBs whose skb refill failed and is still owed */
153 atomic_t rx_refill_cnt;
/* dedicated request used by the PHY polling state machine */
154 struct ax_cmd_req phy_req;
/* Supported devices.  driver_info packs three per-model GPIO byte
   values that ax8817x_bind() writes out one byte at a time. */
161 const struct usb_device_id ax8817x_id_table[] __devinitdata = {
162 /* Linksys USB200M */
163 {USB_DEVICE(0x077b, 0x2226), driver_info:0x00130103},
164 /* Hawking UF200, TRENDnet TU2-ET100 */
165 {USB_DEVICE(0x07b8, 0x420a), driver_info:0x001f1d1f},
/* NOTE(review): the comment naming device 0x0846:0x1040 is missing
   from this extract (presumably a Netgear model -- confirm). */
167 {USB_DEVICE(0x0846, 0x1040), driver_info:0x00130103},
168 /* D-Link DUB-E100 */
169 {USB_DEVICE(0x2001, 0x1a00), driver_info:0x009f9d9f},
174 MODULE_DEVICE_TABLE(usb, ax8817x_id_table);
/* Forward declarations: these are referenced before their definitions. */
177 static void ax_run_ctl_queue(struct ax8817x_info *, struct ax_cmd_req *,
179 static void ax_rx_callback(struct urb *, struct pt_regs *);
/*
 * Completion handler for the control URB.  Feeds the URB's error status
 * (or, on success, the transferred byte count) back into
 * ax_run_ctl_queue() so the finished request is dequeued and the next
 * queued request, if any, is submitted.
 */
181 static void ax_ctl_callback(struct urb *urb, struct pt_regs *regs)
183 struct ax8817x_info *ax_info =
184 (struct ax8817x_info *) urb->context;
186 ax_run_ctl_queue(ax_info, NULL,
187 urb->status ? urb->status : urb->actual_length);
/*
 * Serialize vendor control requests through the single ctl_urb.
 * Two entry modes:
 *   - req != NULL: enqueue a new request (status ignored);
 *   - req == NULL: called from ax_ctl_callback() with the completed
 *     URB's result in 'status'; the head entry is dequeued, its
 *     cmd_callback invoked (non-zero return requeues it), and the next
 *     pending request is submitted.
 * NOTE(review): several lines are missing from this extract (the
 * 'flags' declaration, some braces and else-arms); code kept verbatim.
 */
193 static void ax_run_ctl_queue(struct ax8817x_info *ax_info,
194 struct ax_cmd_req *req, int status)
196 struct ax_cmd_req *next_req = NULL;
197 struct ax_cmd_req *last_req = NULL;
200 /* Need to lock around queue list manipulation */
201 spin_lock_irqsave(&ax_info->ctl_lock, flags);
205 list_entry(ax_info->ctl_queue.next, struct ax_cmd_req,
/* Enqueue path: mark in-progress and append to the tail. */
208 if (list_empty(&ax_info->ctl_queue)) {
212 req->status = -EINPROGRESS;
213 list_add_tail(&req->list, &ax_info->ctl_queue);
/* Completion path: retire the finished head entry. */
217 if (last_req != NULL) {
218 /* dequeue completed entry */
219 list_del(&last_req->list);
221 last_req->status = status;
222 if (last_req->cmd_callback(ax_info, last_req)) {
223 /* requeue if told to do so */
224 last_req->status = -EINPROGRESS;
225 list_add_tail(&last_req->list,
226 &ax_info->ctl_queue);
229 if (list_empty(&ax_info->ctl_queue)) {
233 list_entry(ax_info->ctl_queue.next,
234 struct ax_cmd_req, list);
238 spin_unlock_irqrestore(&ax_info->ctl_lock, flags);
240 if (next_req == NULL) {
/* Submit the next request outside the lock. */
244 /* XXX: do something with timeout */
245 usb_fill_control_urb(ax_info->ctl_urb, ax_info->usb,
247 bRequestType & USB_DIR_IN ?
248 usb_rcvctrlpipe(ax_info->usb,
250 usb_sndctrlpipe(ax_info->usb, 0),
251 (void *) &next_req->devreq,
252 next_req->data, next_req->data_size,
253 ax_ctl_callback, ax_info);
255 status = usb_submit_urb(ax_info->ctl_urb, GFP_ATOMIC);
/* NOTE(review): submit-failure handling after re-taking the lock is
   missing from this extract. */
262 spin_lock_irqsave(&ax_info->ctl_lock, flags);
/*
 * Completion callback for synchronous requests: req->priv carries the
 * wait queue head of the thread sleeping in ax_control_msg(); wake it.
 * (Wake-up and return statements are missing from this extract.)
 */
266 static int ax_sync_cmd_callback(struct ax8817x_info *unused,
267 struct ax_cmd_req *req)
269 wait_queue_head_t *wq = (wait_queue_head_t *) req->priv;
/*
 * Completion callback for fire-and-forget requests: log failures
 * (no caller is waiting for the result) and free the request along
 * with its data buffer, if any.
 */
276 static int ax_async_cmd_callback(struct ax8817x_info *unused,
277 struct ax_cmd_req *req)
279 if (req->status < 0) {
280 err("%s: Async command %d failed: %d\n", __FUNCTION__,
281 req->devreq.bRequest, req->status);
284 /* Nothing else to do here, just need to free the request (and its
286 if (req->data != NULL) {
/*
 * Synchronous control transfer through the serialized queue.
 * Builds an ax_cmd_req, queues it, and sleeps (uninterruptibly) on a
 * stack-local wait queue until ax_sync_cmd_callback() wakes us.
 */
295 * This is mostly the same as usb_control_msg(), except that it is able
296 * to queue control messages
298 static int ax_control_msg(struct ax8817x_info *ax_info, u8 requesttype,
299 u8 request, u16 value, u16 index, void *data,
300 u16 size, int timeout)
302 struct ax_cmd_req *req;
303 DECLARE_WAIT_QUEUE_HEAD(wq);
304 DECLARE_WAITQUEUE(wait, current);
307 req = kmalloc(sizeof(struct ax_cmd_req), GFP_KERNEL);
/* Fill in the USB setup packet; multi-byte fields are little-endian
   on the wire. */
312 req->devreq.bRequestType = requesttype;
313 req->devreq.bRequest = request;
314 req->devreq.wValue = cpu_to_le16(value);
315 req->devreq.wIndex = cpu_to_le16(index);
316 req->devreq.wLength = cpu_to_le16(size);
318 req->data_size = size;
319 req->timeout = timeout;
/* Go to sleep before queueing so the wake-up cannot be missed. */
322 set_current_state(TASK_UNINTERRUPTIBLE);
323 add_wait_queue(&wq, &wait);
325 req->cmd_callback = ax_sync_cmd_callback;
327 ax_run_ctl_queue(ax_info, req, 0);
/* NOTE(review): the schedule()/cleanup/return tail of this function is
   missing from this extract. */
/*
 * Asynchronous (fire-and-forget) variant of ax_control_msg(): safe in
 * atomic context (GFP_ATOMIC allocation, no sleeping).  Failures are
 * only logged by ax_async_cmd_callback(); the caller gets no result.
 */
338 * Same, but can be used asynchronously, may fail, and returns no exit
341 static void ax_control_msg_async(struct ax8817x_info *ax_info,
342 u8 requesttype, u8 request, u16 value,
343 u16 index, void *data, u16 size,
346 struct ax_cmd_req *req;
348 req = kmalloc(sizeof(struct ax_cmd_req), GFP_ATOMIC);
350 /* There's not much else we can do here... */
351 err("%s: Failed alloc\n", __FUNCTION__);
/* Fill in the USB setup packet (little-endian multi-byte fields). */
355 req->devreq.bRequestType = requesttype;
356 req->devreq.bRequest = request;
357 req->devreq.wValue = cpu_to_le16(value);
358 req->devreq.wIndex = cpu_to_le16(index);
359 req->devreq.wLength = cpu_to_le16(size);
361 req->data_size = size;
362 req->timeout = timeout;
364 req->cmd_callback = ax_async_cmd_callback;
366 ax_run_ctl_queue(ax_info, req, 0);
369 static inline int ax_read_cmd(struct ax8817x_info *ax_info, u8 cmd,
370 u16 value, u16 index, u16 size, void *data)
372 return ax_control_msg(ax_info, AX_REQ_READ, cmd, value, index,
373 data, size, AX_TIMEOUT_CMD);
376 static inline int ax_write_cmd(struct ax8817x_info *ax_info, u8 cmd,
377 u16 value, u16 index, u16 size, void *data)
379 return ax_control_msg(ax_info, AX_REQ_WRITE, cmd, value, index,
380 data, size, AX_TIMEOUT_CMD);
383 static inline void ax_write_cmd_async(struct ax8817x_info *ax_info, u8 cmd,
384 u16 value, u16 index, u16 size,
387 ax_control_msg_async(ax_info, AX_REQ_WRITE, cmd, value, index,
388 data, size, AX_TIMEOUT_CMD);
/*
 * Attach a freshly allocated skb to an rx URB and resubmit it on bulk-in
 * endpoint 3.  Returns 0 on success; on allocation failure the URB is
 * left idle so the rx path can retry the refill later.
 */
391 static int ax_refill_rx_urb(struct ax8817x_info *ax_info, struct urb *urb)
/* +2 so the payload can be offset for IP header alignment below */
396 skb = dev_alloc_skb(AX_RX_MAX + 2);
398 skb_reserve(skb, 2); /* for IP header alignment */
399 skb->dev = ax_info->net;
401 usb_fill_bulk_urb(urb, ax_info->usb,
402 usb_rcvbulkpipe(ax_info->usb, 3),
403 skb->data, AX_RX_MAX, ax_rx_callback,
406 ret = usb_submit_urb(urb, GFP_ATOMIC);
408 err("Failed submit rx URB (%d)\n", ret);
409 dev_kfree_skb_irq(skb);
/* skb allocation failed (GFP_ATOMIC): */
415 /* this just means we're low on memory at the moment. Try to
416 handle it gracefully. */
/*
 * Control-queue callback driving the PHY polling state machine
 * (AX_PHY_STATE_POLLING_1..4).  Each invocation rewrites 'req' for the
 * next step and returns non-zero to requeue it, until the MAC's medium
 * mode is set from the negotiated LPA and the link is declared up.
 * Any request error drops to ABORTING and back to NO_LINK.
 */
424 static int ax_phy_cmd_callback(struct ax8817x_info *ax_info,
425 struct ax_cmd_req *req)
431 if (req->status < 0) {
432 err("%s: Failed at state %d: %d\n", __FUNCTION__,
433 ax_info->phy_state, req->status);
434 /* Not sure what else we can do, so just bail */
435 ax_info->phy_state = AX_PHY_STATE_ABORTING;
438 switch (ax_info->phy_state) {
439 /* Now that we're in software MII mode, read the BMSR */
440 case AX_PHY_STATE_POLLING_1:
441 ax_info->phy_state = AX_PHY_STATE_POLLING_2;
442 req->devreq.bRequestType = AX_REQ_READ;
443 req->devreq.bRequest = AX_CMD_READ_MII_REG;
444 req->devreq.wValue = cpu_to_le16(ax_info->phy_id);
445 req->devreq.wIndex = cpu_to_le16(MII_BMSR);
446 req->devreq.wLength = cpu_to_le16(2);
448 req->priv = 0; /* This is the retry count */
451 /* Done reading BMSR */
452 case AX_PHY_STATE_POLLING_2:
/* Register data arrives little-endian; compare without byte-swapping. */
453 mii_data_le = *(u16 *) req->data;
455 cpu_to_le16(BMSR_LSTATUS | BMSR_ANEGCAPABLE))
456 == cpu_to_le16(BMSR_LSTATUS | BMSR_ANEGCAPABLE)) {
457 if (mii_data_le & cpu_to_le16(BMSR_ANEGCOMPLETE)) {
458 /* Autonegotiation done, go on to read LPA */
460 AX_PHY_STATE_POLLING_3;
461 req->devreq.wIndex = cpu_to_le16(MII_LPA);
463 } else if ((long) req->priv++ < AX_MAX_PHY_RETRY) {
464 /* Reread BMSR if it's still autonegotiating. This is
465 probably unnecessary logic, I've never seen it take
466 more than 1 try... */
469 /* else fall through to abort */
471 /* XXX: should probably handle auto-neg failure better,
472 by reverting to manual setting of something safe. (?) */
474 ax_info->phy_state = AX_PHY_STATE_ABORT_POLL;
475 /* and then fall through to set hw MII */
477 /* Got what we needed from PHY, set back to hardware MII mode
478 (Do same for abort in mid-poll) */
479 case AX_PHY_STATE_POLLING_3:
480 case AX_PHY_STATE_ABORT_POLL:
/* +1: POLLING_3 -> POLLING_4, ABORT_POLL -> ABORTING (see defines) */
481 ax_info->phy_state += 1;
482 req->devreq.bRequestType = AX_REQ_WRITE;
483 req->devreq.bRequest = AX_CMD_SET_HW_MII;
484 req->devreq.wValue = cpu_to_le16(0);
485 req->devreq.wIndex = cpu_to_le16(0);
486 req->devreq.wLength = cpu_to_le16(0);
490 /* The end result, set the right duplex and flow control mode in the
491 MAC (based on the PHY's LPA reg, which should still be in the data
493 case AX_PHY_STATE_POLLING_4:
494 mii_data_le = *(u16 *) req->data;
495 ax_info->phy_state = AX_PHY_STATE_SETTING_MAC;
496 req->devreq.bRequest = AX_CMD_WRITE_MEDIUM_MODE;
497 full_duplex = mii_data_le & cpu_to_le16(LPA_DUPLEX);
/* 0x0400 in LPA is the partner's pause capability bit */
498 flow_control = full_duplex &&
499 (mii_data_le & cpu_to_le16(0x0400));
/* NOTE(review): medium-mode bits 0x04/0x02/0x10 are chip-specific --
   confirm against the AX8817x datasheet. */
500 req->devreq.wValue = cpu_to_le16(0x04) |
501 (full_duplex ? cpu_to_le16(0x02) : 0) |
502 (flow_control ? cpu_to_le16(0x10) : 0);
503 info("%s: Link established, %s duplex, flow control %sabled\n", ax_info->net->name, full_duplex ? "full" : "half", flow_control ? "en" : "dis");
507 case AX_PHY_STATE_SETTING_MAC:
508 ax_info->phy_state = AX_PHY_STATE_LINK;
509 netif_carrier_on(ax_info->net);
513 err("%s: Unknown state %d\n", __FUNCTION__,
516 case AX_PHY_STATE_ABORTING:
517 ax_info->phy_state = AX_PHY_STATE_NO_LINK;
/*
 * Interrupt-endpoint completion handler: watches the PHY link bit in
 * int_buf[2] and starts/aborts the PHY polling state machine on link
 * transitions.  Resubmission of the int URB is not visible in this
 * extract -- presumably handled in the missing tail.
 */
522 static void ax_int_callback(struct urb *urb, struct pt_regs *regs)
524 struct ax8817x_info *ax_info =
525 (struct ax8817x_info *) urb->context;
/* Ignore truncated reports and anything received during teardown. */
528 if (ax_info->drv_state == AX_DRV_STATE_EXITING ||
529 urb->actual_length < 3) {
533 /* Ignore the first PHY link report, it will sometimes be reported as
534 link active, even though we just told the PHY to reset. If it
535 really has link, we'll pick it up next int callback.
537 if (ax_info->phy_state == AX_PHY_STATE_INITIALIZING) {
538 netif_carrier_off(ax_info->net);
539 ax_info->phy_state = AX_PHY_STATE_NO_LINK;
543 /* Assume we're only interested in the primary PHY for now. */
544 phy_link = ax_info->int_buf[2] & 1;
547 (ax_info->phy_state == AX_PHY_STATE_NO_LINK) ? 0 : 1) {
548 /* Common case, no change */
/* Link went down: */
553 netif_carrier_off(ax_info->net);
554 /* Abort an in-progress poll of the PHY if necessary */
555 switch (ax_info->phy_state) {
556 case AX_PHY_STATE_POLLING_1:
557 case AX_PHY_STATE_POLLING_2:
558 case AX_PHY_STATE_POLLING_3:
559 ax_info->phy_state = AX_PHY_STATE_ABORT_POLL;
562 case AX_PHY_STATE_POLLING_4:
563 case AX_PHY_STATE_SETTING_MAC:
564 ax_info->phy_state = AX_PHY_STATE_ABORTING;
567 case AX_PHY_STATE_LINK:
568 ax_info->phy_state = AX_PHY_STATE_NO_LINK;
572 /* If we're already aborting, continue aborting */
576 /* Note that we only fall into this case if previous phy_state was
577 AX_PHY_STATE_NO_LINK. When the link is reported active while
578 we're still polling, or when we're aborting, the logic above
579 will just return, and we'll check again next int callback. */
/* Link came up: kick off the poll by switching to software MII. */
581 ax_info->phy_state = AX_PHY_STATE_POLLING_1;
582 ax_info->phy_req.devreq.bRequestType = AX_REQ_WRITE;
583 ax_info->phy_req.devreq.bRequest = AX_CMD_SET_SW_MII;
584 ax_info->phy_req.devreq.wValue = cpu_to_le16(0);
585 ax_info->phy_req.devreq.wIndex = cpu_to_le16(0);
586 ax_info->phy_req.devreq.wLength = cpu_to_le16(0);
587 ax_info->phy_req.data_size = 0;
588 ax_info->phy_req.timeout = AX_TIMEOUT_CMD;
589 ax_info->phy_req.cmd_callback = ax_phy_cmd_callback;
591 ax_run_ctl_queue(ax_info, &ax_info->phy_req, 0);
/*
 * Bulk-in completion handler.  On success: hand the skb to the stack,
 * refill this URB with a fresh skb, and retry any URBs whose earlier
 * refill failed (tracked in rx_refill_cnt).  On unplug-style errors the
 * skb is freed and the URB is not resubmitted.  If refills keep failing
 * the existing skb is resubmitted as-is (packet dropped).
 */
595 static void ax_rx_callback(struct urb *urb, struct pt_regs *regs)
597 struct sk_buff *skb = (struct sk_buff *) urb->context;
598 struct net_device *net = skb->dev;
599 struct ax8817x_info *ax_info = (struct ax8817x_info *) net->priv;
600 int ret, len, refill;
602 switch (urb->status) {
607 err("%s: URB status %d\n", __FUNCTION__, urb->status);
608 /* It's not clear that we can do much in this case, the rx pipe
609 doesn't ever seem to stall, so if we got -ETIMEDOUT, that
610 usually means the device was unplugged, and we just haven't
612 Just fall through and free skb without resubmitting urb. */
614 case -ECONNRESET: /* Async unlink */
615 case -ESHUTDOWN: /* Hardware gone */
616 case -EILSEQ: /* Get this when you yank it out on UHCI */
617 case -ETIMEDOUT: /* OHCI */
618 case -EPROTO: /* EHCI */
620 dev_kfree_skb_any(skb);
625 if (ax_info->drv_state == AX_DRV_STATE_INITIALIZING) {
626 /* Not really expecting this to ever happen, since we haven't yet
627 enabled receive in the rx_ctl register, but ya never know... */
629 } else if (ax_info->drv_state == AX_DRV_STATE_EXITING) {
630 dev_kfree_skb_any(skb);
635 len = urb->actual_length;
637 /* this shouldn't happen... */
/* Try to re-arm this URB with a new skb before passing the old one up. */
641 refill = ax_refill_rx_urb(ax_info, urb);
644 || atomic_read(&ax_info->rx_refill_cnt) < n_rx_urbs) {
645 /* Send the receive buffer up the network stack */
647 skb->protocol = eth_type_trans(skb, net);
648 net->last_rx = jiffies;
649 ax_info->stats.rx_packets++;
650 ax_info->stats.rx_bytes += len;
657 /* This is the common case. This URB got refilled OK, and
658 no other URBs need to be refilled. */
659 if (atomic_read(&ax_info->rx_refill_cnt) == 0) {
/* Some URBs are idle awaiting refill: retry them now. */
663 for (i = 0; i < n_rx_urbs; i++) {
664 struct urb *urb = ax_info->rx_urbs[i];
/* context == NULL marks a URB with no skb attached */
666 if (urb->context == NULL) {
667 if (ax_refill_rx_urb(ax_info, urb)
669 atomic_dec(&ax_info->
677 /* remember to refill this one later */
678 atomic_inc(&ax_info->rx_refill_cnt);
683 ax_info->stats.rx_dropped++;
685 /* the error code was already printk'ed in ax_refill_rx_urb()
686 so just note the consequences here: */
687 warn("Halting rx due to error\n");
691 /* fall through to resubmit this URB with the existing skb
692 will try to reallocate skb's on next rx callback */
696 usb_fill_bulk_urb(urb, ax_info->usb,
697 usb_rcvbulkpipe(ax_info->usb, 3), skb->data,
698 AX_RX_MAX, ax_rx_callback, skb);
700 ret = usb_submit_urb(urb, GFP_ATOMIC);
702 err("Failed submit rx URB (%d)\n", ret);
/*
 * net_device->open: program RX_CTL, allocate the tx URB and the pool of
 * rx URBs, prime each rx URB with an skb, and start the tx queue if at
 * least one rx URB was successfully submitted.
 */
706 static int ax8817x_open(struct net_device *net)
708 struct ax8817x_info *ax_info = (struct ax8817x_info *) net->priv;
/* NOTE(review): 0x80 is also written to RX_CTL in ax8817x_stop() --
   confirm the intended enable/disable values against the datasheet. */
712 ret = ax_write_cmd(ax_info, AX_CMD_WRITE_RX_CTL, 0x80, 0, 0, buf);
719 ax_info->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
720 if (ax_info->tx_urb == NULL) {
721 err("Error allocating tx_urb!");
725 atomic_set(&ax_info->rx_refill_cnt, 0);
/* Allocate and submit the rx URB pool; stop on first hard error. */
727 for (i = 0; i < n_rx_urbs && ret == 0; i++) {
728 struct urb *urb = ax_info->rx_urbs[i];
731 urb = ax_info->rx_urbs[i] =
732 usb_alloc_urb(0, GFP_KERNEL);
738 urb->transfer_flags |= URB_NO_INTERRUPT; /* FIXME: Was USB_QUEUE_BULK */
741 ret = ax_refill_rx_urb(ax_info, urb);
/* skb allocation failed; owe this URB a refill from the rx path */
743 atomic_inc(&ax_info->rx_refill_cnt);
748 /* XXX: should handle the case where we couldn't allocate any skb's
749 better. They get allocated with GFP_ATOMIC, so they may all fail... */
/* Success requires at least one rx URB actually in flight. */
750 if (ret == 0 && atomic_read(&ax_info->rx_refill_cnt) < n_rx_urbs) {
751 netif_start_queue(net);
753 /* Error: clean up anything we allocated and bail. */
754 usb_free_urb(ax_info->tx_urb);
756 for (i = 0; i < n_rx_urbs; i++) {
757 struct urb *urb = ax_info->rx_urbs[i];
760 /* skb gets freed in the URB callback */
766 err("%s: Failed start rx queue (%d)\n", __FUNCTION__, ret);
/*
 * net_device->stop: quiesce the interface -- stop the tx queue, write
 * RX_CTL, then unlink and free the tx URB and every rx URB.
 */
771 static int ax8817x_stop(struct net_device *net)
773 struct ax8817x_info *ax_info = (struct ax8817x_info *) net->priv;
777 netif_stop_queue(net);
/* NOTE(review): same RX_CTL value (0x80) as ax8817x_open() -- verify
   this actually disables the receiver. */
779 ret = ax_write_cmd(ax_info, AX_CMD_WRITE_RX_CTL, 0x80, 0, 0, buf);
/* During disconnect the command is expected to fail; stay quiet then. */
780 if (ret < 0 && ax_info->drv_state != AX_DRV_STATE_EXITING) {
781 err("%s: Failed cmd (%d)\n", __FUNCTION__, ret);
783 if (ax_info->tx_urb != NULL) {
784 usb_unlink_urb(ax_info->tx_urb);
785 usb_free_urb(ax_info->tx_urb);
786 ax_info->tx_urb = NULL;
789 for (i = 0; i < n_rx_urbs; i++) {
790 struct urb *urb = ax_info->rx_urbs[i];
792 /* skb gets freed in the URB callback */
795 ax_info->rx_urbs[i] = NULL;
/*
 * Tx URB completion handler: log non-zero status, refresh the watchdog
 * timestamp, and wake the queue that ax8817x_start_xmit() stopped.
 */
802 static void write_bulk_callback(struct urb *urb, struct pt_regs *regs)
804 struct ax8817x_info *ax_info = urb->context;
/* Bail quietly during teardown or after the device has vanished. */
806 if (!ax_info || (ax_info->drv_state == AX_DRV_STATE_EXITING))
809 if (!netif_device_present(ax_info->net))
813 info("%s: TX status %d", ax_info->net->name, urb->status);
815 ax_info->net->trans_start = jiffies;
816 netif_wake_queue(ax_info->net);
/*
 * net_device->hard_start_xmit: single-URB tx path.  The queue is
 * stopped for every packet and re-woken by write_bulk_callback(), so at
 * most one tx URB is ever in flight.
 */
819 static int ax8817x_start_xmit(struct sk_buff *skb, struct net_device *net)
821 struct ax8817x_info *ax_info = net->priv;
824 netif_stop_queue(net);
/* ZERO_PACKET: terminate max-packet-multiple frames so the device
   sees the end of the transfer. */
826 ax_info->tx_urb->transfer_flags |= URB_ZERO_PACKET;
827 usb_fill_bulk_urb(ax_info->tx_urb, ax_info->usb,
828 usb_sndbulkpipe(ax_info->usb, 2),
829 skb->data, skb->len, write_bulk_callback,
831 if ((res = usb_submit_urb(ax_info->tx_urb, GFP_ATOMIC))) {
832 warn("Failed tx_urb %d", res);
833 ax_info->stats.tx_errors++;
/* submit failed: let the stack retry this skb */
834 netif_start_queue(net);
836 ax_info->stats.tx_packets++;
837 ax_info->stats.tx_bytes += skb->len;
838 net->trans_start = jiffies;
/*
 * net_device->tx_timeout (watchdog_timeo expired): asynchronously
 * unlink the stuck tx URB; its completion callback will wake the queue.
 */
845 static void ax8817x_tx_timeout(struct net_device *net)
847 struct ax8817x_info *ax_info = net->priv;
852 warn("%s: Tx timed out.", net->name);
/* ASYNC_UNLINK: don't sleep here -- completion arrives via callback */
853 ax_info->tx_urb->transfer_flags |= URB_ASYNC_UNLINK;
854 usb_unlink_urb(ax_info->tx_urb);
855 ax_info->stats.tx_errors++;
858 static struct net_device_stats *ax8817x_stats(struct net_device *net)
860 struct ax8817x_info *ax_info = (struct ax8817x_info *) net->priv;
862 return &ax_info->stats;
/*
 * net_device->set_multicast_list: pick an RX_CTL mode (promiscuous,
 * all-multi, broadcast-only, or hashed multicast) and push it to the
 * device asynchronously (this hook may run in atomic context).
 * NOTE(review): the rx_ctl values assigned in each branch are missing
 * from this extract.
 */
865 static void ax8817x_set_multicast(struct net_device *net)
867 struct ax8817x_info *ax_info = (struct ax8817x_info *) net->priv;
870 if (net->flags & IFF_PROMISC) {
872 } else if (net->flags & IFF_ALLMULTI
873 || net->mc_count > AX_MAX_MCAST) {
875 } else if (net->mc_count == 0) {
876 /* just broadcast and directed */
878 struct dev_mc_list *mc_list = net->mc_list;
/* 64-bit (8-byte) multicast hash filter, DMA-able buffer required */
883 multi_filter = kmalloc(8, GFP_ATOMIC);
884 if (multi_filter == NULL) {
885 /* Oops, couldn't allocate a DMA buffer for setting the multicast
886 filter. Try all multi mode, although the ax_write_cmd_async
887 will almost certainly fail, too... (but it will printk). */
890 memset(multi_filter, 0, 8);
892 /* Build the multicast hash filter. */
893 for (i = 0; i < net->mc_count; i++) {
/* top 6 bits of the CRC select one of 64 filter bits */
896 mc_list->dmi_addr) >> 26;
897 multi_filter[crc_bits >> 3] |=
899 mc_list = mc_list->next;
902 ax_write_cmd_async(ax_info,
903 AX_CMD_WRITE_MULTI_FILTER, 0, 0,
910 ax_write_cmd_async(ax_info, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0,
/*
 * Read one 16-bit MII register: temporarily drop the chip into
 * software-MII mode, read the register, then restore hardware MII.
 * (The output-parameter declaration and return are missing from this
 * extract.)
 */
914 static int read_mii_word(struct ax8817x_info *ax_info, __u8 phy, __u8 indx,
919 ax_write_cmd(ax_info, AX_CMD_SET_SW_MII, 0, 0, 0, NULL);
921 ax_read_cmd(ax_info, AX_CMD_READ_MII_REG, phy, indx, 2, regd);
922 ax_write_cmd(ax_info, AX_CMD_SET_HW_MII, 0, 0, 0, NULL);
/* Stub: MII register writes are not supported yet; only logs a warning. */
927 static int write_mii_word(struct ax8817x_info *ax_info, __u8 phy,
928 __u8 indx, __u16 regd)
930 warn("write_mii_word - not implemented!");
/* mii_if_info->mdio_read adapter for the generic mii library. */
934 static int mdio_read(struct net_device *dev, int phy_id, int loc)
936 struct ax8817x_info *ax_info = dev->priv;
939 read_mii_word(ax_info, phy_id, loc, (u16 *) & res);
/* mii_if_info->mdio_write adapter; a no-op given the stub above. */
943 static void mdio_write(struct net_device *dev, int phy_id, int loc,
946 struct ax8817x_info *ax_info = dev->priv;
948 write_mii_word(ax_info, phy_id, loc, val);
/*
 * ETHTOOL ioctl dispatcher (pre-ethtool_ops era): copies the sub-command
 * from user space and services GDRVINFO/GSET/SSET/NWAY_RST/GLINK and
 * the (stubbed) message-level commands.
 * NOTE(review): the 'ax_info = net->priv' assignment is not visible in
 * this extract but ax_info is used throughout -- confirm upstream.
 */
951 static int ax8817x_ethtool_ioctl(struct net_device *net, void __user *uaddr)
953 struct ax8817x_info *ax_info;
957 if (get_user(cmd, (int *) uaddr))
961 case ETHTOOL_GDRVINFO:{
962 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
/* NOTE(review): ETHTOOL_BUSINFO_LEN used as the bound for the driver
   and version fields -- the changelog (line 16) flags this as the
   GETDRVINFO fix, but confirm the field sizes match. */
964 strlcpy(info.driver, DRIVER_NAME,
965 ETHTOOL_BUSINFO_LEN);
966 strlcpy(info.version, DRIVER_VERSION,
967 ETHTOOL_BUSINFO_LEN);
968 usb_make_path(ax_info->usb, info.bus_info,sizeof info.bus_info);
969 if (copy_to_user(uaddr, &info, sizeof(info)))
/* ETHTOOL_GSET: delegate link settings to the generic mii library */
974 struct ethtool_cmd ecmd;
976 mii_ethtool_gset(&ax_info->mii, &ecmd);
977 if (copy_to_user(uaddr, &ecmd, sizeof(ecmd)))
/* ETHTOOL_SSET: */
983 struct ethtool_cmd ecmd;
985 if (copy_from_user(&ecmd, uaddr, sizeof(ecmd)))
987 r = mii_ethtool_sset(&ax_info->mii, &ecmd);
990 case ETHTOOL_NWAY_RST:{
991 return mii_nway_restart(&ax_info->mii);
994 struct ethtool_value edata = { ETHTOOL_GLINK };
/* Link state comes from our own PHY state machine, not the mii lib */
997 ax_info->phy_state == AX_PHY_STATE_LINK;
998 if (copy_to_user(uaddr, &edata, sizeof(edata)))
1002 case ETHTOOL_GMSGLVL:{
1003 struct ethtool_value edata = { ETHTOOL_GMSGLVL };
1004 /* edata.data = ax_info->msg_enable; FIXME */
1005 if (copy_to_user(uaddr, &edata, sizeof(edata)))
1009 case ETHTOOL_SMSGLVL:{
1010 struct ethtool_value edata;
1012 if (copy_from_user(&edata, uaddr, sizeof(edata)))
1014 /* sp->msg_enable = edata.data; FIXME */
/*
 * SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG handler.  Note the register read
 * here does NOT toggle software-MII mode the way read_mii_word() does
 * -- possibly intentional, possibly an oversight; flag for review.
 */
1021 static int ax8817x_mii_ioctl(struct net_device *net, struct ifreq *ifr,
1024 struct ax8817x_info *ax_info;
/* ifr_data is reinterpreted in place as the mii request structure */
1025 struct mii_ioctl_data *data_ptr =
1026 (struct mii_ioctl_data *) &(ifr->ifr_data);
1028 ax_info = net->priv;
1032 data_ptr->phy_id = ax_info->phy_id;
/* register writes are privileged */
1035 if (!capable(CAP_NET_ADMIN))
1038 ax_read_cmd(ax_info, AX_CMD_READ_MII_REG, 0,
1039 data_ptr->reg_num & 0x1f, 2,
1040 &(data_ptr->val_out));
/*
 * net_device->do_ioctl: route SIOCETHTOOL to the ethtool handler and
 * the SIOC*MII* commands to the mii handler.
 */
1048 static int ax8817x_ioctl(struct net_device *net, struct ifreq *ifr,
1051 struct ax8817x_info *ax_info;
1054 ax_info = net->priv;
1059 res = ax8817x_ethtool_ioctl(net, (void __user *)ifr->ifr_data);
1061 case SIOCGMIIPHY: /* Get address of PHY in use */
1062 case SIOCGMIIREG: /* Read from MII PHY register */
1063 case SIOCSMIIREG: /* Write to MII PHY register */
1064 return ax8817x_mii_ioctl(net, ifr, cmd);
/*
 * net_device->init: one-time hardware bring-up.  Programs RX_CTL, reads
 * the MAC address and PHY id from the chip, resets the PHY into
 * autonegotiation (advertising pause), and wires up the net_device ops.
 */
1072 static int ax8817x_net_init(struct net_device *net)
1074 struct ax8817x_info *ax_info = (struct ax8817x_info *) net->priv;
/* alias for writing 16-bit MII values through the same buffer */
1076 u16 *buf16 = (u16 *) buf;
1079 ret = ax_write_cmd(ax_info, AX_CMD_WRITE_RX_CTL, 0x80, 0, 0, buf);
1086 /* Get the MAC address */
1087 ret = ax_read_cmd(ax_info, AX_CMD_READ_NODE_ID, 0, 0, 6, buf);
1092 memcpy(net->dev_addr, buf, 6);
1094 /* Get the PHY id */
1095 ret = ax_read_cmd(ax_info, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
1098 } else if (ret < 2) {
1099 /* this should always return 2 bytes */
1103 /* Reset the PHY, and drop it into auto-negotiation mode */
/* buf[1] holds the primary PHY's address (buf[0] is the secondary) */
1104 ax_info->phy_id = buf[1];
1105 ax_info->phy_state = AX_PHY_STATE_INITIALIZING;
1107 ret = ax_write_cmd(ax_info, AX_CMD_SET_SW_MII, 0, 0, 0, &buf);
1112 *buf16 = cpu_to_le16(BMCR_RESET);
1113 ret = ax_write_cmd(ax_info, AX_CMD_WRITE_MII_REG,
1114 ax_info->phy_id, MII_BMCR, 2, buf16);
1119 /* Advertise that we can do full-duplex pause */
1120 *buf16 = cpu_to_le16(ADVERTISE_ALL | ADVERTISE_CSMA | 0x0400);
1121 ret = ax_write_cmd(ax_info, AX_CMD_WRITE_MII_REG,
1122 ax_info->phy_id, MII_ADVERTISE, 2, buf16);
/* Enable and (re)start autonegotiation */
1127 *buf16 = cpu_to_le16(BMCR_ANENABLE | BMCR_ANRESTART);
1128 ret = ax_write_cmd(ax_info, AX_CMD_WRITE_MII_REG,
1129 ax_info->phy_id, MII_BMCR, 2, buf16);
1134 ret = ax_write_cmd(ax_info, AX_CMD_SET_HW_MII, 0, 0, 0, &buf);
/* Hook up the net_device operations (pre-net_device_ops kernel). */
1139 net->open = ax8817x_open;
1140 net->stop = ax8817x_stop;
1141 net->hard_start_xmit = ax8817x_start_xmit;
1142 net->tx_timeout = ax8817x_tx_timeout;
1143 net->watchdog_timeo = AX_TIMEOUT_TX;
1144 net->get_stats = ax8817x_stats;
1145 net->do_ioctl = ax8817x_ioctl;
1146 net->set_multicast_list = ax8817x_set_multicast;
/*
 * USB probe: allocate the driver state (with the rx URB pointer array
 * appended), set up the serialized control queue, toggle the
 * model-specific GPIOs from id->driver_info, register the net device,
 * and start the interrupt URB that monitors PHY link state.
 * Error paths unwind in reverse allocation order via the labels below.
 */
1151 static int ax8817x_bind(struct usb_interface *intf,
1152 const struct usb_device_id *id)
1154 struct usb_device *usb = interface_to_usbdev(intf);
1155 struct ax8817x_info *ax_info;
1156 struct net_device *net;
1158 unsigned long gpio_bits = id->driver_info;
1161 /* Allocate the URB lists along with the device info struct */
1162 ax_info = kmalloc(sizeof(struct ax8817x_info) +
1163 n_rx_urbs * sizeof(struct urb *), GFP_KERNEL);
1164 if (ax_info == NULL) {
1165 err("%s: Failed ax alloc\n", __FUNCTION__);
1169 memset(ax_info, 0, sizeof(struct ax8817x_info) +
1170 n_rx_urbs * sizeof(struct urb *));
1172 ax_info->drv_state = AX_DRV_STATE_INITIALIZING;
/* rx_urbs array lives immediately after the struct */
1173 ax_info->rx_urbs = (struct urb **) (ax_info + 1);
1176 /* Set up the control URB queue */
1178 INIT_LIST_HEAD(&ax_info->ctl_queue);
1179 spin_lock_init(&ax_info->ctl_lock);
1180 ax_info->ctl_urb = usb_alloc_urb(0, GFP_KERNEL);
1181 if (ax_info->ctl_urb == NULL) {
1182 goto exit_err_free_ax;
1185 /* Toggle the GPIOs in a manufacturer/model specific way */
/* driver_info packs three GPIO bytes, written high byte first */
1187 for (i = 2; i >= 0; i--) {
1188 ret = ax_write_cmd(ax_info, AX_CMD_WRITE_GPIOS,
1189 (gpio_bits >> (i * 8)) & 0xff, 0, 0,
1192 goto exit_err_free_ax;
1197 /* Set up the net device */
1199 net = alloc_etherdev(0);
1201 err("%s: Failed net alloc\n", __FUNCTION__);
1202 goto exit_err_free_ax;
1207 SET_MODULE_OWNER(net);
/* ax8817x_net_init() runs from register_netdev() and does hw setup */
1208 net->init = ax8817x_net_init;
1209 net->priv = ax_info;
1211 SET_NETDEV_DEV(net, &intf->dev);
1212 ret = register_netdev(net);
1214 err("%s: Failed net init (%d)\n", __FUNCTION__, ret);
1215 goto exit_err_free_net;
1218 /* Setup mii structure */
1219 ax_info->mii.dev = net;
1220 ax_info->mii.mdio_read = mdio_read;
1221 ax_info->mii.mdio_write = mdio_write;
1222 ax_info->mii.phy_id_mask = 0x1f;
1223 ax_info->mii.reg_num_mask = 0x1f;
1225 /* Set up the interrupt URB, and start PHY state monitoring */
1227 ax_info->int_urb = usb_alloc_urb(0, GFP_KERNEL);
1228 if (ax_info->int_urb == NULL) {
1229 goto exit_err_unregister_net;
1231 ax_info->int_buf = kmalloc(8, GFP_KERNEL);
1232 if (ax_info->int_buf == NULL) {
1233 goto exit_err_free_int_urb;
/* 2-byte buffer reused by the PHY poll for MII register reads */
1235 ax_info->phy_req.data = kmalloc(2, GFP_KERNEL);
1236 if (ax_info->phy_req.data == NULL) {
1237 goto exit_err_free_int_buf;
1240 usb_fill_int_urb(ax_info->int_urb, usb, usb_rcvintpipe(usb, 1),
1241 ax_info->int_buf, 8, ax_int_callback, ax_info,
1244 ret = usb_submit_urb(ax_info->int_urb, GFP_ATOMIC);
1246 err("%s: Failed int URB submit (%d)\n", __FUNCTION__, ret);
1247 goto exit_err_free_phy_buf;
1250 ax_info->drv_state = AX_DRV_STATE_RUNNING;
1251 usb_set_intfdata(intf, ax_info);
/* --- error unwind, reverse order of allocation --- */
1255 exit_err_free_phy_buf:
1256 kfree(ax_info->phy_req.data);
1258 exit_err_free_int_buf:
1259 kfree(ax_info->int_buf);
1261 exit_err_free_int_urb:
1262 usb_free_urb(ax_info->int_urb);
1264 exit_err_unregister_net:
1265 ax_info->drv_state = AX_DRV_STATE_EXITING;
1266 unregister_netdev(net);
1272 if (ax_info->ctl_urb != NULL) {
1273 /* no need to unlink, since there should not be any ctl URBs
1274 pending at this point */
1275 usb_free_urb(ax_info->ctl_urb);
1281 err("%s: Failed to initialize\n", __FUNCTION__);
/*
 * USB disconnect: mark the driver exiting (so in-flight callbacks bail
 * quietly), kill the interrupt URB, unregister the net device (which
 * calls ax8817x_stop()), then tear down the control URB.
 */
1285 static void ax8817x_disconnect(struct usb_interface *intf)
1287 struct ax8817x_info *ax_info = usb_get_intfdata(intf);
1289 usb_set_intfdata(intf, NULL);
1291 ax_info->drv_state = AX_DRV_STATE_EXITING;
1293 if (ax_info->int_urb != NULL) {
1294 usb_unlink_urb(ax_info->int_urb);
1295 usb_free_urb(ax_info->int_urb);
1296 kfree(ax_info->int_buf);
1299 unregister_netdev(ax_info->net);
1301 /* XXX: hmmm... need to go through and clear out the ctl queue, too... */
1302 if (ax_info->ctl_urb != NULL) {
1303 usb_unlink_urb(ax_info->ctl_urb);
1304 usb_free_urb(ax_info->ctl_urb);
/* USB driver glue: probe/disconnect entry points and the id table. */
1311 static struct usb_driver ax8817x_driver = {
1312 .owner = THIS_MODULE,
1313 .name = DRIVER_NAME,
1314 .probe = ax8817x_bind,
1315 .disconnect = ax8817x_disconnect,
1316 .id_table = ax8817x_id_table,
/*
 * Module init: sanity-check/reset the n_rx_urbs parameter and register
 * the USB driver.  NOTE(review): the condition guarding the reset of
 * n_rx_urbs is missing from this extract -- presumably a bounds check
 * on the module parameter.
 */
1319 static int __init ax8817x_init(void)
1324 n_rx_urbs = AX_RX_URBS_DEFAULT;
1326 ret = usb_register(&ax8817x_driver);
1328 err("%s: Failed to register\n", __FUNCTION__);
1330 info(DRIVER_DESC " " DRIVER_VERSION);
/* Module exit: unregister from the USB core. */
1336 static void __exit ax8817x_exit(void)
1338 usb_deregister(&ax8817x_driver);
1341 module_init(ax8817x_init);
1342 module_exit(ax8817x_exit);