/* take the first altsetting with in-bulk + out-bulk;
* remember any status endpoint, just in case;
- * ignore other endpoints and altsetttings.
+ * ignore other endpoints and altsettings.
*/
for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
struct usb_host_endpoint *e;
} else {
usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
buf, maxp, intr_complete, dev, period);
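+ /* let the USB core free the buffer together with the URB */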
+ dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
dev_dbg(&intf->dev,
"status ep%din, %d bytes period %d\n",
usb_pipeendpoint(pipe), maxp, period);
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof (struct ethhdr), skb->protocol);
memset (skb->cb, 0, sizeof (struct skb_data));
+
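+ /* a PHY may take the skb for rx timestamping and deliver it later */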
+ if (skb_defer_rx_timestamp(skb))
+ return;
+
status = netif_rx (skb);
if (status != NET_RX_SUCCESS)
netif_dbg(dev, rx_err, dev->net,
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
+/* The caller must hold list->lock: queueing the skb and recording its
+ * state must be one atomic step, because unlink_urbs() inspects the
+ * state of queued skbs under the same lock.
+ */
+static void __usbnet_queue_skb(struct sk_buff_head *list,
+ struct sk_buff *newsk, enum skb_state state)
+{
+ struct skb_data *entry = (struct skb_data *) newsk->cb;
+
+ __skb_queue_tail(list, newsk);
+ entry->state = state;
+}
+
/*-------------------------------------------------------------------------*/
/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
* completion callbacks. 2.5 should have fixed those bugs...
*/
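+/* Moves the skb from its current queue to dev->done, records the new
+ * state, and returns the state the skb was in beforehand, so the caller
+ * can detect an unlink that raced with this completion.
+ */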
-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
+static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+ struct sk_buff_head *list, enum skb_state state)
{
unsigned long flags;
+ enum skb_state old_state;
+ struct skb_data *entry = (struct skb_data *) skb->cb;
spin_lock_irqsave(&list->lock, flags);
+ old_state = entry->state;
+ entry->state = state;
__skb_unlink(skb, list);
spin_unlock(&list->lock);
spin_lock(&dev->done.lock);
if (dev->done.qlen == 1)
tasklet_schedule(&dev->bh);
spin_unlock_irqrestore(&dev->done.lock, flags);
+ return old_state;
}
/* some work can't be done in tasklets, so we use keventd
unsigned long lockflags;
size_t size = dev->rx_urb_size;
- if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
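+ /* __netdev_alloc_skb_ip_align() also reserves NET_IP_ALIGN headroom */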
+ skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+ if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
usb_free_urb (urb);
return -ENOMEM;
}
- skb_reserve (skb, NET_IP_ALIGN);
entry = (struct skb_data *) skb->cb;
entry->urb = urb;
entry->dev = dev;
- entry->state = rx_start;
entry->length = 0;
usb_fill_bulk_urb (urb, dev->udev, dev->in,
tasklet_schedule (&dev->bh);
break;
case 0:
- __skb_queue_tail (&dev->rxq, skb);
+ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
}
} else {
netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
struct skb_data *entry = (struct skb_data *) skb->cb;
struct usbnet *dev = entry->dev;
int urb_status = urb->status;
+ enum skb_state state;
skb_put (skb, urb->actual_length);
- entry->state = rx_done;
+ state = rx_done;
entry->urb = NULL;
switch (urb_status) {
/* success */
case 0:
if (skb->len < dev->net->hard_header_len) {
- entry->state = rx_cleanup;
+ state = rx_cleanup;
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
netif_dbg(dev, rx_err, dev->net,
"rx throttle %d\n", urb_status);
}
block:
- entry->state = rx_cleanup;
+ state = rx_cleanup;
entry->urb = urb;
urb = NULL;
break;
// FALLTHROUGH
default:
- entry->state = rx_cleanup;
+ state = rx_cleanup;
dev->net->stats.rx_errors++;
netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
break;
}
- defer_bh(dev, skb, &dev->rxq);
+ state = defer_bh(dev, skb, &dev->rxq, state);
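+ /* if an unlink raced with this completion, don't resubmit the URB */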
if (urb) {
if (netif_running (dev->net) &&
- !test_bit (EVENT_RX_HALT, &dev->flags)) {
+ !test_bit (EVENT_RX_HALT, &dev->flags) &&
+ state != unlink_start) {
rx_submit (dev, urb, GFP_ATOMIC);
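+ /* note I/O activity so runtime PM doesn't suspend the device */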
+ usb_mark_last_busy(dev->udev);
return;
}
usb_free_urb (urb);
static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
unsigned long flags;
- struct sk_buff *skb, *skbnext;
+ struct sk_buff *skb;
int count = 0;
spin_lock_irqsave (&q->lock, flags);
- skb_queue_walk_safe(q, skb, skbnext) {
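+ /* the lock is dropped around usb_unlink_urb() below, so restart the
+ * scan on each pass and skip entries already marked unlink_start
+ */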
+ while (!skb_queue_empty(q)) {
struct skb_data *entry;
struct urb *urb;
int retval;
- entry = (struct skb_data *) skb->cb;
+ skb_queue_walk(q, skb) {
+ entry = (struct skb_data *) skb->cb;
+ if (entry->state != unlink_start)
+ goto found;
+ }
+ break;
+found:
+ entry->state = unlink_start;
urb = entry->urb;
+ /*
+ * Take a reference on the URB so it cannot be freed while
+ * usb_unlink_urb() runs: usb_unlink_urb() always races with
+ * the completion handler (including defer_bh), and without
+ * the reference that race could become a use-after-free
+ * inside usb_unlink_urb().
+ */
+ usb_get_urb(urb);
+ spin_unlock_irqrestore(&q->lock, flags);
// during some PM-driven resume scenarios,
// these (async) unlinks complete immediately
retval = usb_unlink_urb (urb);
netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
count++;
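+ /* drop our URB reference and retake the lock before rescanning */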
+ usb_put_urb(urb);
+ spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore (&q->lock, flags);
return count;
struct driver_info *info = dev->driver_info;
int retval;
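+ /* mark the device closed so a concurrent resume leaves it quiescent */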
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
netif_info(dev, ifdown, dev->net,
}
}
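+ /* tell resume it may restart the queues and the status URB */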
+ set_bit(EVENT_DEV_OPEN, &dev->flags);
netif_start_queue (net);
netif_info(dev, ifup, dev->net,
"open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
}
usb_autopm_put_interface_async(dev->intf);
- urb->dev = NULL;
- entry->state = tx_done;
- defer_bh(dev, skb, &dev->txq);
+ (void) defer_bh(dev, skb, &dev->txq, tx_done);
}
/*-------------------------------------------------------------------------*/
unsigned long flags;
int retval;
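+ /* take the software tx timestamp before USB queueing adds delay */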
+ if (skb)
+ skb_tx_timestamp(skb);
+
// some devices want funky USB-level framing, for
// win32 driver (usually) and/or hardware quirks
if (info->tx_fixup) {
entry = (struct skb_data *) skb->cb;
entry->urb = urb;
entry->dev = dev;
- entry->state = tx_start;
entry->length = length;
usb_fill_bulk_urb (urb, dev->udev, dev->out,
break;
case 0:
net->trans_start = jiffies;
- __skb_queue_tail (&dev->txq, skb);
+ __usbnet_queue_skb(&dev->txq, skb, tx_start);
if (dev->txq.qlen >= TX_QLEN (dev))
netif_stop_queue (net);
}
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);
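+ /* the status URB can't be needed anymore; kill and free it */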
+ usb_kill_urb(dev->interrupt);
+ usb_free_urb(dev->interrupt);
+
free_netdev(net);
usb_put_dev (xdev);
}
// set up our own records
net = alloc_etherdev(sizeof(*dev));
- if (!net) {
- dbg ("can't kmalloc dev");
+ if (!net)
goto out;
- }
/* netdev_printk() needs this so do it as early as possible */
SET_NETDEV_DEV(net, &udev->dev);
// else "eth%d" when there's reasonable doubt. userspace
// can rename the link if it knows better.
if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
- (net->dev_addr [0] & 0x02) == 0)
+ ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
+ (net->dev_addr [0] & 0x02) == 0))
strcpy (net->name, "eth%d");
/* WLAN devices should always be named "wlan%d" */
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
status = register_netdev (net);
if (status)
- goto out3;
+ goto out4;
netif_info(dev, probe, dev->net,
"register '%s' at usb-%s-%s, %s, %pM\n",
udev->dev.driver->name,
return 0;
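+/* register_netdev() failed; free the interrupt URB allocated earlier in probe */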
+out4:
+ usb_free_urb(dev->interrupt);
out3:
if (info->unbind)
info->unbind (dev, udev);
if (!dev->suspend_count++) {
spin_lock_irq(&dev->txq.lock);
/* don't autosuspend while transmitting */
- if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) {
+ if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
spin_unlock_irq(&dev->txq.lock);
return -EBUSY;
} else {
int retval;
if (!--dev->suspend_count) {
+ /* resume interrupt URBs */
+ if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
+ usb_submit_urb(dev->interrupt, GFP_NOIO);
+
spin_lock_irq(&dev->txq.lock);
while ((res = usb_get_from_anchor(&dev->deferred))) {
smp_mb();
clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
spin_unlock_irq(&dev->txq.lock);
- if (!(dev->txq.qlen >= TX_QLEN(dev)))
- netif_start_queue(dev->net);
- tasklet_schedule (&dev->bh);
+
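+ /* only an open device gets its queues woken and its bh scheduled */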
+ if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+ if (!(dev->txq.qlen >= TX_QLEN(dev)))
+ netif_tx_wake_all_queues(dev->net);
+ tasklet_schedule (&dev->bh);
+ }
}
return 0;
}
static int __init usbnet_init(void)
{
- /* compiler should optimize this out */
- BUILD_BUG_ON (sizeof (((struct sk_buff *)0)->cb)
- < sizeof (struct skb_data));
+ /* Compiler should optimize this out. */
+ BUILD_BUG_ON(
+ FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
random_ether_addr(node_id);
return 0;