 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#include <linux/timer.h>
#include <linux/delay.h>

#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <trace/net.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dcbnl.h>
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped			*/
#define NET_XMIT_CN		0x02	/* congestion notification	*/
#define NET_XMIT_POLICED	0x03	/* skb is shot by police	*/
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
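/* Illustrative sketch (not part of this header): a hypothetical protocol
 * transmit path mapping a qdisc return code to an errno for its caller.
 * NET_XMIT_SUCCESS and NET_XMIT_CN both count as "sent"; everything else
 * is turned into -ENOBUFS by net_xmit_errno().
 *
 *	int rc = dev_queue_xmit(skb);
 *
 *	if (net_xmit_eval(rc))
 *		return net_xmit_errno(rc);
 *	return 0;
 */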
/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */
#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif  /*  __KERNEL__  */
/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats {
	unsigned long	rx_packets;	/* total packets received	*/
	unsigned long	tx_packets;	/* total packets transmitted	*/
	unsigned long	rx_bytes;	/* total bytes received		*/
	unsigned long	tx_bytes;	/* total bytes transmitted	*/
	unsigned long	rx_errors;	/* bad packets received		*/
	unsigned long	tx_errors;	/* packet transmit problems	*/
	unsigned long	rx_dropped;	/* no space in linux buffers	*/
	unsigned long	tx_dropped;	/* no space available in linux	*/
	unsigned long	multicast;	/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* received pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* received frame alignment error */
	unsigned long	rx_fifo_errors;		/* receiver fifo overrun	*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
/* Media selection options. */

#include <linux/cache.h>
#include <linux/skbuff.h>

struct netif_rx_stats {
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
struct dev_addr_list {
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
};
struct hh_cache {
	struct hh_cache *hh_next;	/* Next entry			*/
	atomic_t	hh_refcnt;	/* number of users		*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 *  NOTE:  For VLANs, this will be the
					 *  encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * needs.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
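/* Illustrative sketch (not part of this header): a protocol sizing an
 * outgoing skb with these macros so the link-layer header fits in the
 * reserved headroom. "len" (the payload size) is a hypothetical variable.
 *
 *	struct sk_buff *skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + len,
 *					GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	skb_put(skb, len);
 *	... fill the payload, then dev_hard_header() and dev_queue_xmit() ...
 */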
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;
	struct sk_buff		*skb;
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};

typedef enum gro_result gro_result_t;
extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
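/* Illustrative sketch (not part of this header): the usual NAPI pattern for
 * a hypothetical "foo" driver. The interrupt handler masks device interrupts
 * and calls napi_schedule(); the poll callback (registered with
 * netif_napi_add() at probe time) processes up to "budget" packets and calls
 * napi_complete() before re-enabling interrupts. foo_priv, foo_rx() and the
 * irq helpers are assumptions, not real APIs.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			foo_enable_irq(priv);
 *		}
 *		return work_done;
 *	}
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		foo_disable_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */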
enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
};

struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	struct Qdisc		*qdisc_sleeping;

	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;
	unsigned long		tx_bytes;
	unsigned long		tx_packets;
	unsigned long		tx_dropped;
} ____cacheline_aligned_in_smp;
/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when the device is unregistered or when
 *	registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when the device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous mode is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list filtering.
 *
 * void (*ndo_set_multicast_list)(struct net_device *dev);
 *	This function is called when the multicast address list changes.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	mac address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called when a user requests an ioctl which can't be handled by
 *	the generic interface code. If not defined, ioctls return
 *	a not supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set network device bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change the MTU will
 *	return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. If not defined, the counters in dev->stats will
 *	be used.
 *
 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 *	If the device supports VLAN receive acceleration
 *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
 *	when the vlan groups for the device change. Note: grp is NULL
 *	if no vlan groups are being used.
 *
 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is registered.
 *
 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *	If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *	this function is called when a VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 */
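/* Illustrative sketch (not part of this header): a minimal net_device_ops
 * table for a hypothetical "foo" driver. Only ndo_start_xmit is mandatory;
 * eth_mac_addr, eth_change_mtu and eth_validate_addr are the stock Ethernet
 * helpers from <linux/etherdevice.h> that many drivers reuse.
 * foo_open/foo_stop/foo_start_xmit are assumed driver functions.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 *
 * and in the probe path:  dev->netdev_ops = &foo_netdev_ops;
 */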
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
#define HAVE_SET_RX_MODE
	void			(*ndo_set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*ndo_set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*ndo_validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
#define HAVE_TX_TIMEOUT
	void			(*ndo_tx_timeout) (struct net_device *dev);

	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	void			(*ndo_vlan_rx_register)(struct net_device *dev,
							struct vlan_group *grp);
	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
#define HAVE_NETDEV_POLL
	void			(*ndo_poll_controller)(struct net_device *dev);
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif
};
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device {

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];

	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. E.g. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_GRO		16384	/* Generic receive offload */
#define NETIF_F_LRO		32768	/* large receive offload */

/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0x00ff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	/*
	 * If one device supports one of these features, then enable them
	 * for all in netdev_increment_features.
	 */
#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
				 NETIF_F_SG | NETIF_F_HIGHDMA | \
				 NETIF_F_FRAGLIST)
	/* Interface index. Unique device identifier	*/
	int			ifindex;

	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	unsigned int		flags;		/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags;	/* Like 'flags' but invisible to userspace. */
	unsigned short		padded;		/* How much padding added by alloc_netdev() */

	unsigned char		operstate;	/* RFC2863 operstate */
	unsigned char		link_mode;	/* mapping policy to operstate */

	unsigned		mtu;		/* interface MTU value		*/
	unsigned short		type;		/* interface hardware type	*/
	unsigned short		hard_header_len; /* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master;	/* Pointer to master device of a group,
						 * which this device is member of.
						 */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct netdev_hw_addr_list	uc;	/* Secondary unicast
						   mac addresses */
	spinlock_t		addr_list_lock;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

#ifdef CONFIG_NET_DSA
	void			*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */

	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

	struct netdev_queue	rx_queue;

	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
	/* These may be needed for future network-power-down code. */

	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo;	/* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct list_head	link_watch_list;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state;

	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	struct net_bridge_port	*br_port;
	struct macvlan_port	*macvlan_port;
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					  unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}
/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif

	return false;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif

	return false;
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}

/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, a symlink is created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
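/* Illustrative sketch (not part of this header): a typical probe path tying
 * these helpers together for a hypothetical PCI driver. "foo_priv",
 * "foo_netdev_ops", "pdev" and the error handling are assumptions;
 * alloc_etherdev() comes from <linux/etherdevice.h>, the rest is declared
 * further down in this header.
 *
 *	struct net_device *netdev = alloc_etherdev(sizeof(struct foo_priv));
 *	struct foo_priv *priv;
 *	int err;
 *
 *	if (!netdev)
 *		return -ENOMEM;
 *	priv = netdev_priv(netdev);
 *	priv->pdev = pdev;
 *
 *	netdev->netdev_ops = &foo_netdev_ops;
 *	SET_NETDEV_DEV(netdev, &pdev->dev);
 *
 *	err = register_netdev(netdev);
 *	if (err)
 *		free_netdev(netdev);
 *	return err;
 */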
/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight);

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
void netif_napi_del(struct napi_struct *napi);
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int data_offset;

	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;

	/* Number of segments aggregated. */
	int count;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
						 struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>
extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)	\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)	\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)	\
		list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
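/* Illustrative sketch (not part of this header): walking the device list of
 * a namespace. The plain iterator requires the RTNL or dev_base_lock; the
 * _rcu variant only needs rcu_read_lock(). The printk payload here is just
 * an example.
 *
 *	struct net_device *d;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, d)
 *		printk(KERN_DEBUG "%s: ifindex %d\n", d->name, d->ifindex);
 *	rcu_read_unlock();
 */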
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(dev->dev_list.next);
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
					   unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice_queue(struct net_device *dev,
						   struct list_head *head);
extern void		unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		init_dummy_netdev(struct net_device *dev);
extern void		netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
extern int		skb_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern void		skb_gro_reset_offset(struct sk_buff *skb);

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE
extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}
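/* Illustrative sketch (not part of this header): the usual TX flow-control
 * pattern for a single-queue driver. The ring-space helpers, "priv" and the
 * descriptor handling are assumptions; only the netif_*_queue() calls are
 * the real API. The queue is stopped in ndo_start_xmit when the ring is
 * nearly full and woken from the TX-completion handler once space is
 * available again.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_queue_descriptor(priv, skb);
 *		if (foo_ring_space(priv) < MAX_SKB_FRAGS + 1)
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	and in the TX completion handler:
 *
 *	if (netif_queue_stopped(dev) && foo_ring_space(priv) > MAX_SKB_FRAGS)
 *		netif_wake_queue(dev);
 */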
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (dev->num_tx_queues > 1);
}
/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern void		napi_gro_flush(struct napi_struct *napi);
extern gro_result_t	dev_gro_receive(struct napi_struct *napi,
					struct sk_buff *skb);
extern gro_result_t	napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t	napi_gro_receive(struct napi_struct *napi,
					 struct sk_buff *skb);
extern void		napi_reuse_skb(struct napi_struct *napi,
				       struct sk_buff *skb);
extern struct sk_buff *	napi_get_frags(struct napi_struct *napi);
extern gro_result_t	napi_frags_finish(struct napi_struct *napi,
					  struct sk_buff *skb,
					  gro_result_t ret);
extern struct sk_buff *	napi_frags_skb(struct napi_struct *napi);
extern gro_result_t	napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
extern void		netif_nit_deliver(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, const char *);
extern int		dev_set_alias(struct net_device *, const char *, size_t);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);
extern int		dev_forward_skb(struct net_device *dev,
					struct sk_buff *skb);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
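/* Illustrative sketch (not part of this header): dev_get_by_index() returns
 * the device with a reference already held, so the caller only has to drop
 * it with dev_put() when done; explicit dev_hold()/dev_put() pairs are used
 * when a longer-lived pointer to the device is kept. "ifindex" is a
 * hypothetical variable.
 *
 *	struct net_device *dev = dev_get_by_index(&init_net, ifindex);
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */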
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern unsigned long dev_trans_start(struct net_device *dev);

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if the device's operational state is up.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
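/* Illustrative sketch (not part of this header): how a driver typically wires
 * these up. "debug" is a hypothetical module parameter and "priv->msg_enable"
 * a field in the hypothetical private struct that the netif_msg_*() helpers
 * check before logging.
 *
 *	static int debug = -1;			use driver defaults
 *	module_param(debug, int, 0);
 *
 *	in probe:
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *						 NETIF_MSG_PROBE |
 *						 NETIF_MSG_LINK);
 *
 *	at a link change:
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link up\n", dev->name);
 */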
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/*
 * dev_addrs walker. Should be used only for read access. Call with
 * rcu_read_lock held.
 */
#define for_each_dev_addr(dev, ha) \
		list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
/* These functions live elsewhere (drivers/net/net_init.c), but are related */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
			unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
				 struct net_device *from_dev,
				 unsigned char addr_type);

/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr);
extern int		dev_unicast_add(struct net_device *dev, void *addr);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_bonding_change(struct net_device *dev,
					      unsigned long event);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
extern void		dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void		netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);

extern void linkwatch_run_queue(void);

unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask);
unsigned long netdev_fix_features(unsigned long features, const char *name);

void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev);
static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
					      struct net_device *master)
{
	if (skb->pkt_type == PACKET_HOST) {
		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;

		memcpy(dest, master->dev_addr, ETH_ALEN);
	}
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master) {
		if (master->priv_flags & IFF_MASTER_ARPMON)
			dev->last_rx = jiffies;

		if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
			/* Do address unmangle. The local destination address
			 * will always be the one the master has. Provides the right
			 * functionality in a bridge.
			 */
			skb_bond_set_mac_by_master(skb, master);
		}

		if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
			if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
			    skb->protocol == __cpu_to_be16(ETH_P_ARP))
				return 0;

			if (master->priv_flags & IFF_MASTER_ALB) {
				if (skb->pkt_type != PACKET_BROADCAST &&
				    skb->pkt_type != PACKET_MULTICAST)
					return 0;
			}
			if (master->priv_flags & IFF_MASTER_8023AD &&
			    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
				return 0;

			return 1;
		}
	}
	return 0;
}

extern struct pernet_operations __net_initdata loopback_net_ops;
static inline int dev_ethtool_get_settings(struct net_device *dev,
					   struct ethtool_cmd *cmd)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
		return -EOPNOTSUPP;
	return dev->ethtool_ops->get_settings(dev, cmd);
}

static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
		return 0;
	return dev->ethtool_ops->get_rx_csum(dev);
}

static inline u32 dev_ethtool_get_flags(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
		return 0;
	return dev->ethtool_ops->get_flags(dev);
}
#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */