net: Add Open vSwitch kernel components.
net/openvswitch/datapath.c
/*
 * Copyright (c) 2007-2011 Nicira Networks.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>

#include "datapath.h"
#include "flow.h"
#include "vport-internal_dev.h"

/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
 * genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 */

/* Global list of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 */
static LIST_HEAD(dps);

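/* Rehash the flow table at a fixed interval.  The usual motivation for
 * periodic rehashing (an assumption here; this file does not say) is to keep
 * any one pattern of hash-bucket collisions from persisting indefinitely. */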
#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
static struct datapath *get_dp(int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
        return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = rcu_dereference(dp->ports[OVSP_LOCAL]);
        if (local)
                ifindex = local->ops->get_ifindex(local);
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
        free_percpu(dp->stats_percpu);
        kfree(dp);
}

/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;

                rcu_assign_pointer(dp->ports[parms->port_no], vport);
                list_add(&vport->node, &dp->port_list);
        }

        return vport;
}

/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_RTNL();

        /* First drop references to device. */
        list_del(&p->node);
        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        struct sw_flow_key key;
        u64 *stats_counter;
        int error;
        int key_len;

        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
        if (unlikely(error)) {
                kfree_skb(skb);
                return;
        }

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;

                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
                upcall.pid = p->upcall_pid;
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }

        OVS_CB(skb)->flow = flow;

        stats_counter = &stats->n_hit;
        ovs_flow_used(OVS_CB(skb)->flow, skb);
        ovs_execute_actions(dp, skb);

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->sync);
        (*stats_counter)++;
        u64_stats_update_end(&stats->sync);
}

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX
};

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;

        if (upcall_info->pid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex) {
                err = -ENODEV;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
        else
                err = queue_gso_packets(dp_ifindex, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

        u64_stats_update_begin(&stats->sync);
        stats->n_lost++;
        u64_stats_update_end(&stats->sync);

        return err;
}

static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        skb = segs;
        do {
                err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for a first fragment, so we need to
                         * properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}

static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        unsigned int len;
        int err;

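        /* A VLAN tag, if present, is carried out-of-band in skb metadata;
         * re-insert it into the packet data on a clone so that userspace
         * sees the frame exactly as it appeared on the wire. */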
        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
                if (!nskb)
                        return -ENOMEM;

                nskb->vlan_tci = 0;
                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

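        /* Size the upcall message: ovs_header, the whole packet, the flow
         * key, and (for OVS_PACKET_CMD_ACTION upcalls) 8 bytes of userdata. */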
        len = sizeof(struct ovs_header);
        len += nla_total_size(skb->len);
        len += nla_total_size(FLOW_BUFSIZE);
        if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
                len += nla_total_size(8);

        user_skb = genlmsg_new(len, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_flow_to_nlattrs(upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
                            nla_get_u64(upcall_info->userdata));

        nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

        skb_copy_and_csum_dev(skb, nla_data(nla));

        err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);

out:
        kfree_skb(nskb);
        return err;
}

/* Called with genl_mutex. */
static int flush_flows(int dp_ifindex)
{
        struct flow_table *old_table;
        struct flow_table *new_table;
        struct datapath *dp;

        dp = get_dp(dp_ifindex);
        if (!dp)
                return -ENODEV;

        old_table = genl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        ovs_flow_tbl_deferred_destroy(old_table);
        return 0;
}

static int validate_actions(const struct nlattr *attr,
                            const struct sw_flow_key *key, int depth);

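/* Validates a nested OVS_ACTION_ATTR_SAMPLE attribute: it must carry a u32
 * probability and a nested action list, which is validated recursively;
 * validate_actions() bounds the recursion with SAMPLE_ACTION_DEPTH. */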
static int validate_sample(const struct nlattr *attr,
                           const struct sw_flow_key *key, int depth)
{
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
        int rem;

        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
                        return -EINVAL;
                attrs[type] = a;
        }
        if (rem)
                return -EINVAL;

        probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
        if (!probability || nla_len(probability) != sizeof(u32))
                return -EINVAL;

        actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
        if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
                return -EINVAL;
        return validate_actions(actions, key, depth + 1);
}

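/* Validates an OVS_ACTION_ATTR_SET action: the single nested key attribute
 * must be well formed and consistent with 'flow_key', so the set action is
 * guaranteed to apply cleanly to every packet that matches the flow. */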
static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);

        /* There can be only one key in an action. */
        if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
                return -EINVAL;

        if (key_type > OVS_KEY_ATTR_MAX ||
            nla_len(ovs_key) != ovs_key_lens[key_type])
                return -EINVAL;

        switch (key_type) {
        const struct ovs_key_ipv4 *ipv4_key;

        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_ETHERNET:
                break;

        case OVS_KEY_ATTR_IPV4:
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;

                if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
                        return -EINVAL;

                ipv4_key = nla_data(ovs_key);
                if (ipv4_key->ipv4_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv4_key->ipv4_frag != flow_key->ip.frag)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_TCP:
                if (flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;

                if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_UDP:
                if (flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;

                if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

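/* Validates an OVS_ACTION_ATTR_USERSPACE action: it must carry a nonzero
 * netlink PID to which matching packets will be sent. */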
static int validate_userspace(const struct nlattr *attr)
{
        static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
                [OVS_USERSPACE_ATTR_PID] = { .type = NLA_U32 },
                [OVS_USERSPACE_ATTR_USERDATA] = { .type = NLA_U64 },
        };
        struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
        int error;

        error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
                                 attr, userspace_policy);
        if (error)
                return error;

        if (!a[OVS_USERSPACE_ATTR_PID] ||
            !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
                return -EINVAL;

        return 0;
}

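/* Validates a list of datapath actions supplied by userspace.  Each action
 * is checked for a well-formed length and for arguments that make sense
 * against 'key'; 'depth' counts nested OVS_ACTION_ATTR_SAMPLE levels. */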
static int validate_actions(const struct nlattr *attr,
                            const struct sw_flow_key *key, int depth)
{
        const struct nlattr *a;
        int rem, err;

        if (depth >= SAMPLE_ACTION_DEPTH)
                return -EOVERFLOW;

        nla_for_each_nested(a, attr, rem) {
                /* Expected argument lengths, (u32)-1 for variable length. */
                static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
                        [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
                        [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
                        [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
                        [OVS_ACTION_ATTR_POP_VLAN] = 0,
                        [OVS_ACTION_ATTR_SET] = (u32)-1,
                        [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);

                if (type > OVS_ACTION_ATTR_MAX ||
                    (action_lens[type] != nla_len(a) &&
                     action_lens[type] != (u32)-1))
                        return -EINVAL;

                switch (type) {
                case OVS_ACTION_ATTR_UNSPEC:
                        return -EINVAL;

                case OVS_ACTION_ATTR_USERSPACE:
                        err = validate_userspace(a);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        vlan = nla_data(a);
                        if (vlan->vlan_tpid != htons(ETH_P_8021Q))
                                return -EINVAL;
                        if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = validate_sample(a, key, depth);
                        if (err)
                                return err;
                        break;

                default:
                        return -EINVAL;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}

static void clear_stats(struct sw_flow *flow)
{
        flow->used = 0;
        flow->tcp_flags = 0;
        flow->packet_count = 0;
        flow->byte_count = 0;
}

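/* Handler for OVS_PACKET_CMD_EXECUTE: rebuilds a packet supplied by
 * userspace, constructs a temporary flow for it, validates and attaches the
 * requested actions, and executes them against the datapath. */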
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        int len;
        int err;
        int key_len;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS] ||
            nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= 1536)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
        if (err)
                goto err_flow_free;

        err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
                                             &flow->key.phy.in_port,
                                             a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_free;

        err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
        if (err)
                goto err_flow_free;

        flow->hash = ovs_flow_hash(&flow->key, key_len);

        acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_free;
        rcu_assign_pointer(flow->sf_acts, acts);

        OVS_CB(packet)->flow = flow;
        packet->priority = flow->key.phy.priority;

        rcu_read_lock();
        dp = get_dp(ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        local_bh_disable();
        err = ovs_execute_actions(dp, packet);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
        int i;
        struct flow_table *table = genl_dereference(dp->table);

        stats->n_flows = ovs_flow_tbl_count(table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 pid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        const struct sw_flow_actions *sf_acts;
        struct ovs_flow_stats stats;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;
        err = ovs_flow_to_nlattrs(&flow->key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used)
                NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));

        if (stats.n_packets)
                NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
                        sizeof(struct ovs_flow_stats), &stats);

        if (tcp_flags)
                NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
                      sf_acts->actions);
        if (err < 0 && skb_orig_len)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}
816
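/* Computes a worst-case reply size for a single flow (key, actions, stats,
 * TCP flags and last-used time) and allocates a genetlink skb for it. */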
static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
        const struct sw_flow_actions *sf_acts;
        int len;

        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());

        /* OVS_FLOW_ATTR_KEY */
        len = nla_total_size(FLOW_BUFSIZE);
        /* OVS_FLOW_ATTR_ACTIONS */
        len += nla_total_size(sf_acts->actions_len);
        /* OVS_FLOW_ATTR_STATS */
        len += nla_total_size(sizeof(struct ovs_flow_stats));
        /* OVS_FLOW_ATTR_TCP_FLAGS */
        len += nla_total_size(1);
        /* OVS_FLOW_ATTR_USED */
        len += nla_total_size(8);

        len += NLMSG_ALIGN(sizeof(struct ovs_header));

        return genlmsg_new(len, GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
                                               u32 pid, u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(flow);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
}

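/* Shared handler for OVS_FLOW_CMD_NEW and OVS_FLOW_CMD_SET: looks up the
 * flow by its key and either inserts a new flow (expanding the table if
 * necessary) or replaces the actions of the existing one. */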
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sk_buff *reply;
        struct datapath *dp;
        struct flow_table *table;
        int error;
        int key_len;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;
        error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0);
                if (error)
                        goto error;
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        dp = get_dp(ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto error;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                struct sw_flow_actions *acts;

                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
                        goto error;

                /* Expand table, if necessary, to make room. */
                if (ovs_flow_tbl_need_to_expand(table)) {
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_expand(table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(table);
                                table = genl_dereference(dp->table);
                        }
                }

                /* Allocate flow. */
                flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto error;
                }
                flow->key = key;
                clear_stats(flow);

                /* Obtain actions. */
                acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error_free_flow;
                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                flow->hash = ovs_flow_hash(&key, key_len);
                ovs_flow_tbl_insert(table, flow);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                                info->snd_seq,
                                                OVS_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;
                struct nlattr *acts_attrs;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto error;

                /* Update actions. */
                old_acts = rcu_dereference_protected(flow->sf_acts,
                                                     lockdep_genl_is_held());
                acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
                if (acts_attrs &&
                    (old_acts->actions_len != nla_len(acts_attrs) ||
                     memcmp(old_acts->actions, nla_data(acts_attrs),
                            old_acts->actions_len))) {
                        struct sw_flow_actions *new_acts;

                        new_acts = ovs_flow_actions_alloc(acts_attrs);
                        error = PTR_ERR(new_acts);
                        if (IS_ERR(new_acts))
                                goto error;

                        rcu_assign_pointer(flow->sf_acts, new_acts);
                        ovs_flow_deferred_free_acts(old_acts);
                }

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                                info->snd_seq, OVS_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[OVS_FLOW_ATTR_CLEAR]) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }

        if (!IS_ERR(reply))
                genl_notify(reply, genl_info_net(info), info->snd_pid,
                            ovs_dp_flow_multicast_group.id, info->nlhdr,
                            GFP_KERNEL);
        else
                netlink_set_err(init_net.genl_sock, 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;

error_free_flow:
        ovs_flow_free(flow);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        if (!a[OVS_FLOW_ATTR_KEY])
                return -EINVAL;
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        dp = get_dp(ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        if (!a[OVS_FLOW_ATTR_KEY])
                return flush_flows(ovs_header->dp_ifindex);
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        dp = get_dp(ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
                return -ENOENT;

        reply = ovs_flow_cmd_alloc_info(flow);
        if (!reply)
                return -ENOMEM;

        ovs_flow_tbl_remove(table, flow);

        err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
        BUG_ON(err < 0);

        ovs_flow_deferred_free(flow);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        return 0;
}
1076
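/* Flow dump callback: cb->args[0] and cb->args[1] persist the (bucket,
 * object) cursor into the flow table between successive dump calls. */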
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        struct flow_table *table;

        dp = get_dp(ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        table = genl_dereference(dp->table);

        for (;;) {
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow = ovs_flow_tbl_next(table, &bucket, &obj);
                if (!flow)
                        break;

                if (ovs_flow_cmd_fill_info(flow, dp, skb,
                                           NETLINK_CB(cb->skb).pid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
        return skb->len;
}

static struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set
        },
        { .cmd = OVS_FLOW_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_del
        },
        { .cmd = OVS_FLOW_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_get,
          .dumpit = ovs_flow_cmd_dump
        },
        { .cmd = OVS_FLOW_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set,
        },
};

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
        [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_DATAPATH_FAMILY,
        .version = OVS_DATAPATH_VERSION,
        .maxattr = OVS_DP_ATTR_MAX
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP
};

static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 pid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        int err;

        ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                goto error;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        rcu_read_lock();
        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
        rcu_read_unlock();
        if (err)
                goto nla_put_failure;

        get_dp_stats(dp, &dp_stats);
        NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
error:
        return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
                                             u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

/* Called with genl_mutex and optionally with RTNL lock also. */
static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        struct datapath *dp;

        if (!a[OVS_DP_ATTR_NAME])
                dp = get_dp(ovs_header->dp_ifindex);
        else {
                struct vport *vport;

                rcu_read_lock();
                vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
                rcu_read_unlock();
        }
        return dp ? dp : ERR_PTR(-ENODEV);
}
1219
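/* Handler for OVS_DP_CMD_NEW: allocates the datapath, its flow table and
 * per-CPU stats, and creates the OVSP_LOCAL internal vport that gives the
 * datapath its name and ifindex. */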
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
        int err;

        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;

        rtnl_lock();
        err = -ENODEV;
        if (!try_module_get(THIS_MODULE))
                goto err_unlock_rtnl;

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
                goto err_put_module;
        INIT_LIST_HEAD(&dp->port_list);

        /* Allocate table. */
        err = -ENOMEM;
        rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
        if (!dp->table)
                goto err_free_dp;

        dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }

        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
        parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                if (err == -EBUSY)
                        err = -EEXIST;

                goto err_destroy_percpu;
        }

        reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto err_destroy_local_port;

        list_add_tail(&dp->list_node, &dps);
        rtnl_unlock();

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
        return 0;

err_destroy_local_port:
        ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
err_destroy_percpu:
        free_percpu(dp->stats_percpu);
err_destroy_table:
        ovs_flow_tbl_destroy(genl_dereference(dp->table));
err_free_dp:
        kfree(dp);
err_put_module:
        module_put(THIS_MODULE);
err_unlock_rtnl:
        rtnl_unlock();
err:
        return err;
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct vport *vport, *next_vport;
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        rtnl_lock();
        dp = lookup_datapath(info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto exit_unlock;

        reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
                                      info->snd_seq, OVS_DP_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
                if (vport->port_no != OVSP_LOCAL)
                        ovs_dp_detach_port(vport);

        list_del(&dp->list_node);
        ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));

        /* rtnl_unlock() will wait until all the references to devices that
         * are pending unregistration have been dropped.  We do it here to
         * ensure that any internal devices (which contain DP pointers) are
         * fully destroyed before freeing the datapath.
         */
        rtnl_unlock();

        call_rcu(&dp->rcu, destroy_dp_rcu);
        module_put(THIS_MODULE);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);

        return 0;

exit_unlock:
        rtnl_unlock();
        return err;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        dp = lookup_datapath(info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);

        reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                netlink_set_err(init_net.genl_sock, 0,
                                ovs_dp_datapath_multicast_group.id, err);
                return 0;
        }

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);

        return 0;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;

        dp = lookup_datapath(info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);

        reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        return genlmsg_reply(reply, info);
}
1392
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;

        list_for_each_entry(dp, &dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
                /* Count skipped entries too; a bare 'continue' before the
                 * increment would keep 'i' below 'skip' forever on resumed
                 * dumps. */
                i++;
        }

        cb->args[0] = i;

        return skb->len;
}

static struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_new
        },
        { .cmd = OVS_DP_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_del
        },
        { .cmd = OVS_DP_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_get,
          .dumpit = ovs_dp_cmd_dump
        },
        { .cmd = OVS_DP_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_set,
        },
};

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
        .maxattr = OVS_VPORT_ATTR_MAX
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP
};

1458
/* Called with RTNL lock or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 pid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;

        ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(vport->dp);

        NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
        NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
        NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
        NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);

        ovs_vport_get_stats(vport, &vport_stats);
        NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
                &vport_stats);

        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

/* Called with RTNL lock or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
                                         u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[OVS_VPORT_ATTR_NAME]) {
                vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = rcu_dereference_rtnl(dp->ports[port_no]);
                if (!vport)
                        return ERR_PTR(-ENOENT);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}
1544
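/* Handler for OVS_VPORT_CMD_NEW: attaches a new vport to a datapath, either
 * at the caller-supplied port number or at the lowest free one. */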
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct datapath *dp;
        u32 port_no;
        int err;

        err = -EINVAL;
        if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
            !a[OVS_VPORT_ATTR_UPCALL_PID])
                goto exit;

        rtnl_lock();
        dp = get_dp(ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock;

        if (a[OVS_VPORT_ATTR_PORT_NO]) {
                port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                err = -EFBIG;
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;

                vport = rtnl_dereference(dp->ports[port_no]);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock;
                        }
                        vport = rtnl_dereference(dp->ports[port_no]);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
        parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                ovs_dp_detach_port(vport);
                goto exit_unlock;
        }
1609         genl_notify(reply, genl_info_net(info), info->snd_pid,
1610                     ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
1611
1612 exit_unlock:
1613         rtnl_unlock();
1614 exit:
1615         return err;
1616 }
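
/*
 * A minimal userspace sketch of driving OVS_VPORT_CMD_NEW, assuming
 * libnl-3 with 'sk' already connected via genl_connect() and 'family'
 * resolved with genl_ctrl_resolve(sk, OVS_VPORT_FAMILY); the port name,
 * dp_ifindex and error handling are placeholders (illustrative only,
 * not compiled here):
 *
 *      struct nl_msg *msg = nlmsg_alloc();
 *      struct ovs_header *hdr;
 *
 *      hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *                        sizeof(*hdr), 0, OVS_VPORT_CMD_NEW,
 *                        OVS_VPORT_VERSION);
 *      hdr->dp_ifindex = dp_ifindex;
 *      nla_put_string(msg, OVS_VPORT_ATTR_NAME, "vport1");
 *      nla_put_u32(msg, OVS_VPORT_ATTR_TYPE, OVS_VPORT_TYPE_NETDEV);
 *      nla_put_u32(msg, OVS_VPORT_ATTR_UPCALL_PID,
 *                  nl_socket_get_local_port(sk));
 *      nl_send_auto(sk, msg);
 */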

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        rtnl_lock();
        vport = lookup_vport(info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        err = 0;
        if (a[OVS_VPORT_ATTR_TYPE] &&
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
                err = -EINVAL;

        if (!err && a[OVS_VPORT_ATTR_OPTIONS])
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
        if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
                vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

        reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                netlink_set_err(init_net.genl_sock, 0,
                                ovs_dp_vport_multicast_group.id, err);
                goto exit_unlock;
        }

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
        return err;
}
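
/*
 * If the reply skb cannot be built above, netlink_set_err() marks every
 * listener of the vport multicast group with the error, so daemons
 * listening for vport changes learn that they missed a notification
 * instead of silently falling out of sync.
 */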

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        rtnl_lock();
        vport = lookup_vport(info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        if (vport->port_no == OVSP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock;
        }

        reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         OVS_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        err = 0;
        ovs_dp_detach_port(vport);

        genl_notify(reply, genl_info_net(info), info->snd_pid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);

exit_unlock:
        rtnl_unlock();
        return err;
}
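
/*
 * Port 0 (OVSP_LOCAL) is the datapath's own internal port; it is
 * created and destroyed together with the datapath, which is why the
 * delete handler above rejects it with -EINVAL rather than detaching
 * it.
 */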

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        rcu_read_lock();
        vport = lookup_vport(ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;

        reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto exit_unlock;

        rcu_read_unlock();

        return genlmsg_reply(reply, info);

exit_unlock:
        rcu_read_unlock();
        return err;
}
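
/*
 * Note that ovs_vport_cmd_get() drops the RCU read lock before calling
 * genlmsg_reply(): the reply skb is a private snapshot of the vport
 * state, so no RCU-protected data is touched once it has been built.
 */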

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        u32 port_no;
        int retval;

        dp = get_dp(ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;

        rcu_read_lock();
        for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
                struct vport *vport;

                vport = rcu_dereference(dp->ports[port_no]);
                if (!vport)
                        continue;

                if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                            OVS_VPORT_CMD_NEW) < 0)
                        break;
        }
        rcu_read_unlock();

        cb->args[0] = port_no;
        retval = skb->len;

        return retval;
}
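
/*
 * The dump handler records where its scan stopped in cb->args[0], so a
 * dump that fills one skb resumes at that port on the next ->dumpit()
 * call; the dump ends when a pass adds no messages and skb->len (0) is
 * returned.  A requester only needs to set NLM_F_DUMP, sketched here
 * with libnl-3 (identifiers are placeholders):
 *
 *      hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *                        sizeof(*hdr), NLM_F_DUMP, OVS_VPORT_CMD_GET,
 *                        OVS_VPORT_VERSION);
 *      hdr->dp_ifindex = dp_ifindex;
 */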

static void rehash_flow_table(struct work_struct *work)
{
        struct datapath *dp;

        genl_lock();

        list_for_each_entry(dp, &dps, list_node) {
                struct flow_table *old_table = genl_dereference(dp->table);
                struct flow_table *new_table;

                new_table = ovs_flow_tbl_rehash(old_table);
                if (!IS_ERR(new_table)) {
                        rcu_assign_pointer(dp->table, new_table);
                        ovs_flow_tbl_deferred_destroy(old_table);
                }
        }

        genl_unlock();

        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}
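
/*
 * rehash_flow_table() is the usual RCU publish pattern: the new table
 * is fully built first, rcu_assign_pointer() makes it visible to new
 * readers, and ovs_flow_tbl_deferred_destroy() frees the old table only
 * after all pre-existing RCU readers are done with it.
 */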

static struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_new
        },
        { .cmd = OVS_VPORT_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_del
        },
        { .cmd = OVS_VPORT_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_get,
          .dumpit = ovs_vport_cmd_dump
        },
        { .cmd = OVS_VPORT_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_set,
        },
};

struct genl_family_and_ops {
        struct genl_family *family;
        struct genl_ops *ops;
        int n_ops;
        struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
        { &dp_datapath_genl_family,
          dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
          &ovs_dp_datapath_multicast_group },
        { &dp_vport_genl_family,
          dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
          &ovs_dp_vport_multicast_group },
        { &dp_flow_genl_family,
          dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
          &ovs_dp_flow_multicast_group },
        { &dp_packet_genl_family,
          dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
          NULL },
};

static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++)
                genl_unregister_family(dp_genl_families[i].family);
}

static int dp_register_genl(void)
{
        int n_registered;
        int err;
        int i;

        n_registered = 0;
        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                const struct genl_family_and_ops *f = &dp_genl_families[i];

                err = genl_register_family_with_ops(f->family, f->ops,
                                                    f->n_ops);
                if (err)
                        goto error;
                n_registered++;

                if (f->group) {
                        err = genl_register_mc_group(f->family, f->group);
                        if (err)
                                goto error;
                }
        }

        return 0;

error:
        dp_unregister_genl(n_registered);
        return err;
}
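
/*
 * dp_register_genl() counts successfully registered families as it
 * goes, so a failure partway through (including a failed multicast
 * group registration) unwinds exactly the families that were actually
 * registered and no more.
 */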

static int __init dp_init(void)
{
        struct sk_buff *dummy_skb;
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

        pr_info("Open vSwitch switching datapath\n");

        err = ovs_flow_init();
        if (err)
                goto error;

        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;

        err = register_netdevice_notifier(&ovs_dp_device_notifier);
        if (err)
                goto error_vport_exit;

        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_notifier;

        schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

        return 0;

error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_vport_exit:
        ovs_vport_exit();
error_flow_exit:
        ovs_flow_exit();
error:
        return err;
}

static void dp_cleanup(void)
{
        cancel_delayed_work_sync(&rehash_flow_wq);
        rcu_barrier();
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
        ovs_vport_exit();
        ovs_flow_exit();
}
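
/*
 * The rcu_barrier() in dp_cleanup() waits for outstanding RCU callbacks
 * (e.g. deferred flow-table destruction) to finish before the generic
 * netlink families and the notifier are torn down and the module text
 * can go away.
 */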

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");