rocker: put port in FORWARDING state after leaving bridge
/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
        {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
        {0, }
};

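/* OF-DPA-style flow table keys.  Each union member holds the match fields
 * of one pipeline table (ingress port, VLAN, termination MAC, unicast
 * routing, bridging, ACL); tbl_id selects which member is valid.
 */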
struct rocker_flow_tbl_key {
        u32 priority;
        enum rocker_of_dpa_table_id tbl_id;
        union {
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                } ig_port;
                struct {
                        u32 in_pport;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        bool untagged;
                        __be16 new_vlan_id;
                } vlan;
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        __be16 eth_type;
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        bool copy_to_cpu;
                } term_mac;
                struct {
                        __be16 eth_type;
                        __be32 dst4;
                        __be32 dst4_mask;
                        enum rocker_of_dpa_table_id goto_tbl;
                        u32 group_id;
                } ucast_routing;
                struct {
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        int has_eth_dst;
                        int has_eth_dst_mask;
                        __be16 vlan_id;
                        u32 tunnel_id;
                        enum rocker_of_dpa_table_id goto_tbl;
                        u32 group_id;
                        bool copy_to_cpu;
                } bridge;
                struct {
                        u32 in_pport;
                        u32 in_pport_mask;
                        u8 eth_src[ETH_ALEN];
                        u8 eth_src_mask[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        u8 eth_dst_mask[ETH_ALEN];
                        __be16 eth_type;
                        __be16 vlan_id;
                        __be16 vlan_id_mask;
                        u8 ip_proto;
                        u8 ip_proto_mask;
                        u8 ip_tos;
                        u8 ip_tos_mask;
                        u32 group_id;
                } acl;
        };
};

struct rocker_flow_tbl_entry {
        struct hlist_node entry;
        u32 ref_count;
        u64 cookie;
        struct rocker_flow_tbl_key key;
        u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
        struct hlist_node entry;
        u32 cmd;
        u32 group_id; /* key */
        u16 group_count;
        u32 *group_ids;
        union {
                struct {
                        u8 pop_vlan;
                } l2_interface;
                struct {
                        u8 eth_src[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        __be16 vlan_id;
                        u32 group_id;
                } l2_rewrite;
                struct {
                        u8 eth_src[ETH_ALEN];
                        u8 eth_dst[ETH_ALEN];
                        __be16 vlan_id;
                        bool ttl_check;
                        u32 group_id;
                } l3_unicast;
        };
};

struct rocker_fdb_tbl_entry {
        struct hlist_node entry;
        u32 key_crc32; /* key */
        bool learned;
        struct rocker_fdb_tbl_key {
                u32 pport;
                u8 addr[ETH_ALEN];
                __be16 vlan_id;
        } key;
};

struct rocker_internal_vlan_tbl_entry {
        struct hlist_node entry;
        int ifindex; /* key */
        u32 ref_count;
        __be16 vlan_id;
};

struct rocker_desc_info {
        char *data; /* mapped */
        size_t data_size;
        size_t tlv_size;
        struct rocker_desc *desc;
        DEFINE_DMA_UNMAP_ADDR(mapaddr);
};

struct rocker_dma_ring_info {
        size_t size;
        u32 head;
        u32 tail;
        struct rocker_desc *desc; /* mapped */
        dma_addr_t mapaddr;
        struct rocker_desc_info *desc_info;
        unsigned int type;
};

struct rocker;

enum {
        ROCKER_CTRL_LINK_LOCAL_MCAST,
        ROCKER_CTRL_LOCAL_ARP,
        ROCKER_CTRL_IPV4_MCAST,
        ROCKER_CTRL_IPV6_MCAST,
        ROCKER_CTRL_DFLT_BRIDGING,
        ROCKER_CTRL_MAX,
};

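/* Untagged traffic is assigned an internal VLAN so it can flow through the
 * VLAN-based pipeline.  Internal VLAN IDs are taken from the range
 * 0x0f00..0x0ffe (255 values; see rocker_vlan_id_is_internal() below).
 */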
#define ROCKER_INTERNAL_VLAN_ID_BASE    0x0f00
#define ROCKER_N_INTERNAL_VLANS         255
#define ROCKER_VLAN_BITMAP_LEN          BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)

struct rocker_port {
        struct net_device *dev;
        struct net_device *bridge_dev;
        struct rocker *rocker;
        unsigned int port_number;
        u32 pport;
        __be16 internal_vlan_id;
        int stp_state;
        u32 brport_flags;
        bool ctrls[ROCKER_CTRL_MAX];
        unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
        struct napi_struct napi_tx;
        struct napi_struct napi_rx;
        struct rocker_dma_ring_info tx_ring;
        struct rocker_dma_ring_info rx_ring;
};

struct rocker {
        struct pci_dev *pdev;
        u8 __iomem *hw_addr;
        struct msix_entry *msix_entries;
        unsigned int port_count;
        struct rocker_port **ports;
        struct {
                u64 id;
        } hw;
        spinlock_t cmd_ring_lock;
        struct rocker_dma_ring_info cmd_ring;
        struct rocker_dma_ring_info event_ring;
        DECLARE_HASHTABLE(flow_tbl, 16);
        spinlock_t flow_tbl_lock;
        u64 flow_tbl_next_cookie;
        DECLARE_HASHTABLE(group_tbl, 16);
        spinlock_t group_tbl_lock;
        DECLARE_HASHTABLE(fdb_tbl, 16);
        spinlock_t fdb_tbl_lock;
        unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
        DECLARE_HASHTABLE(internal_vlan_tbl, 8);
        spinlock_t internal_vlan_tbl_lock;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
        ROCKER_PRIORITY_UNKNOWN = 0,
        ROCKER_PRIORITY_IG_PORT = 1,
        ROCKER_PRIORITY_VLAN = 1,
        ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
        ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
        ROCKER_PRIORITY_UNICAST_ROUTING = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
        ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
        ROCKER_PRIORITY_BRIDGING_VLAN = 3,
        ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
        ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
        ROCKER_PRIORITY_BRIDGING_TENANT = 3,
        ROCKER_PRIORITY_ACL_CTRL = 3,
        ROCKER_PRIORITY_ACL_NORMAL = 2,
        ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
        u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
        u16 end = 0xffe;
        u16 _vlan_id = ntohs(vlan_id);

        return (_vlan_id >= start && _vlan_id <= end);
}

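/* Map an 802.1Q vid to the vlan_id used in flow/group table keys.  vid 0
 * (untagged) maps to the port's internal VLAN; *pop_vlan tells the caller
 * whether that internally-added tag must be popped again on egress.
 */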
static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
                                      u16 vid, bool *pop_vlan)
{
        __be16 vlan_id;

        if (pop_vlan)
                *pop_vlan = false;
        vlan_id = htons(vid);
        if (!vlan_id) {
                vlan_id = rocker_port->internal_vlan_id;
                if (pop_vlan)
                        *pop_vlan = true;
        }

        return vlan_id;
}

static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
                                   __be16 vlan_id)
{
        if (rocker_vlan_id_is_internal(vlan_id))
                return 0;

        return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
{
        return !!rocker_port->bridge_dev;
}

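/* Simple completion-style wait object used to synchronize with descriptor
 * completion interrupts.  When nowait is set, the submitter does not sleep
 * and the cmd ring interrupt handler frees the object instead.
 */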
struct rocker_wait {
        wait_queue_head_t wait;
        bool done;
        bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
        wait->done = false;
        wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
        init_waitqueue_head(&wait->wait);
        rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(gfp_t gfp)
{
        struct rocker_wait *wait;

        wait = kmalloc(sizeof(*wait), gfp);
        if (!wait)
                return NULL;
        rocker_wait_init(wait);
        return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
        kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
                                      unsigned long timeout)
{
        wait_event_timeout(wait->wait, wait->done, timeout);
        if (!wait->done)
                return false;
        return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
        wait->done = true;
        wake_up(&wait->wait);
}

static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
{
        return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
{
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
{
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

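/* Register accessors; reg is a register name without the ROCKER_ prefix,
 * e.g. rocker_write32(rocker, TEST_REG, val) expands to
 * writel(val, rocker->hw_addr + ROCKER_TEST_REG).
 */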
#define rocker_write32(rocker, reg, val)        \
        writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)      \
        readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)        \
        writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)      \
        readq((rocker)->hw_addr + (ROCKER_ ## reg))

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(struct rocker *rocker)
{
        struct pci_dev *pdev = rocker->pdev;
        u64 test_reg;
        u64 rnd;

        rnd = prandom_u32();
        rnd >>= 1;
        rocker_write32(rocker, TEST_REG, rnd);
        test_reg = rocker_read32(rocker, TEST_REG);
        if (test_reg != rnd * 2) {
                dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
                        test_reg, rnd * 2);
                return -EIO;
        }

        rnd = prandom_u32();
        rnd <<= 31;
        rnd |= prandom_u32();
        rocker_write64(rocker, TEST_REG64, rnd);
        test_reg = rocker_read64(rocker, TEST_REG64);
        if (test_reg != rnd * 2) {
                dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
                        test_reg, rnd * 2);
                return -EIO;
        }

        return 0;
}

static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
                               u32 test_type, dma_addr_t dma_handle,
                               unsigned char *buf, unsigned char *expect,
                               size_t size)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;

        rocker_wait_reset(wait);
        rocker_write32(rocker, TEST_DMA_CTRL, test_type);

        if (!rocker_wait_event_timeout(wait, HZ / 10)) {
                dev_err(&pdev->dev, "no interrupt received within a timeout\n");
                return -EIO;
        }

        for (i = 0; i < size; i++) {
                if (buf[i] != expect[i]) {
                        dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
                                buf[i], i, expect[i]);
                        return -EIO;
                }
        }
        return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(struct rocker *rocker,
                                  struct rocker_wait *wait, int offset)
{
        struct pci_dev *pdev = rocker->pdev;
        unsigned char *alloc;
        unsigned char *buf;
        unsigned char *expect;
        dma_addr_t dma_handle;
        int i;
        int err;

        alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
                        GFP_KERNEL | GFP_DMA);
        if (!alloc)
                return -ENOMEM;
        buf = alloc + offset;
        expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

        dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
                                    PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(pdev, dma_handle)) {
                err = -EIO;
                goto free_alloc;
        }

        rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
        rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

        memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

        memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

        prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
        for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
                expect[i] = ~buf[i];
        err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
                                  dma_handle, buf, expect,
                                  ROCKER_TEST_DMA_BUF_SIZE);
        if (err)
                goto unmap;

unmap:
        pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
                         PCI_DMA_BIDIRECTIONAL);
free_alloc:
        kfree(alloc);

        return err;
}

static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
{
        int i;
        int err;

        for (i = 0; i < 8; i++) {
                err = rocker_dma_test_offset(rocker, wait, i);
                if (err)
                        return err;
        }
        return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
        struct rocker_wait *wait = dev_id;

        rocker_wait_wake_up(wait);

        return IRQ_HANDLED;
}

static int rocker_basic_hw_test(struct rocker *rocker)
{
        struct pci_dev *pdev = rocker->pdev;
        struct rocker_wait wait;
        int err;

        err = rocker_reg_test(rocker);
        if (err) {
                dev_err(&pdev->dev, "reg test failed\n");
                return err;
        }

        /* Initialize the wait object before the irq is requested so a
         * spurious interrupt cannot hit an uninitialized waitqueue.
         */
        rocker_wait_init(&wait);

        err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
                          rocker_test_irq_handler, 0,
                          rocker_driver_name, &wait);
        if (err) {
                dev_err(&pdev->dev, "cannot assign test irq\n");
                return err;
        }

        rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
        if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
                dev_err(&pdev->dev, "no interrupt received within a timeout\n");
                err = -EIO;
                goto free_irq;
        }

        err = rocker_dma_test(rocker, &wait);
        if (err)
                dev_err(&pdev->dev, "dma test failed\n");

free_irq:
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
        return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
        (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |             Header          | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
                                          int *remaining)
{
        int totlen = ROCKER_TLV_ALIGN(tlv->len);

        *remaining -= totlen;
        return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
        return remaining >= (int) ROCKER_TLV_HDRLEN &&
               tlv->len >= ROCKER_TLV_HDRLEN &&
               tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)        \
        for (pos = head, rem = len;                     \
             rocker_tlv_ok(pos, rem);                   \
             pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)       \
        rocker_tlv_for_each(pos, rocker_tlv_data(tlv),  \
                            rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
        return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
        return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
        return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
        return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
        return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
        return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
        return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
        return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
        return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
        return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
        return *(u64 *) rocker_tlv_data(tlv);
}

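/* Parse a flat run of TLVs into tb[], indexed by attribute type, in the
 * same spirit as netlink's nla_parse().  Later attributes of a given type
 * overwrite earlier ones; types above maxtype are silently ignored.
 */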
static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
                             const char *buf, int buf_len)
{
        const struct rocker_tlv *tlv;
        const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
        int rem;

        memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

        rocker_tlv_for_each(tlv, head, buf_len, rem) {
                u32 type = rocker_tlv_type(tlv);

                if (type > 0 && type <= maxtype)
                        tb[type] = (struct rocker_tlv *) tlv;
        }
}

static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
                                    const struct rocker_tlv *tlv)
{
        rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
                         rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
                                  struct rocker_desc_info *desc_info)
{
        rocker_tlv_parse(tb, maxtype, desc_info->data,
                         desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
        return (struct rocker_tlv *) ((char *) desc_info->data +
                                               desc_info->tlv_size);
}

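/* Append one attribute (header + payload + alignment padding) to the
 * descriptor buffer.  Returns -EMSGSIZE if the remaining tail room cannot
 * hold the aligned attribute.
 */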
static int rocker_tlv_put(struct rocker_desc_info *desc_info,
                          int attrtype, int attrlen, const void *data)
{
        int tail_room = desc_info->data_size - desc_info->tlv_size;
        int total_size = rocker_tlv_total_size(attrlen);
        struct rocker_tlv *tlv;

        if (unlikely(tail_room < total_size))
                return -EMSGSIZE;

        tlv = rocker_tlv_start(desc_info);
        desc_info->tlv_size += total_size;
        tlv->type = attrtype;
        tlv->len = rocker_tlv_attr_size(attrlen);
        memcpy(rocker_tlv_data(tlv), data, attrlen);
        memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
        return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
                             int attrtype, u8 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
                              int attrtype, u16 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
                               int attrtype, __be16 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
                              int attrtype, u32 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
                               int attrtype, __be32 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
                              int attrtype, u64 value)
{
        return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

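/* Nested attributes are built by bracketing a group of puts, e.g.:
 *
 *      nest = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 *      rocker_tlv_put_u32(desc_info, ..., ...);
 *      rocker_tlv_nest_end(desc_info, nest);
 *
 * nest_end() back-patches the container's len; nest_cancel() rewinds
 * everything written since nest_start().
 */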
static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
        struct rocker_tlv *start = rocker_tlv_start(desc_info);

        if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
                return NULL;

        return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
                                struct rocker_tlv *start)
{
        start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
                                   struct rocker_tlv *start)
{
        desc_info->tlv_size = (char *) start - desc_info->data;
}

/******************************************
 * DMA ring and descriptor manipulation
 ******************************************/

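/* Ring positions advance modulo the ring size.  By convention the ring is
 * empty when head == tail and full when advancing head would make it equal
 * tail, so one descriptor is always left unused.
 */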
static u32 __pos_inc(u32 pos, size_t limit)
{
        return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(struct rocker_desc_info *desc_info)
{
        int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

        switch (err) {
        case ROCKER_OK:
                return 0;
        case -ROCKER_ENOENT:
                return -ENOENT;
        case -ROCKER_ENXIO:
                return -ENXIO;
        case -ROCKER_ENOMEM:
                return -ENOMEM;
        case -ROCKER_EEXIST:
                return -EEXIST;
        case -ROCKER_EINVAL:
                return -EINVAL;
        case -ROCKER_EMSGSIZE:
                return -EMSGSIZE;
        case -ROCKER_ENOTSUP:
                return -EOPNOTSUPP;
        case -ROCKER_ENOBUFS:
                return -ENOBUFS;
        }

        return -EINVAL;
}

static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
{
        desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
{
        u32 comp_err = desc_info->desc->comp_err;

        return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
{
        return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
                                       void *ptr)
{
        desc_info->desc->cookie = (uintptr_t) ptr;
}

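/* Descriptor ownership is tracked with the completion GEN bit: the driver
 * hands a descriptor to the hardware by advancing head, and the hardware
 * hands it back by setting GEN, which tail_get checks before consuming.
 */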
static struct rocker_desc_info *
rocker_desc_head_get(struct rocker_dma_ring_info *info)
{
        struct rocker_desc_info *desc_info;
        u32 head = __pos_inc(info->head, info->size);

        desc_info = &info->desc_info[info->head];
        if (head == info->tail)
                return NULL; /* ring full */
        desc_info->tlv_size = 0;
        return desc_info;
}

static void rocker_desc_commit(struct rocker_desc_info *desc_info)
{
        desc_info->desc->buf_size = desc_info->data_size;
        desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(struct rocker *rocker,
                                 struct rocker_dma_ring_info *info,
                                 struct rocker_desc_info *desc_info)
{
        u32 head = __pos_inc(info->head, info->size);

        BUG_ON(head == info->tail);
        rocker_desc_commit(desc_info);
        info->head = head;
        rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
        struct rocker_desc_info *desc_info;

        if (info->tail == info->head)
                return NULL; /* nothing to be done between head and tail */
        desc_info = &info->desc_info[info->tail];
        if (!rocker_desc_gen(desc_info))
                return NULL; /* gen bit not set, desc is not ready yet */
        info->tail = __pos_inc(info->tail, info->size);
        desc_info->tlv_size = desc_info->desc->tlv_size;
        return desc_info;
}

static void rocker_dma_ring_credits_set(struct rocker *rocker,
                                        struct rocker_dma_ring_info *info,
                                        u32 credits)
{
        if (credits)
                rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
        return max(ROCKER_DMA_SIZE_MIN,
                   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

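/* Create a DMA descriptor ring.  The caller must pass a size already fixed
 * up by rocker_dma_ring_size_fix(), i.e. a power of two clamped to
 * [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX]; the BUG_ON below enforces it.
 */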
static int rocker_dma_ring_create(struct rocker *rocker,
                                  unsigned int type,
                                  size_t size,
                                  struct rocker_dma_ring_info *info)
{
        int i;

        BUG_ON(size != rocker_dma_ring_size_fix(size));
        info->size = size;
        info->type = type;
        info->head = 0;
        info->tail = 0;
        info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
                                  GFP_KERNEL);
        if (!info->desc_info)
                return -ENOMEM;

        info->desc = pci_alloc_consistent(rocker->pdev,
                                          info->size * sizeof(*info->desc),
                                          &info->mapaddr);
        if (!info->desc) {
                kfree(info->desc_info);
                return -ENOMEM;
        }

        for (i = 0; i < info->size; i++)
                info->desc_info[i].desc = &info->desc[i];

        rocker_write32(rocker, DMA_DESC_CTRL(info->type),
                       ROCKER_DMA_DESC_CTRL_RESET);
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
        rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

        return 0;
}

static void rocker_dma_ring_destroy(struct rocker *rocker,
                                    struct rocker_dma_ring_info *info)
{
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

        pci_free_consistent(rocker->pdev,
                            info->size * sizeof(struct rocker_desc),
                            info->desc, info->mapaddr);
        kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
                                             struct rocker_dma_ring_info *info)
{
        int i;

        BUG_ON(info->head || info->tail);

        /* When ring is consumer, we need to advance head for each desc.
         * That tells hw that the desc is ready to be used by it.
         */
        for (i = 0; i < info->size - 1; i++)
                rocker_desc_head_set(rocker, info, &info->desc_info[i]);
        rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
                                      struct rocker_dma_ring_info *info,
                                      int direction, size_t buf_size)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;
        int err;

        for (i = 0; i < info->size; i++) {
                struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];
                dma_addr_t dma_handle;
                char *buf;

                buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
                if (!buf) {
                        err = -ENOMEM;
                        goto rollback;
                }

                dma_handle = pci_map_single(pdev, buf, buf_size, direction);
                if (pci_dma_mapping_error(pdev, dma_handle)) {
                        kfree(buf);
                        err = -EIO;
                        goto rollback;
                }

                desc_info->data = buf;
                desc_info->data_size = buf_size;
                dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

                desc->buf_addr = dma_handle;
                desc->buf_size = buf_size;
        }
        return 0;

rollback:
        for (i--; i >= 0; i--) {
                struct rocker_desc_info *desc_info = &info->desc_info[i];

                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
                kfree(desc_info->data);
        }
        return err;
}

static void rocker_dma_ring_bufs_free(struct rocker *rocker,
                                      struct rocker_dma_ring_info *info,
                                      int direction)
{
        struct pci_dev *pdev = rocker->pdev;
        int i;

        for (i = 0; i < info->size; i++) {
                struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];

                desc->buf_addr = 0;
                desc->buf_size = 0;
                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
                kfree(desc_info->data);
        }
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
        struct pci_dev *pdev = rocker->pdev;
        int err;

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
                                     ROCKER_DMA_CMD_DEFAULT_SIZE,
                                     &rocker->cmd_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create command dma ring\n");
                return err;
        }

        spin_lock_init(&rocker->cmd_ring_lock);

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
                                         PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
                goto err_dma_cmd_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
                                     ROCKER_DMA_EVENT_DEFAULT_SIZE,
                                     &rocker->event_ring);
        if (err) {
                dev_err(&pdev->dev, "failed to create event dma ring\n");
                goto err_dma_event_ring_create;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
                                         PCI_DMA_FROMDEVICE, PAGE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
                goto err_dma_event_ring_bufs_alloc;
        }
        rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
        return 0;

err_dma_event_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
        return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
        /* Unmap direction must match the PCI_DMA_FROMDEVICE mapping done in
         * rocker_dma_rings_init().
         */
        rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
                                  PCI_DMA_FROMDEVICE);
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
                                      struct rocker_port *rocker_port,
                                      struct rocker_desc_info *desc_info,
                                      struct sk_buff *skb, size_t buf_len)
{
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;

        dma_handle = pci_map_single(pdev, skb->data, buf_len,
                                    PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, dma_handle))
                return -EIO;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
                goto tlv_put_failure;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
                goto tlv_put_failure;
        return 0;

tlv_put_failure:
        pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
        desc_info->tlv_size = 0;
        return -EMSGSIZE;
}

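/* Worst-case receive buffer: MTU-sized payload plus Ethernet header, FCS
 * and one VLAN tag.
 */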
static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
{
        return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
                                        struct rocker_port *rocker_port,
                                        struct rocker_desc_info *desc_info)
{
        struct net_device *dev = rocker_port->dev;
        struct sk_buff *skb;
        size_t buf_len = rocker_port_rx_buf_len(rocker_port);
        int err;

        /* Ensure that hw will see tlv_size zero in case of an error.
         * That tells hw to use another descriptor.
         */
        rocker_desc_cookie_ptr_set(desc_info, NULL);
        desc_info->tlv_size = 0;

        skb = netdev_alloc_skb_ip_align(dev, buf_len);
        if (!skb)
                return -ENOMEM;
        err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
                                         skb, buf_len);
        if (err) {
                dev_kfree_skb_any(skb);
                return err;
        }
        rocker_desc_cookie_ptr_set(desc_info, skb);
        return 0;
}

static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
                                         struct rocker_tlv **attrs)
{
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
        size_t len;

        if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
            !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
                return;
        dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
        len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
        pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
                                        struct rocker_desc_info *desc_info)
{
        struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

        if (!skb)
                return;
        rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
        rocker_dma_rx_ring_skb_unmap(rocker, attrs);
        dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
                                         struct rocker_port *rocker_port)
{
        struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
        int i;
        int err;

        for (i = 0; i < rx_ring->size; i++) {
                err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
                                                   &rx_ring->desc_info[i]);
                if (err)
                        goto rollback;
        }
        return 0;

rollback:
        for (i--; i >= 0; i--)
                rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
        return err;
}

static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
                                         struct rocker_port *rocker_port)
{
        struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
        int i;

        for (i = 0; i < rx_ring->size; i++)
                rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;
        int err;

        err = rocker_dma_ring_create(rocker,
                                     ROCKER_DMA_TX(rocker_port->port_number),
                                     ROCKER_DMA_TX_DEFAULT_SIZE,
                                     &rocker_port->tx_ring);
        if (err) {
                netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
                return err;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
                                         PCI_DMA_TODEVICE,
                                         ROCKER_DMA_TX_DESC_SIZE);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
                goto err_dma_tx_ring_bufs_alloc;
        }

        err = rocker_dma_ring_create(rocker,
                                     ROCKER_DMA_RX(rocker_port->port_number),
                                     ROCKER_DMA_RX_DEFAULT_SIZE,
                                     &rocker_port->rx_ring);
        if (err) {
                netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
                goto err_dma_rx_ring_create;
        }

        err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
                                         PCI_DMA_BIDIRECTIONAL,
                                         ROCKER_DMA_RX_DESC_SIZE);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
                goto err_dma_rx_ring_bufs_alloc;
        }

        err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
                goto err_dma_rx_ring_skbs_alloc;
        }
        rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

        return 0;

err_dma_rx_ring_skbs_alloc:
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
        rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                                  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
        return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;

        rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
                                  PCI_DMA_TODEVICE);
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
{
        u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

        if (enable)
                val |= 1ULL << rocker_port->pport;
        else
                val &= ~(1ULL << rocker_port->pport);
        rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
        struct rocker *rocker = dev_id;
        struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        u32 credits = 0;

        spin_lock(&rocker->cmd_ring_lock);
        while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
                wait = rocker_desc_cookie_ptr_get(desc_info);
                if (wait->nowait) {
                        rocker_desc_gen_clear(desc_info);
                        rocker_wait_destroy(wait);
                } else {
                        rocker_wait_wake_up(wait);
                }
                credits++;
        }
        spin_unlock(&rocker->cmd_ring_lock);
        rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

        return IRQ_HANDLED;
}

static void rocker_port_link_up(struct rocker_port *rocker_port)
{
        netif_carrier_on(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(struct rocker_port *rocker_port)
{
        netif_carrier_off(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(struct rocker *rocker,
                                    const struct rocker_tlv *info)
{
        struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
        unsigned int port_number;
        bool link_up;
        struct rocker_port *rocker_port;

        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
        if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
                return -EIO;
        port_number =
                rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
        link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

        if (port_number >= rocker->port_count)
                return -EINVAL;

        rocker_port = rocker->ports[port_number];
        if (netif_carrier_ok(rocker_port->dev) != link_up) {
                if (link_up)
                        rocker_port_link_up(rocker_port);
                else
                        rocker_port_link_down(rocker_port);
        }

        return 0;
}

#define ROCKER_OP_FLAG_REMOVE           BIT(0)
#define ROCKER_OP_FLAG_NOWAIT           BIT(1)
#define ROCKER_OP_FLAG_LEARNED          BIT(2)
#define ROCKER_OP_FLAG_REFRESH          BIT(3)

static int rocker_port_fdb(struct rocker_port *rocker_port,
                           const unsigned char *addr,
                           __be16 vlan_id, int flags);

static int rocker_event_mac_vlan_seen(struct rocker *rocker,
                                      const struct rocker_tlv *info)
{
        struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
        unsigned int port_number;
        struct rocker_port *rocker_port;
        unsigned char *addr;
        int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
        __be16 vlan_id;

        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
        if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
            !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
                return -EIO;
        port_number =
                rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
        addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
        vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

        if (port_number >= rocker->port_count)
                return -EINVAL;

        rocker_port = rocker->ports[port_number];

        if (rocker_port->stp_state != BR_STATE_LEARNING &&
            rocker_port->stp_state != BR_STATE_FORWARDING)
                return 0;

        return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
}

static int rocker_event_process(struct rocker *rocker,
                                struct rocker_desc_info *desc_info)
{
        struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
        struct rocker_tlv *info;
        u16 type;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
        if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
            !attrs[ROCKER_TLV_EVENT_INFO])
                return -EIO;

        type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
        info = attrs[ROCKER_TLV_EVENT_INFO];

        switch (type) {
        case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
                return rocker_event_link_change(rocker, info);
        case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
                return rocker_event_mac_vlan_seen(rocker, info);
        }

        return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
        struct rocker *rocker = dev_id;
        struct pci_dev *pdev = rocker->pdev;
        struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
                err = rocker_desc_err(desc_info);
                if (err) {
                        dev_err(&pdev->dev, "event desc received with err %d\n",
                                err);
                } else {
                        err = rocker_event_process(rocker, desc_info);
                        if (err)
                                dev_err(&pdev->dev, "event processing failed with err %d\n",
                                        err);
                }
                rocker_desc_gen_clear(desc_info);
                rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
                credits++;
        }
        rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

        return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_tx);
        return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
        struct rocker_port *rocker_port = dev_id;

        napi_schedule(&rocker_port->napi_rx);
        return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
                               struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info,
                               void *priv);

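/* Send a command to the device: prepare() fills the descriptor with TLVs,
 * the descriptor is posted to the cmd ring, and unless nowait is set the
 * caller sleeps until the completion interrupt fires, after which process()
 * (if given) parses the response.  With nowait the call returns immediately
 * and the interrupt handler cleans up.
 */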
static int rocker_cmd_exec(struct rocker *rocker,
                           struct rocker_port *rocker_port,
                           rocker_cmd_cb_t prepare, void *prepare_priv,
                           rocker_cmd_cb_t process, void *process_priv,
                           bool nowait)
{
        struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        unsigned long flags;
        int err;

        wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
        if (!wait)
                return -ENOMEM;
        wait->nowait = nowait;

        spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
        desc_info = rocker_desc_head_get(&rocker->cmd_ring);
        if (!desc_info) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
                err = -EAGAIN;
                goto out;
        }
        err = prepare(rocker, rocker_port, desc_info, prepare_priv);
        if (err) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
                goto out;
        }
        rocker_desc_cookie_ptr_set(desc_info, wait);
        rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
        spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);

        if (nowait)
                return 0;

        if (!rocker_wait_event_timeout(wait, HZ / 10))
                return -EIO;

        err = rocker_desc_err(desc_info);
        if (err)
                goto out;
1519
1520         if (process)
1521                 err = process(rocker, rocker_port, desc_info, process_priv);
1522
1523         rocker_desc_gen_clear(desc_info);
1524 out:
1525         rocker_wait_destroy(wait);
1526         return err;
1527 }
1528
1529 static int
1530 rocker_cmd_get_port_settings_prep(struct rocker *rocker,
1531                                   struct rocker_port *rocker_port,
1532                                   struct rocker_desc_info *desc_info,
1533                                   void *priv)
1534 {
1535         struct rocker_tlv *cmd_info;
1536
1537         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1538                                ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1539                 return -EMSGSIZE;
1540         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1541         if (!cmd_info)
1542                 return -EMSGSIZE;
1543         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1544                                rocker_port->pport))
1545                 return -EMSGSIZE;
1546         rocker_tlv_nest_end(desc_info, cmd_info);
1547         return 0;
1548 }
1549
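/* The descriptor built above carries, schematically:
 *
 *	[ CMD_TYPE  u16  = GET_PORT_SETTINGS ]
 *	[ CMD_INFO  nest ]
 *		[ PORT_SETTINGS_PPORT  u32  = rocker_port->pport ]
 *
 * The other command preps below follow the same shape: a command type
 * plus one nested info block; only the attributes inside differ.
 */
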
1550 static int
1551 rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
1552                                           struct rocker_port *rocker_port,
1553                                           struct rocker_desc_info *desc_info,
1554                                           void *priv)
1555 {
1556         struct ethtool_cmd *ecmd = priv;
1557         struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1558         struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1559         u32 speed;
1560         u8 duplex;
1561         u8 autoneg;
1562
1563         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1564         if (!attrs[ROCKER_TLV_CMD_INFO])
1565                 return -EIO;
1566
1567         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1568                                 attrs[ROCKER_TLV_CMD_INFO]);
1569         if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1570             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1571             !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1572                 return -EIO;
1573
1574         speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1575         duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1576         autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1577
1578         ecmd->transceiver = XCVR_INTERNAL;
1579         ecmd->supported = SUPPORTED_TP;
1580         ecmd->phy_address = 0xff;
1581         ecmd->port = PORT_TP;
1582         ethtool_cmd_speed_set(ecmd, speed);
1583         ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1584         ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1585
1586         return 0;
1587 }
1588
1589 static int
1590 rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
1591                                           struct rocker_port *rocker_port,
1592                                           struct rocker_desc_info *desc_info,
1593                                           void *priv)
1594 {
1595         unsigned char *macaddr = priv;
1596         struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1597         struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1598         struct rocker_tlv *attr;
1599
1600         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1601         if (!attrs[ROCKER_TLV_CMD_INFO])
1602                 return -EIO;
1603
1604         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1605                                 attrs[ROCKER_TLV_CMD_INFO]);
1606         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1607         if (!attr)
1608                 return -EIO;
1609
1610         if (rocker_tlv_len(attr) != ETH_ALEN)
1611                 return -EINVAL;
1612
1613         ether_addr_copy(macaddr, rocker_tlv_data(attr));
1614         return 0;
1615 }
1616
1617 static int
1618 rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
1619                                           struct rocker_port *rocker_port,
1620                                           struct rocker_desc_info *desc_info,
1621                                           void *priv)
1622 {
1623         struct ethtool_cmd *ecmd = priv;
1624         struct rocker_tlv *cmd_info;
1625
1626         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1627                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1628                 return -EMSGSIZE;
1629         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1630         if (!cmd_info)
1631                 return -EMSGSIZE;
1632         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1633                                rocker_port->pport))
1634                 return -EMSGSIZE;
1635         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1636                                ethtool_cmd_speed(ecmd)))
1637                 return -EMSGSIZE;
1638         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1639                               ecmd->duplex))
1640                 return -EMSGSIZE;
1641         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1642                               ecmd->autoneg))
1643                 return -EMSGSIZE;
1644         rocker_tlv_nest_end(desc_info, cmd_info);
1645         return 0;
1646 }
1647
1648 static int
1649 rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
1650                                           struct rocker_port *rocker_port,
1651                                           struct rocker_desc_info *desc_info,
1652                                           void *priv)
1653 {
1654         unsigned char *macaddr = priv;
1655         struct rocker_tlv *cmd_info;
1656
1657         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1658                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1659                 return -EMSGSIZE;
1660         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1661         if (!cmd_info)
1662                 return -EMSGSIZE;
1663         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1664                                rocker_port->pport))
1665                 return -EMSGSIZE;
1666         if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1667                            ETH_ALEN, macaddr))
1668                 return -EMSGSIZE;
1669         rocker_tlv_nest_end(desc_info, cmd_info);
1670         return 0;
1671 }
1672
1673 static int
1674 rocker_cmd_set_port_learning_prep(struct rocker *rocker,
1675                                   struct rocker_port *rocker_port,
1676                                   struct rocker_desc_info *desc_info,
1677                                   void *priv)
1678 {
1679         struct rocker_tlv *cmd_info;
1680
1681         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1682                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1683                 return -EMSGSIZE;
1684         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1685         if (!cmd_info)
1686                 return -EMSGSIZE;
1687         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1688                                rocker_port->pport))
1689                 return -EMSGSIZE;
1690         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1691                               !!(rocker_port->brport_flags & BR_LEARNING)))
1692                 return -EMSGSIZE;
1693         rocker_tlv_nest_end(desc_info, cmd_info);
1694         return 0;
1695 }
1696
1697 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1698                                                 struct ethtool_cmd *ecmd)
1699 {
1700         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1701                                rocker_cmd_get_port_settings_prep, NULL,
1702                                rocker_cmd_get_port_settings_ethtool_proc,
1703                                ecmd, false);
1704 }
1705
1706 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1707                                                 unsigned char *macaddr)
1708 {
1709         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1710                                rocker_cmd_get_port_settings_prep, NULL,
1711                                rocker_cmd_get_port_settings_macaddr_proc,
1712                                macaddr, false);
1713 }
1714
1715 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1716                                                 struct ethtool_cmd *ecmd)
1717 {
1718         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1719                                rocker_cmd_set_port_settings_ethtool_prep,
1720                                ecmd, NULL, NULL, false);
1721 }
1722
1723 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1724                                                 unsigned char *macaddr)
1725 {
1726         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1727                                rocker_cmd_set_port_settings_macaddr_prep,
1728                                macaddr, NULL, NULL, false);
1729 }
1730
1731 static int rocker_port_set_learning(struct rocker_port *rocker_port)
1732 {
1733         return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1734                                rocker_cmd_set_port_learning_prep,
1735                                NULL, NULL, NULL, false);
1736 }
1737
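/* These thin wrappers are what the ndo/ethtool handlers call.  A
 * sketch of the glue (the real handlers live later in this file and
 * may differ in detail):
 *
 *	static int example_get_settings(struct net_device *dev,
 *					struct ethtool_cmd *ecmd)
 *	{
 *		struct rocker_port *rocker_port = netdev_priv(dev);
 *
 *		return rocker_cmd_get_port_settings_ethtool(rocker_port,
 *							    ecmd);
 *	}
 */
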
1738 static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1739                                            struct rocker_flow_tbl_entry *entry)
1740 {
1741         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1742                                entry->key.ig_port.in_pport))
1743                 return -EMSGSIZE;
1744         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1745                                entry->key.ig_port.in_pport_mask))
1746                 return -EMSGSIZE;
1747         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1748                                entry->key.ig_port.goto_tbl))
1749                 return -EMSGSIZE;
1750
1751         return 0;
1752 }
1753
1754 static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1755                                         struct rocker_flow_tbl_entry *entry)
1756 {
1757         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1758                                entry->key.vlan.in_pport))
1759                 return -EMSGSIZE;
1760         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1761                                 entry->key.vlan.vlan_id))
1762                 return -EMSGSIZE;
1763         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1764                                 entry->key.vlan.vlan_id_mask))
1765                 return -EMSGSIZE;
1766         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1767                                entry->key.vlan.goto_tbl))
1768                 return -EMSGSIZE;
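        /* NEW_VLAN_ID is only sent for untagged matches: those are the
         * flows that assign a VLAN to untagged ingress traffic.
         */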
1769         if (entry->key.vlan.untagged &&
1770             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1771                                 entry->key.vlan.new_vlan_id))
1772                 return -EMSGSIZE;
1773
1774         return 0;
1775 }
1776
1777 static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1778                                             struct rocker_flow_tbl_entry *entry)
1779 {
1780         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1781                                entry->key.term_mac.in_pport))
1782                 return -EMSGSIZE;
1783         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1784                                entry->key.term_mac.in_pport_mask))
1785                 return -EMSGSIZE;
1786         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1787                                 entry->key.term_mac.eth_type))
1788                 return -EMSGSIZE;
1789         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1790                            ETH_ALEN, entry->key.term_mac.eth_dst))
1791                 return -EMSGSIZE;
1792         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1793                            ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1794                 return -EMSGSIZE;
1795         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1796                                 entry->key.term_mac.vlan_id))
1797                 return -EMSGSIZE;
1798         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1799                                 entry->key.term_mac.vlan_id_mask))
1800                 return -EMSGSIZE;
1801         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1802                                entry->key.term_mac.goto_tbl))
1803                 return -EMSGSIZE;
1804         if (entry->key.term_mac.copy_to_cpu &&
1805             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1806                               entry->key.term_mac.copy_to_cpu))
1807                 return -EMSGSIZE;
1808
1809         return 0;
1810 }
1811
1812 static int
1813 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1814                                       struct rocker_flow_tbl_entry *entry)
1815 {
1816         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1817                                 entry->key.ucast_routing.eth_type))
1818                 return -EMSGSIZE;
1819         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1820                                 entry->key.ucast_routing.dst4))
1821                 return -EMSGSIZE;
1822         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1823                                 entry->key.ucast_routing.dst4_mask))
1824                 return -EMSGSIZE;
1825         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1826                                entry->key.ucast_routing.goto_tbl))
1827                 return -EMSGSIZE;
1828         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1829                                entry->key.ucast_routing.group_id))
1830                 return -EMSGSIZE;
1831
1832         return 0;
1833 }
1834
1835 static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1836                                           struct rocker_flow_tbl_entry *entry)
1837 {
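        /* Every match field in the bridging table is optional; emit
         * only the TLVs this entry actually uses.
         */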
1838         if (entry->key.bridge.has_eth_dst &&
1839             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1840                            ETH_ALEN, entry->key.bridge.eth_dst))
1841                 return -EMSGSIZE;
1842         if (entry->key.bridge.has_eth_dst_mask &&
1843             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1844                            ETH_ALEN, entry->key.bridge.eth_dst_mask))
1845                 return -EMSGSIZE;
1846         if (entry->key.bridge.vlan_id &&
1847             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1848                                 entry->key.bridge.vlan_id))
1849                 return -EMSGSIZE;
1850         if (entry->key.bridge.tunnel_id &&
1851             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1852                                entry->key.bridge.tunnel_id))
1853                 return -EMSGSIZE;
1854         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1855                                entry->key.bridge.goto_tbl))
1856                 return -EMSGSIZE;
1857         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1858                                entry->key.bridge.group_id))
1859                 return -EMSGSIZE;
1860         if (entry->key.bridge.copy_to_cpu &&
1861             rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1862                               entry->key.bridge.copy_to_cpu))
1863                 return -EMSGSIZE;
1864
1865         return 0;
1866 }
1867
1868 static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1869                                        struct rocker_flow_tbl_entry *entry)
1870 {
1871         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1872                                entry->key.acl.in_pport))
1873                 return -EMSGSIZE;
1874         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1875                                entry->key.acl.in_pport_mask))
1876                 return -EMSGSIZE;
1877         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1878                            ETH_ALEN, entry->key.acl.eth_src))
1879                 return -EMSGSIZE;
1880         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1881                            ETH_ALEN, entry->key.acl.eth_src_mask))
1882                 return -EMSGSIZE;
1883         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1884                            ETH_ALEN, entry->key.acl.eth_dst))
1885                 return -EMSGSIZE;
1886         if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1887                            ETH_ALEN, entry->key.acl.eth_dst_mask))
1888                 return -EMSGSIZE;
1889         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1890                                 entry->key.acl.eth_type))
1891                 return -EMSGSIZE;
1892         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1893                                 entry->key.acl.vlan_id))
1894                 return -EMSGSIZE;
1895         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1896                                 entry->key.acl.vlan_id_mask))
1897                 return -EMSGSIZE;
1898
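        /* L3 header matches only make sense for IP ethertypes.  Note
         * how the single ip_tos byte is split on the wire: the low six
         * bits go out as the DSCP TLV, the top two as the ECN TLV.
         */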
1899         switch (ntohs(entry->key.acl.eth_type)) {
1900         case ETH_P_IP:
1901         case ETH_P_IPV6:
1902                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1903                                       entry->key.acl.ip_proto))
1904                         return -EMSGSIZE;
1905                 if (rocker_tlv_put_u8(desc_info,
1906                                       ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1907                                       entry->key.acl.ip_proto_mask))
1908                         return -EMSGSIZE;
1909                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1910                                       entry->key.acl.ip_tos & 0x3f))
1911                         return -EMSGSIZE;
1912                 if (rocker_tlv_put_u8(desc_info,
1913                                       ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1914                                       entry->key.acl.ip_tos_mask & 0x3f))
1915                         return -EMSGSIZE;
1916                 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1917                                       (entry->key.acl.ip_tos & 0xc0) >> 6))
1918                         return -EMSGSIZE;
1919                 if (rocker_tlv_put_u8(desc_info,
1920                                       ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1921                                       (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1922                         return -EMSGSIZE;
1923                 break;
1924         }
1925
1926         if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1927             rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1928                                entry->key.acl.group_id))
1929                 return -EMSGSIZE;
1930
1931         return 0;
1932 }
1933
1934 static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
1935                                    struct rocker_port *rocker_port,
1936                                    struct rocker_desc_info *desc_info,
1937                                    void *priv)
1938 {
1939         struct rocker_flow_tbl_entry *entry = priv;
1940         struct rocker_tlv *cmd_info;
1941         int err = 0;
1942
1943         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1944                                ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
1945                 return -EMSGSIZE;
1946         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1947         if (!cmd_info)
1948                 return -EMSGSIZE;
1949         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
1950                                entry->key.tbl_id))
1951                 return -EMSGSIZE;
1952         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
1953                                entry->key.priority))
1954                 return -EMSGSIZE;
1955         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
1956                 return -EMSGSIZE;
1957         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1958                                entry->cookie))
1959                 return -EMSGSIZE;
1960
1961         switch (entry->key.tbl_id) {
1962         case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1963                 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
1964                 break;
1965         case ROCKER_OF_DPA_TABLE_ID_VLAN:
1966                 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
1967                 break;
1968         case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1969                 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
1970                 break;
1971         case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1972                 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
1973                 break;
1974         case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1975                 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
1976                 break;
1977         case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1978                 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
1979                 break;
1980         default:
1981                 err = -ENOTSUPP;
1982                 break;
1983         }
1984
1985         if (err)
1986                 return err;
1987
1988         rocker_tlv_nest_end(desc_info, cmd_info);
1989
1990         return 0;
1991 }
1992
1993 static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
1994                                    struct rocker_port *rocker_port,
1995                                    struct rocker_desc_info *desc_info,
1996                                    void *priv)
1997 {
1998         const struct rocker_flow_tbl_entry *entry = priv;
1999         struct rocker_tlv *cmd_info;
2000
2001         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
2002                                ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
2003                 return -EMSGSIZE;
2004         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2005         if (!cmd_info)
2006                 return -EMSGSIZE;
2007         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2008                                entry->cookie))
2009                 return -EMSGSIZE;
2010         rocker_tlv_nest_end(desc_info, cmd_info);
2011
2012         return 0;
2013 }
2014
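/* Flow deletion is keyed purely by the 64-bit cookie handed out at add
 * time (see rocker_flow_tbl_add() below), so none of the original
 * match fields need to be resent.
 */
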
2015 static int
2016 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2017                                       struct rocker_group_tbl_entry *entry)
2018 {
2019         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2020                                ROCKER_GROUP_PORT_GET(entry->group_id)))
2021                 return -EMSGSIZE;
2022         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2023                               entry->l2_interface.pop_vlan))
2024                 return -EMSGSIZE;
2025
2026         return 0;
2027 }
2028
2029 static int
2030 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2031                                     struct rocker_group_tbl_entry *entry)
2032 {
2033         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2034                                entry->l2_rewrite.group_id))
2035                 return -EMSGSIZE;
2036         if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2037             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2038                            ETH_ALEN, entry->l2_rewrite.eth_src))
2039                 return -EMSGSIZE;
2040         if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2041             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2042                            ETH_ALEN, entry->l2_rewrite.eth_dst))
2043                 return -EMSGSIZE;
2044         if (entry->l2_rewrite.vlan_id &&
2045             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2046                                 entry->l2_rewrite.vlan_id))
2047                 return -EMSGSIZE;
2048
2049         return 0;
2050 }
2051
2052 static int
2053 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2054                                    struct rocker_group_tbl_entry *entry)
2055 {
2056         int i;
2057         struct rocker_tlv *group_ids;
2058
2059         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2060                                entry->group_count))
2061                 return -EMSGSIZE;
2062
2063         group_ids = rocker_tlv_nest_start(desc_info,
2064                                           ROCKER_TLV_OF_DPA_GROUP_IDS);
2065         if (!group_ids)
2066                 return -EMSGSIZE;
2067
2068         for (i = 0; i < entry->group_count; i++)
2069                 /* Note TLV array is 1-based */
2070                 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2071                         return -EMSGSIZE;
2072
2073         rocker_tlv_nest_end(desc_info, group_ids);
2074
2075         return 0;
2076 }
2077
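/* For group_count == 2, the nest built above looks like:
 *
 *	[ GROUP_COUNT  u16  = 2 ]
 *	[ GROUP_IDS    nest ]
 *		[ 1  u32  = group_ids[0] ]
 *		[ 2  u32  = group_ids[1] ]
 *
 * i.e. the TLV attribute type doubles as the 1-based array index.
 */
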
2078 static int
2079 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2080                                     struct rocker_group_tbl_entry *entry)
2081 {
2082         if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2083             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2084                            ETH_ALEN, entry->l3_unicast.eth_src))
2085                 return -EMSGSIZE;
2086         if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2087             rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2088                            ETH_ALEN, entry->l3_unicast.eth_dst))
2089                 return -EMSGSIZE;
2090         if (entry->l3_unicast.vlan_id &&
2091             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2092                                 entry->l3_unicast.vlan_id))
2093                 return -EMSGSIZE;
2094         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2095                               entry->l3_unicast.ttl_check))
2096                 return -EMSGSIZE;
2097         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2098                                entry->l3_unicast.group_id))
2099                 return -EMSGSIZE;
2100
2101         return 0;
2102 }
2103
2104 static int rocker_cmd_group_tbl_add(struct rocker *rocker,
2105                                     struct rocker_port *rocker_port,
2106                                     struct rocker_desc_info *desc_info,
2107                                     void *priv)
2108 {
2109         struct rocker_group_tbl_entry *entry = priv;
2110         struct rocker_tlv *cmd_info;
2111         int err = 0;
2112
2113         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2114                 return -EMSGSIZE;
2115         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2116         if (!cmd_info)
2117                 return -EMSGSIZE;
2118
2119         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2120                                entry->group_id))
2121                 return -EMSGSIZE;
2122
2123         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2124         case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2125                 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2126                 break;
2127         case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2128                 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2129                 break;
2130         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2131         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2132                 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2133                 break;
2134         case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2135                 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2136                 break;
2137         default:
2138                 err = -ENOTSUPP;
2139                 break;
2140         }
2141
2142         if (err)
2143                 return err;
2144
2145         rocker_tlv_nest_end(desc_info, cmd_info);
2146
2147         return 0;
2148 }
2149
2150 static int rocker_cmd_group_tbl_del(struct rocker *rocker,
2151                                     struct rocker_port *rocker_port,
2152                                     struct rocker_desc_info *desc_info,
2153                                     void *priv)
2154 {
2155         const struct rocker_group_tbl_entry *entry = priv;
2156         struct rocker_tlv *cmd_info;
2157
2158         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2159                 return -EMSGSIZE;
2160         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2161         if (!cmd_info)
2162                 return -EMSGSIZE;
2163         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2164                                entry->group_id))
2165                 return -EMSGSIZE;
2166         rocker_tlv_nest_end(desc_info, cmd_info);
2167
2168         return 0;
2169 }
2170
2171 /*****************************************
2172  * Flow, group, FDB, internal VLAN tables
2173  *****************************************/
2174
2175 static int rocker_init_tbls(struct rocker *rocker)
2176 {
2177         hash_init(rocker->flow_tbl);
2178         spin_lock_init(&rocker->flow_tbl_lock);
2179
2180         hash_init(rocker->group_tbl);
2181         spin_lock_init(&rocker->group_tbl_lock);
2182
2183         hash_init(rocker->fdb_tbl);
2184         spin_lock_init(&rocker->fdb_tbl_lock);
2185
2186         hash_init(rocker->internal_vlan_tbl);
2187         spin_lock_init(&rocker->internal_vlan_tbl_lock);
2188
2189         return 0;
2190 }
2191
2192 static void rocker_free_tbls(struct rocker *rocker)
2193 {
2194         unsigned long flags;
2195         struct rocker_flow_tbl_entry *flow_entry;
2196         struct rocker_group_tbl_entry *group_entry;
2197         struct rocker_fdb_tbl_entry *fdb_entry;
2198         struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2199         struct hlist_node *tmp;
2200         int bkt;
2201
2202         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2203         hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2204                 hash_del(&flow_entry->entry);
2205         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2206
2207         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2208         hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2209                 hash_del(&group_entry->entry);
2210         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2211
2212         spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2213         hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2214                 hash_del(&fdb_entry->entry);
2215         spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2216
2217         spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2218         hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2219                            tmp, internal_vlan_entry, entry)
2220                 hash_del(&internal_vlan_entry->entry);
2221         spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2222 }
2223
2224 static struct rocker_flow_tbl_entry *
2225 rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
2226 {
2227         struct rocker_flow_tbl_entry *found;
2228
2229         hash_for_each_possible(rocker->flow_tbl, found,
2230                                entry, match->key_crc32) {
2231                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2232                         return found;
2233         }
2234
2235         return NULL;
2236 }
2237
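/* Lookup hashes on key_crc32 but still memcmp()s the full key:
 * hash_for_each_possible() only narrows the search to one bucket, and
 * two distinct keys may share a CRC.
 */
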
2238 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2239                                struct rocker_flow_tbl_entry *match,
2240                                bool nowait)
2241 {
2242         struct rocker *rocker = rocker_port->rocker;
2243         struct rocker_flow_tbl_entry *found;
2244         unsigned long flags;
2245         bool add_to_hw = false;
2246         int err = 0;
2247
2248         match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
2249
2250         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2251
2252         found = rocker_flow_tbl_find(rocker, match);
2253
2254         if (found) {
2255                 kfree(match);
2256         } else {
2257                 found = match;
2258                 found->cookie = rocker->flow_tbl_next_cookie++;
2259                 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2260                 add_to_hw = true;
2261         }
2262
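        /* Identical matches share one hardware flow entry; each user
         * just takes a reference on it.
         */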
2263         found->ref_count++;
2264
2265         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2266
2267         if (add_to_hw) {
2268                 err = rocker_cmd_exec(rocker, rocker_port,
2269                                       rocker_cmd_flow_tbl_add,
2270                                       found, NULL, NULL, nowait);
2271                 if (err) {
2272                         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2273                         hash_del(&found->entry);
2274                         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2275                         kfree(found);
2276                 }
2277         }
2278
2279         return err;
2280 }
2281
2282 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2283                                struct rocker_flow_tbl_entry *match,
2284                                bool nowait)
2285 {
2286         struct rocker *rocker = rocker_port->rocker;
2287         struct rocker_flow_tbl_entry *found;
2288         unsigned long flags;
2289         bool del_from_hw = false;
2290         int err = 0;
2291
2292         match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
2293
2294         spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2295
2296         found = rocker_flow_tbl_find(rocker, match);
2297
2298         if (found) {
2299                 found->ref_count--;
2300                 if (found->ref_count == 0) {
2301                         hash_del(&found->entry);
2302                         del_from_hw = true;
2303                 }
2304         }
2305
2306         spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2307
2308         kfree(match);
2309
2310         if (del_from_hw) {
2311                 err = rocker_cmd_exec(rocker, rocker_port,
2312                                       rocker_cmd_flow_tbl_del,
2313                                       found, NULL, NULL, nowait);
2314                 kfree(found);
2315         }
2316
2317         return err;
2318 }
2319
2320 static gfp_t rocker_op_flags_gfp(int flags)
2321 {
2322         return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
2323 }
2324
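/* NOWAIT operations may be issued from contexts that cannot sleep, so
 * every allocation on those paths must be GFP_ATOMIC; all other paths
 * may sleep and use GFP_KERNEL.
 */
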
2325 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2326                               int flags, struct rocker_flow_tbl_entry *entry)
2327 {
2328         bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2329
2330         if (flags & ROCKER_OP_FLAG_REMOVE)
2331                 return rocker_flow_tbl_del(rocker_port, entry, nowait);
2332         else
2333                 return rocker_flow_tbl_add(rocker_port, entry, nowait);
2334 }
2335
2336 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2337                                    int flags, u32 in_pport, u32 in_pport_mask,
2338                                    enum rocker_of_dpa_table_id goto_tbl)
2339 {
2340         struct rocker_flow_tbl_entry *entry;
2341
2342         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2343         if (!entry)
2344                 return -ENOMEM;
2345
2346         entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2347         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2348         entry->key.ig_port.in_pport = in_pport;
2349         entry->key.ig_port.in_pport_mask = in_pport_mask;
2350         entry->key.ig_port.goto_tbl = goto_tbl;
2351
2352         return rocker_flow_tbl_do(rocker_port, flags, entry);
2353 }
2354
2355 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2356                                 int flags, u32 in_pport,
2357                                 __be16 vlan_id, __be16 vlan_id_mask,
2358                                 enum rocker_of_dpa_table_id goto_tbl,
2359                                 bool untagged, __be16 new_vlan_id)
2360 {
2361         struct rocker_flow_tbl_entry *entry;
2362
2363         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2364         if (!entry)
2365                 return -ENOMEM;
2366
2367         entry->key.priority = ROCKER_PRIORITY_VLAN;
2368         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2369         entry->key.vlan.in_pport = in_pport;
2370         entry->key.vlan.vlan_id = vlan_id;
2371         entry->key.vlan.vlan_id_mask = vlan_id_mask;
2372         entry->key.vlan.goto_tbl = goto_tbl;
2373
2374         entry->key.vlan.untagged = untagged;
2375         entry->key.vlan.new_vlan_id = new_vlan_id;
2376
2377         return rocker_flow_tbl_do(rocker_port, flags, entry);
2378 }
2379
2380 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2381                                     u32 in_pport, u32 in_pport_mask,
2382                                     __be16 eth_type, const u8 *eth_dst,
2383                                     const u8 *eth_dst_mask, __be16 vlan_id,
2384                                     __be16 vlan_id_mask, bool copy_to_cpu,
2385                                     int flags)
2386 {
2387         struct rocker_flow_tbl_entry *entry;
2388
2389         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2390         if (!entry)
2391                 return -ENOMEM;
2392
2393         if (is_multicast_ether_addr(eth_dst)) {
2394                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2395                 entry->key.term_mac.goto_tbl =
2396                          ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2397         } else {
2398                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2399                 entry->key.term_mac.goto_tbl =
2400                          ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2401         }
2402
2403         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2404         entry->key.term_mac.in_pport = in_pport;
2405         entry->key.term_mac.in_pport_mask = in_pport_mask;
2406         entry->key.term_mac.eth_type = eth_type;
2407         ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2408         ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2409         entry->key.term_mac.vlan_id = vlan_id;
2410         entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2411         entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2412
2413         return rocker_flow_tbl_do(rocker_port, flags, entry);
2414 }
2415
2416 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2417                                   int flags,
2418                                   const u8 *eth_dst, const u8 *eth_dst_mask,
2419                                   __be16 vlan_id, u32 tunnel_id,
2420                                   enum rocker_of_dpa_table_id goto_tbl,
2421                                   u32 group_id, bool copy_to_cpu)
2422 {
2423         struct rocker_flow_tbl_entry *entry;
2424         u32 priority;
2425         bool vlan_bridging = !!vlan_id;
2426         bool dflt = !eth_dst || eth_dst_mask; /* no dst, or masked dst */
2427         bool wild = false;
2428
2429         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2430         if (!entry)
2431                 return -ENOMEM;
2432
2433         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2434
2435         if (eth_dst) {
2436                 entry->key.bridge.has_eth_dst = 1;
2437                 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2438         }
2439         if (eth_dst_mask) {
2440                 entry->key.bridge.has_eth_dst_mask = 1;
2441                 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2442                 if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
2443                         wild = true;
2444         }
2445
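        /* The flow priority is derived from the (vlan_bridging, dflt,
         * wild) triple computed above; every combination maps to its
         * own ROCKER_PRIORITY_BRIDGING_* level.
         */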
2446         priority = ROCKER_PRIORITY_UNKNOWN;
2447         if (vlan_bridging && dflt && wild)
2448                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2449         else if (vlan_bridging && dflt && !wild)
2450                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2451         else if (vlan_bridging && !dflt)
2452                 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2453         else if (!vlan_bridging && dflt && wild)
2454                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2455         else if (!vlan_bridging && dflt && !wild)
2456                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2457         else if (!vlan_bridging && !dflt)
2458                 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2459
2460         entry->key.priority = priority;
2461         entry->key.bridge.vlan_id = vlan_id;
2462         entry->key.bridge.tunnel_id = tunnel_id;
2463         entry->key.bridge.goto_tbl = goto_tbl;
2464         entry->key.bridge.group_id = group_id;
2465         entry->key.bridge.copy_to_cpu = copy_to_cpu;
2466
2467         return rocker_flow_tbl_do(rocker_port, flags, entry);
2468 }
2469
2470 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2471                                int flags, u32 in_pport,
2472                                u32 in_pport_mask,
2473                                const u8 *eth_src, const u8 *eth_src_mask,
2474                                const u8 *eth_dst, const u8 *eth_dst_mask,
2475                                __be16 eth_type,
2476                                __be16 vlan_id, __be16 vlan_id_mask,
2477                                u8 ip_proto, u8 ip_proto_mask,
2478                                u8 ip_tos, u8 ip_tos_mask,
2479                                u32 group_id)
2480 {
2481         u32 priority;
2482         struct rocker_flow_tbl_entry *entry;
2483
2484         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2485         if (!entry)
2486                 return -ENOMEM;
2487
2488         priority = ROCKER_PRIORITY_ACL_NORMAL;
2489         if (eth_dst && eth_dst_mask) {
2490                 if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
2491                         priority = ROCKER_PRIORITY_ACL_DFLT;
2492                 else if (is_link_local_ether_addr(eth_dst))
2493                         priority = ROCKER_PRIORITY_ACL_CTRL;
2494         }
2495
2496         entry->key.priority = priority;
2497         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2498         entry->key.acl.in_pport = in_pport;
2499         entry->key.acl.in_pport_mask = in_pport_mask;
2500
2501         if (eth_src)
2502                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2503         if (eth_src_mask)
2504                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2505         if (eth_dst)
2506                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2507         if (eth_dst_mask)
2508                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2509
2510         entry->key.acl.eth_type = eth_type;
2511         entry->key.acl.vlan_id = vlan_id;
2512         entry->key.acl.vlan_id_mask = vlan_id_mask;
2513         entry->key.acl.ip_proto = ip_proto;
2514         entry->key.acl.ip_proto_mask = ip_proto_mask;
2515         entry->key.acl.ip_tos = ip_tos;
2516         entry->key.acl.ip_tos_mask = ip_tos_mask;
2517         entry->key.acl.group_id = group_id;
2518
2519         return rocker_flow_tbl_do(rocker_port, flags, entry);
2520 }
2521
2522 static struct rocker_group_tbl_entry *
2523 rocker_group_tbl_find(struct rocker *rocker,
2524                       struct rocker_group_tbl_entry *match)
2525 {
2526         struct rocker_group_tbl_entry *found;
2527
2528         hash_for_each_possible(rocker->group_tbl, found,
2529                                entry, match->group_id) {
2530                 if (found->group_id == match->group_id)
2531                         return found;
2532         }
2533
2534         return NULL;
2535 }
2536
2537 static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
2538 {
2539         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2540         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2541         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2542                 kfree(entry->group_ids);
2543                 break;
2544         default:
2545                 break;
2546         }
2547         kfree(entry);
2548 }
2549
2550 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2551                                 struct rocker_group_tbl_entry *match,
2552                                 bool nowait)
2553 {
2554         struct rocker *rocker = rocker_port->rocker;
2555         struct rocker_group_tbl_entry *found;
2556         unsigned long flags;
2557         int err = 0;
2558
2559         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2560
2561         found = rocker_group_tbl_find(rocker, match);
2562
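        /* Re-adding an existing group turns into a MOD: the stale
         * entry is dropped and the new one replaces it, both in the
         * table and in the device.
         */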
2563         if (found) {
2564                 hash_del(&found->entry);
2565                 rocker_group_tbl_entry_free(found);
2566                 found = match;
2567                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2568         } else {
2569                 found = match;
2570                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2571         }
2572
2573         hash_add(rocker->group_tbl, &found->entry, found->group_id);
2574
2575         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2576
2577         if (found->cmd)
2578                 err = rocker_cmd_exec(rocker, rocker_port,
2579                                       rocker_cmd_group_tbl_add,
2580                                       found, NULL, NULL, nowait);
2581
2582         return err;
2583 }
2584
2585 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2586                                 struct rocker_group_tbl_entry *match,
2587                                 bool nowait)
2588 {
2589         struct rocker *rocker = rocker_port->rocker;
2590         struct rocker_group_tbl_entry *found;
2591         unsigned long flags;
2592         int err = 0;
2593
2594         spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2595
2596         found = rocker_group_tbl_find(rocker, match);
2597
2598         if (found) {
2599                 hash_del(&found->entry);
2600                 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2601         }
2602
2603         spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2604
2605         rocker_group_tbl_entry_free(match);
2606
2607         if (found) {
2608                 err = rocker_cmd_exec(rocker, rocker_port,
2609                                       rocker_cmd_group_tbl_del,
2610                                       found, NULL, NULL, nowait);
2611                 rocker_group_tbl_entry_free(found);
2612         }
2613
2614         return err;
2615 }
2616
2617 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2618                                int flags, struct rocker_group_tbl_entry *entry)
2619 {
2620         bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2621
2622         if (flags & ROCKER_OP_FLAG_REMOVE)
2623                 return rocker_group_tbl_del(rocker_port, entry, nowait);
2624         else
2625                 return rocker_group_tbl_add(rocker_port, entry, nowait);
2626 }
2627
2628 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2629                                      int flags, __be16 vlan_id,
2630                                      u32 out_pport, int pop_vlan)
2631 {
2632         struct rocker_group_tbl_entry *entry;
2633
2634         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2635         if (!entry)
2636                 return -ENOMEM;
2637
2638         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2639         entry->l2_interface.pop_vlan = pop_vlan;
2640
2641         return rocker_group_tbl_do(rocker_port, flags, entry);
2642 }
2643
2644 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2645                                    int flags, u8 group_count,
2646                                    u32 *group_ids, u32 group_id)
2647 {
2648         struct rocker_group_tbl_entry *entry;
2649
2650         entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2651         if (!entry)
2652                 return -ENOMEM;
2653
2654         entry->group_id = group_id;
2655         entry->group_count = group_count;
2656
2657         entry->group_ids = kcalloc(group_count, sizeof(u32),
2658                                    rocker_op_flags_gfp(flags));
2659         if (!entry->group_ids) {
2660                 kfree(entry);
2661                 return -ENOMEM;
2662         }
2663         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2664
2665         return rocker_group_tbl_do(rocker_port, flags, entry);
2666 }
2667
2668 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2669                                  int flags, __be16 vlan_id,
2670                                  u8 group_count, u32 *group_ids,
2671                                  u32 group_id)
2672 {
2673         return rocker_group_l2_fan_out(rocker_port, flags,
2674                                        group_count, group_ids,
2675                                        group_id);
2676 }
2677
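/* vlan_id is unused above: the callers have already encoded it into
 * group_id via ROCKER_GROUP_L2_FLOOD().
 */
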
2678 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
2679                                         int flags, __be16 vlan_id)
2680 {
2681         struct rocker_port *p;
2682         struct rocker *rocker = rocker_port->rocker;
2683         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2684         u32 *group_ids;
2685         u8 group_count = 0;
2686         int err = 0;
2687         int i;
2688 
2689         /* Adjust the flood group for this VLAN.  The flood group
2690          * references an L2 interface group for each port in this
2691          * VLAN.
2692          */
2693 
        /* Use a heap buffer rather than a variable-length array on the
         * kernel stack; port_count is only known at runtime.
         */
        group_ids = kcalloc(rocker->port_count, sizeof(u32),
                            rocker_op_flags_gfp(flags));
        if (!group_ids)
                return -ENOMEM;

2694         for (i = 0; i < rocker->port_count; i++) {
2695                 p = rocker->ports[i];
2696                 if (!rocker_port_is_bridged(p))
2697                         continue;
2698                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2699                         group_ids[group_count++] =
2700                                 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
2701                 }
2702         }
2703 
2704         /* If there are no bridged ports in this VLAN, we're done */
2705         if (group_count == 0)
2706                 goto no_ports_in_vlan;
2707 
2708         err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
2709                                     group_count, group_ids,
2710                                     group_id);
2711         if (err)
2712                 netdev_err(rocker_port->dev,
2713                            "Error (%d) port VLAN l2 flood group\n", err);
2714 
no_ports_in_vlan:
        kfree(group_ids);
2715         return err;
2716 }
2717
2718 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
2719                                       int flags, __be16 vlan_id,
2720                                       bool pop_vlan)
2721 {
2722         struct rocker *rocker = rocker_port->rocker;
2723         struct rocker_port *p;
2724         bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2725         u32 out_pport;
2726         int ref = 0;
2727         int err;
2728         int i;
2729
2730         /* An L2 interface group for this port in this VLAN, but
2731          * only when port STP state is LEARNING|FORWARDING.
2732          */
2733
2734         if (rocker_port->stp_state == BR_STATE_LEARNING ||
2735             rocker_port->stp_state == BR_STATE_FORWARDING) {
2736                 out_pport = rocker_port->pport;
2737                 err = rocker_group_l2_interface(rocker_port, flags,
2738                                                 vlan_id, out_pport,
2739                                                 pop_vlan);
2740                 if (err) {
2741                         netdev_err(rocker_port->dev,
2742                                    "Error (%d) port VLAN l2 group for pport %d\n",
2743                                    err, out_pport);
2744                         return err;
2745                 }
2746         }
2747
2748         /* An L2 interface group on the CPU port for this VLAN.
2749          * Added when the first port joins this VLAN and destroyed
2750          * when the last port leaves it.
2751          */
2752
2753         for (i = 0; i < rocker->port_count; i++) {
2754                 p = rocker->ports[i];
2755                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
2756                         ref++;
2757         }
2758
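        /* Proceed only on the edges, i.e. when (adding && ref == 1) ||
         * (!adding && ref == 0): the first port just joined the VLAN or
         * the last one just left it.
         */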
2759         if ((!adding || ref != 1) && (adding || ref != 0))
2760                 return 0;
2761
2762         out_pport = 0;
2763         err = rocker_group_l2_interface(rocker_port, flags,
2764                                         vlan_id, out_pport,
2765                                         pop_vlan);
2766         if (err) {
2767                 netdev_err(rocker_port->dev,
2768                            "Error (%d) port VLAN l2 group for CPU port\n", err);
2769                 return err;
2770         }
2771
2772         return 0;
2773 }
2774
static struct rocker_ctrl {
        const u8 *eth_dst;
        const u8 *eth_dst_mask;
        __be16 eth_type;
        bool acl;
        bool bridge;
        bool term;
        bool copy_to_cpu;
} rocker_ctrls[] = {
        [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
                /* pass link local multicast pkts up to CPU for filtering */
                .eth_dst = ll_mac,
                .eth_dst_mask = ll_mask,
                .acl = true,
        },
        [ROCKER_CTRL_LOCAL_ARP] = {
                /* pass local ARP pkts up to CPU */
                .eth_dst = zero_mac,
                .eth_dst_mask = zero_mac,
                .eth_type = htons(ETH_P_ARP),
                .acl = true,
        },
        [ROCKER_CTRL_IPV4_MCAST] = {
                /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
                .eth_dst = ipv4_mcast,
                .eth_dst_mask = ipv4_mask,
                .eth_type = htons(ETH_P_IP),
                .term  = true,
                .copy_to_cpu = true,
        },
        [ROCKER_CTRL_IPV6_MCAST] = {
                /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
                .eth_dst = ipv6_mcast,
                .eth_dst_mask = ipv6_mask,
                .eth_type = htons(ETH_P_IPV6),
                .term  = true,
                .copy_to_cpu = true,
        },
        [ROCKER_CTRL_DFLT_BRIDGING] = {
                /* flood any pkts on vlan */
                .bridge = true,
                .copy_to_cpu = true,
        },
};

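/* Trap a ctrl class via the ACL table: match everything from this port
 * on this VLAN against the ctrl's dst MAC/mask and ethertype, and point
 * the entry at the L2 interface group for pport 0, the CPU port.
 */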
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
                                     int flags, struct rocker_ctrl *ctrl,
                                     __be16 vlan_id)
{
        u32 in_pport = rocker_port->pport;
        u32 in_pport_mask = 0xffffffff;
        u32 out_pport = 0;
        u8 *eth_src = NULL;
        u8 *eth_src_mask = NULL;
        __be16 vlan_id_mask = htons(0xffff);
        u8 ip_proto = 0;
        u8 ip_proto_mask = 0;
        u8 ip_tos = 0;
        u8 ip_tos_mask = 0;
        u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
        int err;

        err = rocker_flow_tbl_acl(rocker_port, flags,
                                  in_pport, in_pport_mask,
                                  eth_src, eth_src_mask,
                                  ctrl->eth_dst, ctrl->eth_dst_mask,
                                  ctrl->eth_type,
                                  vlan_id, vlan_id_mask,
                                  ip_proto, ip_proto_mask,
                                  ip_tos, ip_tos_mask,
                                  group_id);

        if (err)
                netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

        return err;
}

static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
                                        int flags, struct rocker_ctrl *ctrl,
                                        __be16 vlan_id)
{
        enum rocker_of_dpa_table_id goto_tbl =
                ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
        u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
        u32 tunnel_id = 0;
        int err;

        if (!rocker_port_is_bridged(rocker_port))
                return 0;

        err = rocker_flow_tbl_bridge(rocker_port, flags,
                                     ctrl->eth_dst, ctrl->eth_dst_mask,
                                     vlan_id, tunnel_id,
                                     goto_tbl, group_id, ctrl->copy_to_cpu);

        if (err)
                netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

        return err;
}

static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
                                      int flags, struct rocker_ctrl *ctrl,
                                      __be16 vlan_id)
{
        u32 in_pport_mask = 0xffffffff;
        __be16 vlan_id_mask = htons(0xffff);
        int err;

        if (ntohs(vlan_id) == 0)
                vlan_id = rocker_port->internal_vlan_id;

        err = rocker_flow_tbl_term_mac(rocker_port,
                                       rocker_port->pport, in_pport_mask,
                                       ctrl->eth_type, ctrl->eth_dst,
                                       ctrl->eth_dst_mask, vlan_id,
                                       vlan_id_mask, ctrl->copy_to_cpu,
                                       flags);

        if (err)
                netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

        return err;
}

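/* Dispatch on the ctrl's table flag; exactly one of acl, bridge or
 * term is expected to be set in rocker_ctrls[] above.
 */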
static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
                                 struct rocker_ctrl *ctrl, __be16 vlan_id)
{
        if (ctrl->acl)
                return rocker_port_ctrl_vlan_acl(rocker_port, flags,
                                                 ctrl, vlan_id);

        if (ctrl->bridge)
                return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
                                                    ctrl, vlan_id);

        if (ctrl->term)
                return rocker_port_ctrl_vlan_term(rocker_port, flags,
                                                  ctrl, vlan_id);

        return -EOPNOTSUPP;
}

static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
                                     int flags, __be16 vlan_id)
{
        int err = 0;
        int i;

        for (i = 0; i < ROCKER_CTRL_MAX; i++) {
                if (rocker_port->ctrls[i]) {
                        err = rocker_port_ctrl_vlan(rocker_port, flags,
                                                    &rocker_ctrls[i], vlan_id);
                        if (err)
                                return err;
                }
        }

        return err;
}

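/* Install (or remove) a single ctrl class on every VLAN the port is
 * currently a member of, as tracked by the port's vlan_bitmap.
 */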
static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
                            struct rocker_ctrl *ctrl)
{
        u16 vid;
        int err = 0;

        for (vid = 1; vid < VLAN_N_VID; vid++) {
                if (!test_bit(vid, rocker_port->vlan_bitmap))
                        continue;
                err = rocker_port_ctrl_vlan(rocker_port, flags,
                                            ctrl, htons(vid));
                if (err)
                        break;
        }

        return err;
}

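/* Port VLAN membership add/remove.  In order: mark the VLAN in the
 * port's vlan_bitmap, install the ctrl entries for it, create the L2
 * interface and flood groups, and finally add the VLAN table entry
 * that maps the wire VLAN (or untagged traffic) to the internal VLAN
 * ID used by the rest of the pipeline.
 */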
static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
                            u16 vid)
{
        enum rocker_of_dpa_table_id goto_tbl =
                ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
        u32 in_pport = rocker_port->pport;
        __be16 vlan_id = htons(vid);
        __be16 vlan_id_mask = htons(0xffff);
        __be16 internal_vlan_id;
        bool untagged;
        bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
        int err;

        internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

        if (adding && test_and_set_bit(ntohs(internal_vlan_id),
                                       rocker_port->vlan_bitmap))
                return 0; /* already added */
        else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
                                                rocker_port->vlan_bitmap))
                return 0; /* already removed */

        if (adding) {
                err = rocker_port_ctrl_vlan_add(rocker_port, flags,
                                                internal_vlan_id);
                if (err) {
                        netdev_err(rocker_port->dev,
                                   "Error (%d) port ctrl vlan add\n", err);
                        return err;
                }
        }

        err = rocker_port_vlan_l2_groups(rocker_port, flags,
                                         internal_vlan_id, untagged);
        if (err) {
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN l2 groups\n", err);
                return err;
        }

        err = rocker_port_vlan_flood_group(rocker_port, flags,
                                           internal_vlan_id);
        if (err) {
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN l2 flood group\n", err);
                return err;
        }

        err = rocker_flow_tbl_vlan(rocker_port, flags,
                                   in_pport, vlan_id, vlan_id_mask,
                                   goto_tbl, untagged, internal_vlan_id);
        if (err)
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN table\n", err);

        return err;
}

static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
{
        enum rocker_of_dpa_table_id goto_tbl;
        u32 in_pport;
        u32 in_pport_mask;
        int err;

        /* Normal Ethernet frames.  Matches pkts from any local physical
         * port.  Goto VLAN tbl.
         */

        in_pport = 0;
        in_pport_mask = 0xffff0000;
        goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

        err = rocker_flow_tbl_ig_port(rocker_port, flags,
                                      in_pport, in_pport_mask,
                                      goto_tbl);
        if (err)
                netdev_err(rocker_port->dev,
                           "Error (%d) ingress port table entry\n", err);

        return err;
}

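/* Learned FDB entries are reported to the bridge layer via the
 * switchdev notifier chain.  The report is deferred to a work item
 * since learning is noticed from contexts (e.g. the event IRQ path)
 * where calling the notifiers directly isn't safe.
 */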
struct rocker_fdb_learn_work {
        struct work_struct work;
        struct net_device *dev;
        int flags;
        u8 addr[ETH_ALEN];
        u16 vid;
};

static void rocker_port_fdb_learn_work(struct work_struct *work)
{
        struct rocker_fdb_learn_work *lw =
                container_of(work, struct rocker_fdb_learn_work, work);
        bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
        bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
        struct netdev_switch_notifier_fdb_info info;

        info.addr = lw->addr;
        info.vid = lw->vid;

        if (learned && removing)
                call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
                                             lw->dev, &info.info);
        else if (learned && !removing)
                call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
                                             lw->dev, &info.info);

        kfree(lw);      /* free via the container, not the work member */
}

static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
                                 int flags, const u8 *addr, __be16 vlan_id)
{
        struct rocker_fdb_learn_work *lw;
        enum rocker_of_dpa_table_id goto_tbl =
                ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
        u32 out_pport = rocker_port->pport;
        u32 tunnel_id = 0;
        u32 group_id = ROCKER_GROUP_NONE;
        bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
        bool copy_to_cpu = false;
        int err;

        if (rocker_port_is_bridged(rocker_port))
                group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

        if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
                err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
                                             vlan_id, tunnel_id, goto_tbl,
                                             group_id, copy_to_cpu);
                if (err)
                        return err;
        }

        if (!syncing)
                return 0;

        if (!rocker_port_is_bridged(rocker_port))
                return 0;

        lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
        if (!lw)
                return -ENOMEM;

        INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

        lw->dev = rocker_port->dev;
        lw->flags = flags;
        ether_addr_copy(lw->addr, addr);
        lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

        schedule_work(&lw->work);

        return 0;
}

static struct rocker_fdb_tbl_entry *
rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
{
        struct rocker_fdb_tbl_entry *found;

        hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
                if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
                        return found;

        return NULL;
}

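/* The driver keeps a software shadow of the device FDB, hashed on the
 * crc32 of the {pport, addr, vlan_id} key.  The shadow is what lets us
 * detect refreshes (add of an existing entry) and removals of entries
 * we never had, and it is the table walked by the flush path below.
 */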
static int rocker_port_fdb(struct rocker_port *rocker_port,
                           const unsigned char *addr,
                           __be16 vlan_id, int flags)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_fdb_tbl_entry *fdb;
        struct rocker_fdb_tbl_entry *found;
        bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
        unsigned long lock_flags;

        fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
        if (!fdb)
                return -ENOMEM;

        fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
        fdb->key.pport = rocker_port->pport;
        ether_addr_copy(fdb->key.addr, addr);
        fdb->key.vlan_id = vlan_id;
        fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

        spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

        found = rocker_fdb_tbl_find(rocker, fdb);

        if (removing && found) {
                kfree(fdb);
                hash_del(&found->entry);
                kfree(found);   /* entry is unlinked; free it or it leaks */
        } else if (!removing && !found) {
                hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
        }

        spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

        /* Check if adding and already exists, or removing and can't find */
        if (!found != !removing) {
                kfree(fdb);
                if (!found && removing)
                        return 0;
                /* Refreshing existing to update aging timers */
                flags |= ROCKER_OP_FLAG_REFRESH;
        }

        return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
}

static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_fdb_tbl_entry *found;
        unsigned long lock_flags;
        int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
        struct hlist_node *tmp;
        int bkt;
        int err = 0;

        if (rocker_port->stp_state == BR_STATE_LEARNING ||
            rocker_port->stp_state == BR_STATE_FORWARDING)
                return 0;

        spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

        hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
                if (found->key.pport != rocker_port->pport)
                        continue;
                if (!found->learned)
                        continue;
                err = rocker_port_fdb_learn(rocker_port, flags,
                                            found->key.addr,
                                            found->key.vlan_id);
                if (err)
                        goto err_out;
                hash_del(&found->entry);
                kfree(found);   /* entry is unlinked; free it or it leaks */
        }

err_out:
        spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

        return err;
}

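/* Termination-MAC entries for the port's own MAC address: IPv4 and
 * IPv6 packets addressed to the port MAC are terminated here so they
 * can be routed rather than bridged.
 */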
static int rocker_port_router_mac(struct rocker_port *rocker_port,
                                  int flags, __be16 vlan_id)
{
        u32 in_pport_mask = 0xffffffff;
        __be16 eth_type;
        const u8 *dst_mac_mask = ff_mac;
        __be16 vlan_id_mask = htons(0xffff);
        bool copy_to_cpu = false;
        int err;

        if (ntohs(vlan_id) == 0)
                vlan_id = rocker_port->internal_vlan_id;

        eth_type = htons(ETH_P_IP);
        err = rocker_flow_tbl_term_mac(rocker_port,
                                       rocker_port->pport, in_pport_mask,
                                       eth_type, rocker_port->dev->dev_addr,
                                       dst_mac_mask, vlan_id, vlan_id_mask,
                                       copy_to_cpu, flags);
        if (err)
                return err;

        eth_type = htons(ETH_P_IPV6);
        err = rocker_flow_tbl_term_mac(rocker_port,
                                       rocker_port->pport, in_pport_mask,
                                       eth_type, rocker_port->dev->dev_addr,
                                       dst_mac_mask, vlan_id, vlan_id_mask,
                                       copy_to_cpu, flags);

        return err;
}

static int rocker_port_fwding(struct rocker_port *rocker_port)
{
        bool pop_vlan;
        u32 out_pport;
        __be16 vlan_id;
        u16 vid;
        int flags = ROCKER_OP_FLAG_NOWAIT;
        int err;

        /* Port will be forwarding-enabled if its STP state is LEARNING
         * or FORWARDING.  Traffic from CPU can still egress, regardless of
         * port STP state.  Use L2 interface group on port VLANs as a way
         * to toggle port forwarding: if forwarding is disabled, L2
         * interface group will not exist.
         */

        if (rocker_port->stp_state != BR_STATE_LEARNING &&
            rocker_port->stp_state != BR_STATE_FORWARDING)
                flags |= ROCKER_OP_FLAG_REMOVE;

        out_pport = rocker_port->pport;
        for (vid = 1; vid < VLAN_N_VID; vid++) {
                if (!test_bit(vid, rocker_port->vlan_bitmap))
                        continue;
                vlan_id = htons(vid);
                pop_vlan = rocker_vlan_id_is_internal(vlan_id);
                err = rocker_group_l2_interface(rocker_port, flags,
                                                vlan_id, out_pport,
                                                pop_vlan);
                if (err) {
                        netdev_err(rocker_port->dev,
                                   "Error (%d) port VLAN l2 group for pport %d\n",
                                   err, out_pport);
                        return err;
                }
        }

        return 0;
}

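/* Recompute which ctrl classes the port needs for its new STP state,
 * reconcile them against what is currently installed, flush learned
 * FDB entries when leaving LEARNING/FORWARDING, and finally toggle the
 * L2 interface groups via rocker_port_fwding().  Roughly:
 *
 *   DISABLED              nothing trapped
 *   LISTENING/BLOCKING    link-local mcast only
 *   LEARNING/FORWARDING   + IPv4/IPv6 mcast, and either default
 *                         bridging (bridged) or local ARP (unbridged)
 */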
static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
{
        bool want[ROCKER_CTRL_MAX] = { 0, };
        int flags;
        int err;
        int i;

        if (rocker_port->stp_state == state)
                return 0;

        rocker_port->stp_state = state;

        switch (state) {
        case BR_STATE_DISABLED:
                /* port is completely disabled */
                break;
        case BR_STATE_LISTENING:
        case BR_STATE_BLOCKING:
                want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
                break;
        case BR_STATE_LEARNING:
        case BR_STATE_FORWARDING:
                want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
                want[ROCKER_CTRL_IPV4_MCAST] = true;
                want[ROCKER_CTRL_IPV6_MCAST] = true;
                if (rocker_port_is_bridged(rocker_port))
                        want[ROCKER_CTRL_DFLT_BRIDGING] = true;
                else
                        want[ROCKER_CTRL_LOCAL_ARP] = true;
                break;
        }

        for (i = 0; i < ROCKER_CTRL_MAX; i++) {
                if (want[i] != rocker_port->ctrls[i]) {
                        flags = ROCKER_OP_FLAG_NOWAIT |
                                (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
                        err = rocker_port_ctrl(rocker_port, flags,
                                               &rocker_ctrls[i]);
                        if (err)
                                return err;
                        rocker_port->ctrls[i] = want[i];
                }
        }

        err = rocker_port_fdb_flush(rocker_port);
        if (err)
                return err;

        return rocker_port_fwding(rocker_port);
}

static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
{
        if (rocker_port_is_bridged(rocker_port))
                /* bridge STP will enable port */
                return 0;

        /* port is not bridged, so simulate going to FORWARDING state */
        return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
}

static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
{
        if (rocker_port_is_bridged(rocker_port))
                /* bridge STP will disable port */
                return 0;

        /* port is not bridged, so simulate going to DISABLED state */
        return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
}

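/* Untagged traffic travels the pipeline on an internal VLAN drawn from
 * a reserved range starting at ROCKER_INTERNAL_VLAN_ID_BASE.  Internal
 * VLAN IDs are allocated per ifindex and refcounted, so users sharing
 * an ifindex (e.g. ports joined to the same bridge) share one ID.
 */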
static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
{
        struct rocker_internal_vlan_tbl_entry *found;

        hash_for_each_possible(rocker->internal_vlan_tbl, found,
                               entry, ifindex) {
                if (found->ifindex == ifindex)
                        return found;
        }

        return NULL;
}

static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
                                               int ifindex)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_internal_vlan_tbl_entry *entry;
        struct rocker_internal_vlan_tbl_entry *found;
        unsigned long lock_flags;
        int i;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return 0;

        entry->ifindex = ifindex;

        spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

        found = rocker_internal_vlan_tbl_find(rocker, ifindex);
        if (found) {
                kfree(entry);
                goto found;
        }

        found = entry;
        hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

        for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
                if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
                        continue;
                found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
                goto found;
        }

        netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
        found->ref_count++;
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

        return found->vlan_id;
}

static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
                                             int ifindex)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_internal_vlan_tbl_entry *found;
        unsigned long lock_flags;
        unsigned long bit;

        spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

        found = rocker_internal_vlan_tbl_find(rocker, ifindex);
        if (!found) {
                netdev_err(rocker_port->dev,
                           "ifindex (%d) not found in internal VLAN tbl\n",
                           ifindex);
                goto not_found;
        }

        if (--found->ref_count <= 0) {
                bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
                clear_bit(bit, rocker->internal_vlan_bitmap);
                hash_del(&found->entry);
                kfree(found);
        }

not_found:
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}

/*****************
 * Net device ops
 *****************/

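/* ndo_open: the bring-up order is DMA rings, then the port's tx/rx
 * MSI-X vectors, then forwarding state, and only then NAPI and the
 * hardware enable bit; the error path unwinds in reverse.
 */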
static int rocker_port_open(struct net_device *dev)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        int err;

        err = rocker_port_dma_rings_init(rocker_port);
        if (err)
                return err;

        err = request_irq(rocker_msix_tx_vector(rocker_port),
                          rocker_tx_irq_handler, 0,
                          rocker_driver_name, rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "cannot assign tx irq\n");
                goto err_request_tx_irq;
        }

        err = request_irq(rocker_msix_rx_vector(rocker_port),
                          rocker_rx_irq_handler, 0,
                          rocker_driver_name, rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "cannot assign rx irq\n");
                goto err_request_rx_irq;
        }

        err = rocker_port_fwd_enable(rocker_port);
        if (err)
                goto err_fwd_enable;

        napi_enable(&rocker_port->napi_tx);
        napi_enable(&rocker_port->napi_rx);
        rocker_port_set_enable(rocker_port, true);
        netif_start_queue(dev);
        return 0;

err_fwd_enable:
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
        rocker_port_dma_rings_fini(rocker_port);
        return err;
}

static int rocker_port_stop(struct net_device *dev)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        netif_stop_queue(dev);
        rocker_port_set_enable(rocker_port, false);
        napi_disable(&rocker_port->napi_rx);
        napi_disable(&rocker_port->napi_tx);
        rocker_port_fwd_disable(rocker_port);
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
        rocker_port_dma_rings_fini(rocker_port);

        return 0;
}

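/* Tx fragments are described to the device as nested TLVs, one
 * {DMA addr, len} attribute pair per fragment.  On completion the
 * descriptor is re-parsed to recover the mappings and unmap them.
 */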
static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
                                       struct rocker_desc_info *desc_info)
{
        struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
        struct rocker_tlv *attr;
        int rem;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
        if (!attrs[ROCKER_TLV_TX_FRAGS])
                return;
        rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
                struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
                dma_addr_t dma_handle;
                size_t len;

                if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
                        continue;
                rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
                                        attr);
                if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
                    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
                        continue;
                dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
                len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
                pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
        }
}

static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
                                       struct rocker_desc_info *desc_info,
                                       char *buf, size_t buf_len)
{
        struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
        struct rocker_tlv *frag;

        dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
        if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
                if (net_ratelimit())
                        netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
                return -EIO;
        }
        frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
        if (!frag)
                goto unmap_frag;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
                               dma_handle))
                goto nest_cancel;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
                               buf_len))
                goto nest_cancel;
        rocker_tlv_nest_end(desc_info, frag);
        return 0;

nest_cancel:
        rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
        pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
        return -EMSGSIZE;
}

static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)