/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O register accessor helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)  \
{                                                                       \
        u32 reg = readl_relaxed(priv->base + offset + off);             \
        return reg;                                                     \
}                                                                       \
static inline void name##_writel(struct bcm_sysport_priv *priv,         \
                                  u32 val, u32 off)                     \
{                                                                       \
        writel_relaxed(val, priv->base + offset + off);                 \
}                                                                       \

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
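
/* For reference, each BCM_SYSPORT_IO_MACRO() instantiation above expands
 * into a read/write accessor pair; e.g. the umac instance provides
 * umac_readl(priv, off) and umac_writel(priv, val, off), both operating
 * relative to priv->base + SYS_PORT_UMAC_OFFSET.
 */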

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved up by 4 bytes, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
        if (priv->is_lite && off >= RDMA_STATUS)
                off += 4;
        return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
        if (priv->is_lite && off >= RDMA_STATUS)
                off += 4;
        writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
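
/* For example, on Lite parts rdma_readl(priv, RDMA_STATUS) actually reads
 * priv->base + SYS_PORT_RDMA_OFFSET + RDMA_STATUS + 4, while registers
 * below RDMA_STATUS are accessed unshifted.
 */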

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
        if (!priv->is_lite) {
                return BIT(bit);
        } else {
                if (bit >= ACB_ALGO)
                        return BIT(bit + 1);
                else
                        return BIT(bit);
        }
}
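
/* E.g. for TDMA_CONTROL bits at or above ACB_ALGO, SYSTEMPORT Lite inserts
 * one extra bit, so the helper returns BIT(bit + 1) there and BIT(bit)
 * otherwise.
 */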

/* L2 interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which)      \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
                                                u32 mask)               \
{                                                                       \
        priv->irq##which##_mask &= ~(mask);                             \
        intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
}                                                                       \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
                                                u32 mask)               \
{                                                                       \
        intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);       \
        priv->irq##which##_mask |= (mask);                              \
}                                                                       \

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
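
/* These instantiations generate intrl2_0_mask_clear()/intrl2_0_mask_set()
 * and their intrl2_1 counterparts, keeping priv->irq0_mask and
 * priv->irq1_mask in sync with the hardware mask registers.
 */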

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the 64-bit check explicit here to save one
 * register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
                                     void __iomem *d,
                                     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
        writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
                     d + DESC_ADDR_HI_STATUS_LEN);
#endif
        writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
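
/* Note: without CONFIG_PHYS_ADDR_T_64BIT this helper compiles down to the
 * single lower-word write, which is exactly the per-packet saving the
 * comment above describes.
 */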

/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
                                    netdev_features_t wanted)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;

        priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
        reg = rxchk_readl(priv, RXCHK_CONTROL);
        /* Clear L2 header checks, which would prevent BPDUs
         * from being received.
         */
        reg &= ~RXCHK_L2_HDR_DIS;
        if (priv->rx_chk_en)
                reg |= RXCHK_EN;
        else
                reg &= ~RXCHK_EN;

        /* If UniMAC forwards CRC, we need to skip over it to get
         * a valid CHK bit to be set in the per-packet status word
         */
        if (priv->rx_chk_en && priv->crc_fwd)
                reg |= RXCHK_SKIP_FCS;
        else
                reg &= ~RXCHK_SKIP_FCS;

        /* If Broadcom tags are enabled (e.g. when using a switch), make
         * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
         * tag after the Ethernet MAC Source Address.
         */
        if (netdev_uses_dsa(dev))
                reg |= RXCHK_BRCM_TAG_EN;
        else
                reg &= ~RXCHK_BRCM_TAG_EN;

        rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
                                    netdev_features_t wanted)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;

        /* Hardware transmit checksum requires us to enable the Transmit status
         * block prepended to the packet contents
         */
        priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
        reg = tdma_readl(priv, TDMA_CONTROL);
        if (priv->tsb_en)
                reg |= tdma_control_bit(priv, TSB_EN);
        else
                reg &= ~tdma_control_bit(priv, TSB_EN);
        tdma_writel(priv, reg, TDMA_CONTROL);
}

static int bcm_sysport_set_features(struct net_device *dev,
                                    netdev_features_t features)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        /* Read CRC forward */
        if (!priv->is_lite)
                priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
        else
                priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
                                  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

        bcm_sysport_set_rx_csum(dev, features);
        bcm_sysport_set_tx_csum(dev, features);

        return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        /* general stats */
        STAT_NETDEV64(rx_packets),
        STAT_NETDEV64(tx_packets),
        STAT_NETDEV64(rx_bytes),
        STAT_NETDEV64(tx_bytes),
        STAT_NETDEV(rx_errors),
        STAT_NETDEV(tx_errors),
        STAT_NETDEV(rx_dropped),
        STAT_NETDEV(tx_dropped),
        STAT_NETDEV(multicast),
        /* UniMAC RSV counters */
        STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
        STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
        STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
        STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
        STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
        STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
        STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
        STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
        STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
        STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
        STAT_MIB_RX("rx_pkts", mib.rx.pkt),
        STAT_MIB_RX("rx_bytes", mib.rx.bytes),
        STAT_MIB_RX("rx_multicast", mib.rx.mca),
        STAT_MIB_RX("rx_broadcast", mib.rx.bca),
        STAT_MIB_RX("rx_fcs", mib.rx.fcs),
        STAT_MIB_RX("rx_control", mib.rx.cf),
        STAT_MIB_RX("rx_pause", mib.rx.pf),
        STAT_MIB_RX("rx_unknown", mib.rx.uo),
        STAT_MIB_RX("rx_align", mib.rx.aln),
        STAT_MIB_RX("rx_outrange", mib.rx.flr),
        STAT_MIB_RX("rx_code", mib.rx.cde),
        STAT_MIB_RX("rx_carrier", mib.rx.fcr),
        STAT_MIB_RX("rx_oversize", mib.rx.ovr),
        STAT_MIB_RX("rx_jabber", mib.rx.jbr),
        STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
        STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
        STAT_MIB_RX("rx_unicast", mib.rx.uc),
        STAT_MIB_RX("rx_ppp", mib.rx.ppp),
        STAT_MIB_RX("rx_crc", mib.rx.rcrc),
        /* UniMAC TSV counters */
        STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
        STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
        STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
        STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
        STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
        STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
        STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
        STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
        STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
        STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
        STAT_MIB_TX("tx_pkts", mib.tx.pkts),
        STAT_MIB_TX("tx_multicast", mib.tx.mca),
        STAT_MIB_TX("tx_broadcast", mib.tx.bca),
        STAT_MIB_TX("tx_pause", mib.tx.pf),
        STAT_MIB_TX("tx_control", mib.tx.cf),
        STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
        STAT_MIB_TX("tx_oversize", mib.tx.ovr),
        STAT_MIB_TX("tx_defer", mib.tx.drf),
        STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
        STAT_MIB_TX("tx_single_col", mib.tx.scl),
        STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
        STAT_MIB_TX("tx_late_col", mib.tx.lcl),
        STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
        STAT_MIB_TX("tx_frags", mib.tx.frg),
        STAT_MIB_TX("tx_total_col", mib.tx.ncl),
        STAT_MIB_TX("tx_jabber", mib.tx.jbr),
        STAT_MIB_TX("tx_bytes", mib.tx.bytes),
        STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
        STAT_MIB_TX("tx_unicast", mib.tx.uc),
        /* UniMAC RUNT counters */
        STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
        STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
        STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
        STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
        /* RXCHK misc statistics */
        STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
        STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
                   RXCHK_OTHER_DISC_CNTR),
        /* RBUF misc statistics */
        STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
        STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
        STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
        STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
        STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
        STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
        STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
        /* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN   ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, "0.1", sizeof(info->version));
        strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
        switch (type) {
        case BCM_SYSPORT_STAT_NETDEV:
        case BCM_SYSPORT_STAT_NETDEV64:
        case BCM_SYSPORT_STAT_RXCHK:
        case BCM_SYSPORT_STAT_RBUF:
        case BCM_SYSPORT_STAT_SOFT:
                return true;
        default:
                return false;
        }
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        const struct bcm_sysport_stats *s;
        unsigned int i, j;

        switch (string_set) {
        case ETH_SS_STATS:
                for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                        s = &bcm_sysport_gstrings_stats[i];
                        if (priv->is_lite &&
                            !bcm_sysport_lite_stat_valid(s->type))
                                continue;
                        j++;
                }
                /* Include per-queue statistics */
                return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
        default:
                return -EOPNOTSUPP;
        }
}

static void bcm_sysport_get_strings(struct net_device *dev,
                                    u32 stringset, u8 *data)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        const struct bcm_sysport_stats *s;
        char buf[128];
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                        s = &bcm_sysport_gstrings_stats[i];
                        if (priv->is_lite &&
                            !bcm_sysport_lite_stat_valid(s->type))
                                continue;

                        memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
                               ETH_GSTRING_LEN);
                        j++;
                }

                for (i = 0; i < dev->num_tx_queues; i++) {
                        snprintf(buf, sizeof(buf), "txq%d_packets", i);
                        memcpy(data + j * ETH_GSTRING_LEN, buf,
                               ETH_GSTRING_LEN);
                        j++;

                        snprintf(buf, sizeof(buf), "txq%d_bytes", i);
                        memcpy(data + j * ETH_GSTRING_LEN, buf,
                               ETH_GSTRING_LEN);
                        j++;
                }
                break;
        default:
                break;
        }
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
        int i, j = 0;

        for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                const struct bcm_sysport_stats *s;
                u8 offset = 0;
                u32 val = 0;
                char *p;

                s = &bcm_sysport_gstrings_stats[i];
                switch (s->type) {
                case BCM_SYSPORT_STAT_NETDEV:
                case BCM_SYSPORT_STAT_NETDEV64:
                case BCM_SYSPORT_STAT_SOFT:
                        continue;
                case BCM_SYSPORT_STAT_MIB_RX:
                case BCM_SYSPORT_STAT_MIB_TX:
                case BCM_SYSPORT_STAT_RUNT:
                        if (priv->is_lite)
                                continue;

                        if (s->type != BCM_SYSPORT_STAT_MIB_RX)
                                offset = UMAC_MIB_STAT_OFFSET;
                        val = umac_readl(priv, UMAC_MIB_START + j + offset);
                        break;
                case BCM_SYSPORT_STAT_RXCHK:
                        val = rxchk_readl(priv, s->reg_offset);
                        if (val == ~0)
                                rxchk_writel(priv, 0, s->reg_offset);
                        break;
                case BCM_SYSPORT_STAT_RBUF:
                        val = rbuf_readl(priv, s->reg_offset);
                        if (val == ~0)
                                rbuf_writel(priv, 0, s->reg_offset);
                        break;
                }

                j += s->stat_sizeof;
                p = (char *)priv + s->stat_offset;
                *(u32 *)p = val;
        }

        netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
                                        u64 *tx_bytes, u64 *tx_packets)
{
        struct bcm_sysport_tx_ring *ring;
        u64 bytes = 0, packets = 0;
        unsigned int start;
        unsigned int q;

        for (q = 0; q < priv->netdev->num_tx_queues; q++) {
                ring = &priv->tx_rings[q];
                do {
                        start = u64_stats_fetch_begin_irq(&priv->syncp);
                        bytes = ring->bytes;
                        packets = ring->packets;
                } while (u64_stats_fetch_retry_irq(&priv->syncp, start));

                *tx_bytes += bytes;
                *tx_packets += packets;
        }
}

static void bcm_sysport_get_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_stats64 *stats64 = &priv->stats64;
        struct u64_stats_sync *syncp = &priv->syncp;
        struct bcm_sysport_tx_ring *ring;
        u64 tx_bytes = 0, tx_packets = 0;
        unsigned int start;
        int i, j;

        if (netif_running(dev)) {
                bcm_sysport_update_mib_counters(priv);
                bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
                stats64->tx_bytes = tx_bytes;
                stats64->tx_packets = tx_packets;
        }

        for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                const struct bcm_sysport_stats *s;
                char *p;

                s = &bcm_sysport_gstrings_stats[i];
                if (s->type == BCM_SYSPORT_STAT_NETDEV)
                        p = (char *)&dev->stats;
                else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
                        p = (char *)stats64;
                else
                        p = (char *)priv;

                if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
                        continue;
                p += s->stat_offset;

                if (s->stat_sizeof == sizeof(u64) &&
                    s->type == BCM_SYSPORT_STAT_NETDEV64) {
                        do {
                                start = u64_stats_fetch_begin_irq(syncp);
                                data[i] = *(u64 *)p;
                        } while (u64_stats_fetch_retry_irq(syncp, start));
                } else
                        data[i] = *(u32 *)p;
                j++;
        }

        /* On SYSTEMPORT Lite the statistics have holes, so at the end of
         * the loop j does not simply equal BCM_SYSPORT_STATS_LEN; recompute
         * it so it points at the first per-TX-queue slot, i.e. the total
         * number of statistics minus the number of per-TX-queue statistics.
         */
        j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
            dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

        for (i = 0; i < dev->num_tx_queues; i++) {
                ring = &priv->tx_rings[i];
                data[j] = ring->packets;
                j++;
                data[j] = ring->bytes;
                j++;
        }
}

static void bcm_sysport_get_wol(struct net_device *dev,
                                struct ethtool_wolinfo *wol)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
        wol->wolopts = priv->wolopts;

        if (!(priv->wolopts & WAKE_MAGICSECURE))
                return;

        memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
                               struct ethtool_wolinfo *wol)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

        if (!device_can_wakeup(kdev))
                return -ENOTSUPP;

        if (wol->wolopts & ~supported)
                return -EINVAL;

        if (wol->wolopts & WAKE_MAGICSECURE)
                memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

        /* Flag the device and relevant IRQ as wakeup capable */
        if (wol->wolopts) {
                device_set_wakeup_enable(kdev, 1);
                if (priv->wol_irq_disabled)
                        enable_irq_wake(priv->wol_irq);
                priv->wol_irq_disabled = 0;
        } else {
                device_set_wakeup_enable(kdev, 0);
                /* Avoid unbalanced disable_irq_wake calls */
                if (!priv->wol_irq_disabled)
                        disable_irq_wake(priv->wol_irq);
                priv->wol_irq_disabled = 1;
        }

        priv->wolopts = wol->wolopts;

        return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
                                        u32 usecs, u32 pkts)
{
        u32 reg;

        reg = rdma_readl(priv, RDMA_MBDONE_INTR);
        reg &= ~(RDMA_INTR_THRESH_MASK |
                 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
        reg |= pkts;
        reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
        rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
                                        struct ethtool_coalesce *ec)
{
        struct bcm_sysport_priv *priv = ring->priv;
        u32 reg;

        reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
        reg &= ~(RING_INTR_THRESH_MASK |
                 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
        reg |= ec->tx_max_coalesced_frames;
        reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
                            RING_TIMEOUT_SHIFT;
        tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
                                    struct ethtool_coalesce *ec)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;

        reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

        ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
        ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

        reg = rdma_readl(priv, RDMA_MBDONE_INTR);

        ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
        ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
        ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

        return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
                                    struct ethtool_coalesce *ec)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct net_dim_cq_moder moder;
        u32 usecs, pkts;
        unsigned int i;

        /* Base system clock is 125MHz; the DMA timeout is this reference
         * clock divided by 1024, which yields roughly 8.192 us. Our maximum
         * value has to fit in the RING_TIMEOUT_MASK (16 bits).
         */
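        /* Illustrative conversion: ec->tx_coalesce_usecs = 100 maps to
         * DIV_ROUND_UP(100 * 1000, 8192) = 13 timeout units, i.e. the
         * hardware waits ~106.5 us before raising the interrupt.
         */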
        if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
            ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
            ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
            ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
                return -EINVAL;

        if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
            (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
            ec->use_adaptive_tx_coalesce)
                return -EINVAL;

        for (i = 0; i < dev->num_tx_queues; i++)
                bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

        priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
        priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
        usecs = priv->rx_coalesce_usecs;
        pkts = priv->rx_max_coalesced_frames;

        if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
                moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
                usecs = moder.usec;
                pkts = moder.pkts;
        }

        priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

        /* Apply desired coalescing parameters */
        bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

        return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
        dev_consume_skb_any(cb->skb);
        cb->skb = NULL;
        dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
                                             struct bcm_sysport_cb *cb)
{
        struct device *kdev = &priv->pdev->dev;
        struct net_device *ndev = priv->netdev;
        struct sk_buff *skb, *rx_skb;
        dma_addr_t mapping;

        /* Allocate a new SKB for a new packet */
        skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
        if (!skb) {
                priv->mib.alloc_rx_buff_failed++;
                netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
                return NULL;
        }

        mapping = dma_map_single(kdev, skb->data,
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
                priv->mib.rx_dma_failed++;
                dev_kfree_skb_any(skb);
                netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
                return NULL;
        }

        /* Grab the current SKB on the ring */
        rx_skb = cb->skb;
        if (likely(rx_skb))
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);

        /* Put the new SKB on the ring */
        cb->skb = skb;
        dma_unmap_addr_set(cb, dma_addr, mapping);
        dma_desc_set_addr(priv, cb->bd_addr, mapping);

        netif_dbg(priv, rx_status, ndev, "RX refill\n");

        /* Return the current SKB to the caller */
        return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
        struct bcm_sysport_cb *cb;
        struct sk_buff *skb;
        unsigned int i;

        for (i = 0; i < priv->num_rx_bds; i++) {
                cb = &priv->rx_cbs[i];
                skb = bcm_sysport_rx_refill(priv, cb);
                if (skb)
                        dev_kfree_skb(skb);
                if (!cb->skb)
                        return -ENOMEM;
        }

        return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                                        unsigned int budget)
{
        struct bcm_sysport_stats64 *stats64 = &priv->stats64;
        struct net_device *ndev = priv->netdev;
        unsigned int processed = 0, to_process;
        unsigned int processed_bytes = 0;
        struct bcm_sysport_cb *cb;
        struct sk_buff *skb;
        unsigned int p_index;
        u16 len, status;
        struct bcm_rsb *rsb;

        /* Clear status before servicing to reduce spurious interrupts */
        intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

        /* Determine how much we should process since the last call;
         * SYSTEMPORT Lite groups the producer and consumer indexes into the
         * same 32-bit register, which we access using RDMA_CONS_INDEX.
         */
        if (!priv->is_lite)
                p_index = rdma_readl(priv, RDMA_PROD_INDEX);
        else
                p_index = rdma_readl(priv, RDMA_CONS_INDEX);
        p_index &= RDMA_PROD_INDEX_MASK;

        to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
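
        /* The mask makes the subtraction wrap correctly; e.g. with a 16-bit
         * index, p_index = 3 and rx_c_index = 65533 give to_process = 6.
         */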

        netif_dbg(priv, rx_status, ndev,
                  "p_index=%d rx_c_index=%d to_process=%d\n",
                  p_index, priv->rx_c_index, to_process);

        while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = bcm_sysport_rx_refill(priv, cb);

                /* If we do not have a backing SKB, we also do not have a
                 * corresponding DMA mapping for this incoming packet, since
                 * bcm_sysport_rx_refill() always either has both an skb and
                 * a mapping, or none.
                 */
                if (unlikely(!skb)) {
                        netif_err(priv, rx_err, ndev, "out of memory!\n");
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
                        goto next;
                }

                /* Extract the Receive Status Block prepended */
                rsb = (struct bcm_rsb *)skb->data;
                len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
                status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
                          DESC_STATUS_MASK;

                netif_dbg(priv, rx_status, ndev,
                          "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
                          p_index, priv->rx_c_index, priv->rx_read_ptr,
                          len, status);

                if (unlikely(len > RX_BUF_LENGTH)) {
                        netif_err(priv, rx_status, ndev, "oversized packet\n");
                        ndev->stats.rx_length_errors++;
                        ndev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
                        netif_err(priv, rx_status, ndev, "fragmented packet!\n");
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
                        netif_err(priv, rx_err, ndev, "error packet\n");
                        if (status & RX_STATUS_OVFLOW)
                                ndev->stats.rx_over_errors++;
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                skb_put(skb, len);

                /* Hardware validated our checksum */
                if (likely(status & DESC_L4_CSUM))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                /* The hardware prepends packets with 2 bytes before the
                 * Ethernet header, plus we have the Receive Status Block;
                 * strip all of this from the SKB.
                 */
                skb_pull(skb, sizeof(*rsb) + 2);
                len -= (sizeof(*rsb) + 2);
                processed_bytes += len;

                /* UniMAC may forward CRC */
                if (priv->crc_fwd) {
                        skb_trim(skb, len - ETH_FCS_LEN);
                        len -= ETH_FCS_LEN;
                }

                skb->protocol = eth_type_trans(skb, ndev);
                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += len;
                u64_stats_update_begin(&priv->syncp);
                stats64->rx_packets++;
                stats64->rx_bytes += len;
                u64_stats_update_end(&priv->syncp);

                napi_gro_receive(&priv->napi, skb);
next:
                processed++;
                priv->rx_read_ptr++;

                if (priv->rx_read_ptr == priv->num_rx_bds)
                        priv->rx_read_ptr = 0;
        }

        priv->dim.packets = processed;
        priv->dim.bytes = processed_bytes;

        return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
                                       struct bcm_sysport_cb *cb,
                                       unsigned int *bytes_compl,
                                       unsigned int *pkts_compl)
{
        struct bcm_sysport_priv *priv = ring->priv;
        struct device *kdev = &priv->pdev->dev;

        if (cb->skb) {
                *bytes_compl += cb->skb->len;
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 dma_unmap_len(cb, dma_len),
                                 DMA_TO_DEVICE);
                (*pkts_compl)++;
                bcm_sysport_free_cb(cb);
        /* SKB fragment */
        } else if (dma_unmap_addr(cb, dma_addr)) {
                *bytes_compl += dma_unmap_len(cb, dma_len);
                dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
                               dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
                dma_unmap_addr_set(cb, dma_addr, 0);
        }
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                             struct bcm_sysport_tx_ring *ring)
{
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct net_device *ndev = priv->netdev;
        unsigned int txbds_processed = 0;
        struct bcm_sysport_cb *cb;
        unsigned int txbds_ready;
        unsigned int c_index;
        u32 hw_ind;

        /* Clear status before servicing to reduce spurious interrupts */
        if (!ring->priv->is_lite)
                intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
        else
                intrl2_0_writel(ring->priv, BIT(ring->index +
                                INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

        /* Compute how many descriptors have been processed since last call */
        hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
        c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
        txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

        netif_dbg(priv, tx_done, ndev,
                  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
                  ring->index, ring->c_index, c_index, txbds_ready);

        while (txbds_processed < txbds_ready) {
                cb = &ring->cbs[ring->clean_index];
                bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

                ring->desc_count++;
                txbds_processed++;

                if (likely(ring->clean_index < ring->size - 1))
                        ring->clean_index++;
                else
                        ring->clean_index = 0;
        }

        u64_stats_update_begin(&priv->syncp);
        ring->packets += pkts_compl;
        ring->bytes += bytes_compl;
        u64_stats_update_end(&priv->syncp);

        ring->c_index = c_index;

        netif_dbg(priv, tx_done, ndev,
                  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
                  ring->index, ring->c_index, pkts_compl, bytes_compl);

        return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                           struct bcm_sysport_tx_ring *ring)
{
        struct netdev_queue *txq;
        unsigned int released;
        unsigned long flags;

        txq = netdev_get_tx_queue(priv->netdev, ring->index);

        spin_lock_irqsave(&ring->lock, flags);
        released = __bcm_sysport_tx_reclaim(priv, ring);
        if (released)
                netif_tx_wake_queue(txq);

        spin_unlock_irqrestore(&ring->lock, flags);

        return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
                                 struct bcm_sysport_tx_ring *ring)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        __bcm_sysport_tx_reclaim(priv, ring);
        spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
        struct bcm_sysport_tx_ring *ring =
                container_of(napi, struct bcm_sysport_tx_ring, napi);
        unsigned int work_done = 0;

        work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

        if (work_done == 0) {
                napi_complete(napi);
                /* re-enable TX interrupt */
                if (!ring->priv->is_lite)
                        intrl2_1_mask_clear(ring->priv, BIT(ring->index));
                else
                        intrl2_0_mask_clear(ring->priv, BIT(ring->index +
                                            INTRL2_0_TDMA_MBDONE_SHIFT));

                return 0;
        }

        return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
        unsigned int q;

        for (q = 0; q < priv->netdev->num_tx_queues; q++)
                bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
        struct bcm_sysport_priv *priv =
                container_of(napi, struct bcm_sysport_priv, napi);
        struct net_dim_sample dim_sample;
        unsigned int work_done = 0;

        work_done = bcm_sysport_desc_rx(priv, budget);

        priv->rx_c_index += work_done;
        priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

        /* SYSTEMPORT Lite groups the producer/consumer index; the producer
         * is maintained by HW, but writes to it will be ignored while RDMA
         * is active.
         */
        if (!priv->is_lite)
                rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
        else
                rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
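
        /* On Lite the consumer index lives in the upper 16 bits of
         * RDMA_CONS_INDEX (the lower 16 bits hold the HW-maintained
         * producer index), hence the shift above.
         */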

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* re-enable RX interrupts */
                intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
        }

        if (priv->dim.use_dim) {
                net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
                               priv->dim.bytes, &dim_sample);
                net_dim(&priv->dim.dim, dim_sample);
        }

        return work_done;
}

static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
        u32 reg, bit;

        reg = umac_readl(priv, UMAC_MPD_CTRL);
        if (enable)
                reg |= MPD_EN;
        else
                reg &= ~MPD_EN;
        umac_writel(priv, reg, UMAC_MPD_CTRL);

        if (priv->is_lite)
                bit = RBUF_ACPI_EN_LITE;
        else
                bit = RBUF_ACPI_EN;

        reg = rbuf_readl(priv, RBUF_CONTROL);
        if (enable)
                reg |= bit;
        else
                reg &= ~bit;
        rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
        unsigned int index;
        u32 reg;

        /* Disable RXCHK, active filters and Broadcom tag matching */
        reg = rxchk_readl(priv, RXCHK_CONTROL);
        reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
                 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
        rxchk_writel(priv, reg, RXCHK_CONTROL);

        /* Make sure we restore correct CID index in case HW lost
         * its context during deep idle state
         */
        for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
                rxchk_writel(priv, priv->filters_loc[index] <<
                             RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
                rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
        }

        /* Clear the MagicPacket detection logic */
        mpd_enable_set(priv, false);

        reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
        if (reg & INTRL2_0_MPD)
                netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

        if (reg & INTRL2_0_BRCM_MATCH_TAG) {
                reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
                                  RXCHK_BRCM_TAG_MATCH_MASK;
                netdev_info(priv->netdev,
                            "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
        }

        netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
        struct net_dim *dim = container_of(work, struct net_dim, work);
        struct bcm_sysport_net_dim *ndim =
                        container_of(dim, struct bcm_sysport_net_dim, dim);
        struct bcm_sysport_priv *priv =
                        container_of(ndim, struct bcm_sysport_priv, dim);
        struct net_dim_cq_moder cur_profile =
                        net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

        bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
        dim->state = DIM_START_MEASURE;
}
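
/* net_dim(), fed from bcm_sysport_poll(), schedules the work item above
 * whenever its algorithm picks a new profile; the handler programs the
 * chosen usec/pkts pair into the RX coalescing registers and re-arms the
 * measurement cycle.
 */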

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *txr;
        unsigned int ring, ring_bit;

        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                          ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
        intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

        if (unlikely(priv->irq0_stat == 0)) {
                netdev_warn(priv->netdev, "spurious RX interrupt\n");
                return IRQ_NONE;
        }

        if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
                priv->dim.event_ctr++;
                if (likely(napi_schedule_prep(&priv->napi))) {
                        /* disable RX interrupts */
                        intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
                        __napi_schedule_irqoff(&priv->napi);
                }
        }

        /* TX ring is full, perform a full reclaim since we do not know
         * which ring triggered this interrupt
         */
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);

        if (!priv->is_lite)
                goto out;

        for (ring = 0; ring < dev->num_tx_queues; ring++) {
                ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
                if (!(priv->irq0_stat & ring_bit))
                        continue;

                txr = &priv->tx_rings[ring];

                if (likely(napi_schedule_prep(&txr->napi))) {
                        intrl2_0_mask_set(priv, ring_bit);
                        __napi_schedule(&txr->napi);
                }
        }
out:
        return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *txr;
        unsigned int ring;

        priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
                                ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
        intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

        if (unlikely(priv->irq1_stat == 0)) {
                netdev_warn(priv->netdev, "spurious TX interrupt\n");
                return IRQ_NONE;
        }

        for (ring = 0; ring < dev->num_tx_queues; ring++) {
                if (!(priv->irq1_stat & BIT(ring)))
                        continue;

                txr = &priv->tx_rings[ring];

                if (likely(napi_schedule_prep(&txr->napi))) {
                        intrl2_1_mask_set(priv, BIT(ring));
                        __napi_schedule_irqoff(&txr->napi);
                }
        }

        return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
        struct bcm_sysport_priv *priv = dev_id;

        pm_wakeup_event(&priv->pdev->dev, 0);

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        disable_irq(priv->irq0);
        bcm_sysport_rx_isr(priv->irq0, priv);
        enable_irq(priv->irq0);

        if (!priv->is_lite) {
                disable_irq(priv->irq1);
                bcm_sysport_tx_isr(priv->irq1, priv);
                enable_irq(priv->irq1);
        }
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
                                              struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct sk_buff *nskb;
        struct bcm_tsb *tsb;
        u32 csum_info;
        u8 ip_proto;
        u16 csum_start;
        __be16 ip_ver;

        /* Re-allocate SKB if needed */
        if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
                nskb = skb_realloc_headroom(skb, sizeof(*tsb));
                if (!nskb) {
                        dev_kfree_skb_any(skb);
                        priv->mib.tx_realloc_tsb_failed++;
                        dev->stats.tx_errors++;
                        dev->stats.tx_dropped++;
                        return NULL;
                }
                dev_consume_skb_any(skb);
                skb = nskb;
                priv->mib.tx_realloc_tsb++;
        }

        tsb = skb_push(skb, sizeof(*tsb));
        /* Zero-out TSB by default */
        memset(tsb, 0, sizeof(*tsb));

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                ip_ver = skb->protocol;
                switch (ip_ver) {
                case htons(ETH_P_IP):
                        ip_proto = ip_hdr(skb)->protocol;
                        break;
                case htons(ETH_P_IPV6):
                        ip_proto = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        return skb;
                }

                /* Get the checksum offset and the L4 (transport) offset */
                csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
                csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
                csum_info |= (csum_start << L4_PTR_SHIFT);

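                /* For a plain IPv4/TCP frame, for instance, csum_start lands
                 * at the transport header (14-byte Ethernet + 20-byte IP =
                 * offset 34, relative to the frame without the TSB) and
                 * skb->csum_offset is 16 for TCP.
                 */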
                if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
                        csum_info |= L4_LENGTH_VALID;
                        if (ip_proto == IPPROTO_UDP &&
                            ip_ver == htons(ETH_P_IP))
                                csum_info |= L4_UDP;
                } else {
                        csum_info = 0;
                }

                tsb->l4_ptr_dest_map = csum_info;
        }

        return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        struct bcm_sysport_tx_ring *ring;
        struct bcm_sysport_cb *cb;
        struct netdev_queue *txq;
        u32 len_status, addr_lo;
        unsigned int skb_len;
        unsigned long flags;
        dma_addr_t mapping;
        u16 queue;
        int ret;

        queue = skb_get_queue_mapping(skb);
        txq = netdev_get_tx_queue(dev, queue);
        ring = &priv->tx_rings[queue];

        /* lock against tx reclaim in BH context and TX ring full interrupt */
        spin_lock_irqsave(&ring->lock, flags);
        if (unlikely(ring->desc_count == 0)) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "queue %d awake and ring full!\n", queue);
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        /* Insert TSB and checksum infos */
        if (priv->tsb_en) {
                skb = bcm_sysport_insert_tsb(skb, dev);
                if (!skb) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }
        }

        skb_len = skb->len;

        mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
                priv->mib.tx_dma_failed++;
                netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
                          skb->data, skb_len);
                ret = NETDEV_TX_OK;
                goto out;
        }

        /* Remember the SKB for future freeing */
        cb = &ring->cbs[ring->curr_desc];
        cb->skb = skb;
        dma_unmap_addr_set(cb, dma_addr, mapping);
        dma_unmap_len_set(cb, dma_len, skb_len);

        addr_lo = lower_32_bits(mapping);
        len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
        len_status |= (skb_len << DESC_LEN_SHIFT);
        len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
                       DESC_STATUS_SHIFT;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

        ring->curr_desc++;
        if (ring->curr_desc == ring->size)
                ring->curr_desc = 0;
        ring->desc_count--;

        /* Ports are latched, so write upper address first */
        tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
        tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));

        /* Check ring space and update SW control flow */
        if (ring->desc_count == 0)
                netif_tx_stop_queue(txq);

        netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
                  ring->index, ring->desc_count, ring->curr_desc);

        ret = NETDEV_TX_OK;
out:
        spin_unlock_irqrestore(&ring->lock, flags);
        return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
        netdev_warn(dev, "transmit timeout!\n");

        netif_trans_update(dev);
        dev->stats.tx_errors++;

        netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned int changed = 0;
        u32 cmd_bits = 0, reg;

        if (priv->old_link != phydev->link) {
                changed = 1;
                priv->old_link = phydev->link;
        }

        if (priv->old_duplex != phydev->duplex) {
                changed = 1;
                priv->old_duplex = phydev->duplex;
        }

        if (priv->is_lite)
                goto out;

        switch (phydev->speed) {
        case SPEED_2500:
                cmd_bits = CMD_SPEED_2500;
                break;
        case SPEED_1000:
                cmd_bits = CMD_SPEED_1000;
                break;
        case SPEED_100:
                cmd_bits = CMD_SPEED_100;
                break;
        case SPEED_10:
                cmd_bits = CMD_SPEED_10;
                break;
        default:
                break;
        }
        cmd_bits <<= CMD_SPEED_SHIFT;

        if (phydev->duplex == DUPLEX_HALF)
                cmd_bits |= CMD_HD_EN;

        if (priv->old_pause != phydev->pause) {
                changed = 1;
                priv->old_pause = phydev->pause;
        }

        if (!phydev->pause)
                cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

        if (!changed)
                return;

        if (phydev->link) {
                reg = umac_readl(priv, UMAC_CMD);
                reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
                        CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
                        CMD_TX_PAUSE_IGNORE);
                reg |= cmd_bits;
                umac_writel(priv, reg, UMAC_CMD);
        }
out:
        if (changed)
                phy_print_status(phydev);
}
1436
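/* Initialize the software state used for net DIM (Dynamic Interrupt
 * Moderation) on the RX path.
 */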
1437 static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
1438                                  void (*cb)(struct work_struct *work))
1439 {
1440         struct bcm_sysport_net_dim *dim = &priv->dim;
1441
1442         INIT_WORK(&dim->dim.work, cb);
1443         dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1444         dim->event_ctr = 0;
1445         dim->packets = 0;
1446         dim->bytes = 0;
1447 }
1448
1449 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
1450 {
1451         struct bcm_sysport_net_dim *dim = &priv->dim;
1452         struct net_dim_cq_moder moder;
1453         u32 usecs, pkts;
1454
1455         usecs = priv->rx_coalesce_usecs;
1456         pkts = priv->rx_max_coalesced_frames;
1457
1458         /* If DIM was enabled, re-apply default parameters */
1459         if (dim->use_dim) {
1460                 moder = net_dim_get_def_rx_moderation(dim->dim.mode);
1461                 usecs = moder.usec;
1462                 pkts = moder.pkts;
1463         }
1464
1465         bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
1466 }
1467
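/* Allocate the software state for a TX ring and program the matching TDMA
 * descriptor ring in hardware.
 */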
1468 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1469                                     unsigned int index)
1470 {
1471         struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1472         size_t size;
1473         u32 reg;
1474
1475         /* Simple descriptor partitioning for now */
1476         size = 256;
1477
1478         ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1479         if (!ring->cbs) {
1480                 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1481                 return -ENOMEM;
1482         }
1483
1484         /* Initialize SW view of the ring */
1485         spin_lock_init(&ring->lock);
1486         ring->priv = priv;
1487         netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1488         ring->index = index;
1489         ring->size = size;
1490         ring->clean_index = 0;
1491         ring->alloc_size = ring->size;
1492         ring->desc_count = ring->size;
1493         ring->curr_desc = 0;
1494
1495         /* Initialize HW ring */
1496         tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1497         tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1498         tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1499         tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1500
1501         /* Configure QID and port mapping */
1502         reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
1503         reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
1504         if (ring->inspect) {
1505                 reg |= ring->switch_queue & RING_QID_MASK;
1506                 reg |= ring->switch_port << RING_PORT_ID_SHIFT;
1507         } else {
1508                 reg |= RING_IGNORE_STATUS;
1509         }
1510         tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
1511         tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
1512
1513         /* Enable ACB algorithm 2 */
1514         reg = tdma_readl(priv, TDMA_CONTROL);
1515         reg |= tdma_control_bit(priv, ACB_ALGO);
1516         tdma_writel(priv, reg, TDMA_CONTROL);
1517
1518         /* Do not use tdma_control_bit() here because TSB_SWAP1 collides
1519          * with the original definition of ACB_ALGO
1520          */
1521         reg = tdma_readl(priv, TDMA_CONTROL);
1522         if (priv->is_lite)
1523                 reg &= ~BIT(TSB_SWAP1);
1524         /* Set a correct TSB format based on host endian */
1525         if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1526                 reg |= tdma_control_bit(priv, TSB_SWAP0);
1527         else
1528                 reg &= ~tdma_control_bit(priv, TSB_SWAP0);
1529         tdma_writel(priv, reg, TDMA_CONTROL);
1530
1531         /* Program the number of descriptors as MAX_THRESHOLD and set
1532          * the hysteresis trigger threshold
1533          */
1534         tdma_writel(priv, ring->size |
1535                         1 << RING_HYST_THRESH_SHIFT,
1536                         TDMA_DESC_RING_MAX_HYST(index));
1537
1538         /* Enable the ring queue in the arbiter */
1539         reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1540         reg |= (1 << index);
1541         tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1542
1543         napi_enable(&ring->napi);
1544
1545         netif_dbg(priv, hw, priv->netdev,
1546                   "TDMA cfg, size=%d, switch q=%d,port=%d\n",
1547                   ring->size, ring->switch_queue,
1548                   ring->switch_port);
1549
1550         return 0;
1551 }
1552
1553 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1554                                      unsigned int index)
1555 {
1556         struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1557         u32 reg;
1558
1559         /* Caller should stop the TDMA engine */
1560         reg = tdma_readl(priv, TDMA_STATUS);
1561         if (!(reg & TDMA_DISABLED))
1562                 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1563
1564         /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
1565          * fail, so by checking this pointer we know whether the TX ring was
1566          * fully initialized or not.
1567          */
1568         if (!ring->cbs)
1569                 return;
1570
1571         napi_disable(&ring->napi);
1572         netif_napi_del(&ring->napi);
1573
1574         bcm_sysport_tx_clean(priv, ring);
1575
1576         kfree(ring->cbs);
1577         ring->cbs = NULL;
1578         ring->size = 0;
1579         ring->alloc_size = 0;
1580
1581         netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1582 }
1583
1584 /* RDMA helper */
1585 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1586                                   unsigned int enable)
1587 {
1588         unsigned int timeout = 1000;
1589         u32 reg;
1590
1591         reg = rdma_readl(priv, RDMA_CONTROL);
1592         if (enable)
1593                 reg |= RDMA_EN;
1594         else
1595                 reg &= ~RDMA_EN;
1596         rdma_writel(priv, reg, RDMA_CONTROL);
1597
1598         /* Poll for RDMA enable/disable completion */
1599         do {
1600                 reg = rdma_readl(priv, RDMA_STATUS);
1601                 if (!!(reg & RDMA_DISABLED) == !enable)
1602                         return 0;
1603                 usleep_range(1000, 2000);
1604         } while (timeout-- > 0);
1605
1606         netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1607
1608         return -ETIMEDOUT;
1609 }
1610
1611 /* TDMA helper */
1612 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1613                                   unsigned int enable)
1614 {
1615         unsigned int timeout = 1000;
1616         u32 reg;
1617
1618         reg = tdma_readl(priv, TDMA_CONTROL);
1619         if (enable)
1620                 reg |= tdma_control_bit(priv, TDMA_EN);
1621         else
1622                 reg &= ~tdma_control_bit(priv, TDMA_EN);
1623         tdma_writel(priv, reg, TDMA_CONTROL);
1624
1625         /* Poll for TDMA enable/disable completion */
1626         do {
1627                 reg = tdma_readl(priv, TDMA_STATUS);
1628                 if (!!(reg & TDMA_DISABLED) == !enable)
1629                         return 0;
1630
1631                 usleep_range(1000, 2000);
1632         } while (timeout-- > 0);
1633
1634         netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1635
1636         return -ETIMEDOUT;
1637 }
1638
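/* The RX descriptor ring lives in hardware, mapped at SYS_PORT_RDMA_OFFSET,
 * so only the control blocks tracking the SKBs are allocated in system
 * memory here.
 */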
1639 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1640 {
1641         struct bcm_sysport_cb *cb;
1642         u32 reg;
1643         int ret;
1644         int i;
1645
1646         /* Initialize SW view of the RX ring */
1647         priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
1648         priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1649         priv->rx_c_index = 0;
1650         priv->rx_read_ptr = 0;
1651         priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1652                                 GFP_KERNEL);
1653         if (!priv->rx_cbs) {
1654                 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1655                 return -ENOMEM;
1656         }
1657
1658         for (i = 0; i < priv->num_rx_bds; i++) {
1659                 cb = priv->rx_cbs + i;
1660                 cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1661         }
1662
1663         ret = bcm_sysport_alloc_rx_bufs(priv);
1664         if (ret) {
1665                 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1666                 return ret;
1667         }
1668
1669         /* Initialize HW, ensure RDMA is disabled */
1670         reg = rdma_readl(priv, RDMA_STATUS);
1671         if (!(reg & RDMA_DISABLED))
1672                 rdma_enable_set(priv, 0);
1673
1674         rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1675         rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1676         rdma_writel(priv, 0, RDMA_PROD_INDEX);
1677         rdma_writel(priv, 0, RDMA_CONS_INDEX);
1678         rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1679                           RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1680         /* Operate the queue in ring mode */
1681         rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1682         rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1683         rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1684         rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
1685
1686         netif_dbg(priv, hw, priv->netdev,
1687                   "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1688                   priv->num_rx_bds, priv->rx_bds);
1689
1690         return 0;
1691 }
1692
1693 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1694 {
1695         struct bcm_sysport_cb *cb;
1696         unsigned int i;
1697         u32 reg;
1698
1699         /* Caller should ensure RDMA is disabled */
1700         reg = rdma_readl(priv, RDMA_STATUS);
1701         if (!(reg & RDMA_DISABLED))
1702                 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1703
1704         for (i = 0; i < priv->num_rx_bds; i++) {
1705                 cb = &priv->rx_cbs[i];
1706                 if (dma_unmap_addr(cb, dma_addr))
1707                         dma_unmap_single(&priv->pdev->dev,
1708                                          dma_unmap_addr(cb, dma_addr),
1709                                          RX_BUF_LENGTH, DMA_FROM_DEVICE);
1710                 bcm_sysport_free_cb(cb);
1711         }
1712
1713         kfree(priv->rx_cbs);
1714         priv->rx_cbs = NULL;
1715
1716         netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1717 }
1718
1719 static void bcm_sysport_set_rx_mode(struct net_device *dev)
1720 {
1721         struct bcm_sysport_priv *priv = netdev_priv(dev);
1722         u32 reg;
1723
1724         if (priv->is_lite)
1725                 return;
1726
1727         reg = umac_readl(priv, UMAC_CMD);
1728         if (dev->flags & IFF_PROMISC)
1729                 reg |= CMD_PROMISC;
1730         else
1731                 reg &= ~CMD_PROMISC;
1732         umac_writel(priv, reg, UMAC_CMD);
1733
1734         /* No support for ALLMULTI */
1735         if (dev->flags & IFF_ALLMULTI)
1736                 return;
1737 }
1738
1739 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1740                                    u32 mask, unsigned int enable)
1741 {
1742         u32 reg;
1743
1744         if (!priv->is_lite) {
1745                 reg = umac_readl(priv, UMAC_CMD);
1746                 if (enable)
1747                         reg |= mask;
1748                 else
1749                         reg &= ~mask;
1750                 umac_writel(priv, reg, UMAC_CMD);
1751         } else {
1752                 reg = gib_readl(priv, GIB_CONTROL);
1753                 if (enable)
1754                         reg |= mask;
1755                 else
1756                         reg &= ~mask;
1757                 gib_writel(priv, reg, GIB_CONTROL);
1758         }
1759
1760         /* UniMAC stops on a packet boundary; wait for a full-sized packet
1761          * to be processed (1 msec).
1762          */
1763         if (enable == 0)
1764                 usleep_range(1000, 2000);
1765 }
1766
1767 static inline void umac_reset(struct bcm_sysport_priv *priv)
1768 {
1769         u32 reg;
1770
1771         if (priv->is_lite)
1772                 return;
1773
1774         reg = umac_readl(priv, UMAC_CMD);
1775         reg |= CMD_SW_RESET;
1776         umac_writel(priv, reg, UMAC_CMD);
1777         udelay(10);
1778         reg = umac_readl(priv, UMAC_CMD);
1779         reg &= ~CMD_SW_RESET;
1780         umac_writel(priv, reg, UMAC_CMD);
1781 }
1782
1783 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1784                              unsigned char *addr)
1785 {
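        /* The first four bytes of the Ethernet address go into MAC0, the
         * remaining two into the lower half of MAC1.
         */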
1786         u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
1787                     addr[3];
1788         u32 mac1 = (addr[4] << 8) | addr[5];
1789
1790         if (!priv->is_lite) {
1791                 umac_writel(priv, mac0, UMAC_MAC0);
1792                 umac_writel(priv, mac1, UMAC_MAC1);
1793         } else {
1794                 gib_writel(priv, mac0, GIB_MAC0);
1795                 gib_writel(priv, mac1, GIB_MAC1);
1796         }
1797 }
1798
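/* Momentarily assert, then release, the TOPCTRL RX and TX flush controls,
 * giving the flush 1 ms to complete.
 */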
1799 static void topctrl_flush(struct bcm_sysport_priv *priv)
1800 {
1801         topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1802         topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1803         mdelay(1);
1804         topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1805         topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1806 }
1807
1808 static int bcm_sysport_change_mac(struct net_device *dev, void *p)
1809 {
1810         struct bcm_sysport_priv *priv = netdev_priv(dev);
1811         struct sockaddr *addr = p;
1812
1813         if (!is_valid_ether_addr(addr->sa_data))
1814                 return -EINVAL;
1815
1816         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1817
1818         /* The interface is disabled; changes to the MAC address will be
1819          * reflected on the next open call
1820          */
1821         if (!netif_running(dev))
1822                 return 0;
1823
1824         umac_set_hw_addr(priv, dev->dev_addr);
1825
1826         return 0;
1827 }
1828
1829 static void bcm_sysport_get_stats64(struct net_device *dev,
1830                                     struct rtnl_link_stats64 *stats)
1831 {
1832         struct bcm_sysport_priv *priv = netdev_priv(dev);
1833         struct bcm_sysport_stats64 *stats64 = &priv->stats64;
1834         unsigned int start;
1835
1836         netdev_stats_to_stats64(stats, &dev->stats);
1837
1838         bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1839                                     &stats->tx_packets);
1840
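        /* Retry until we get a consistent snapshot of the 64-bit RX counters */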
1841         do {
1842                 start = u64_stats_fetch_begin_irq(&priv->syncp);
1843                 stats->rx_packets = stats64->rx_packets;
1844                 stats->rx_bytes = stats64->rx_bytes;
1845         } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
1846 }
1847
1848 static void bcm_sysport_netif_start(struct net_device *dev)
1849 {
1850         struct bcm_sysport_priv *priv = netdev_priv(dev);
1851
1852         /* Enable NAPI */
1853         bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
1854         bcm_sysport_init_rx_coalesce(priv);
1855         napi_enable(&priv->napi);
1856
1857         /* Enable RX interrupt and TX ring full interrupt */
1858         intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1859
1860         phy_start(dev->phydev);
1861
1862         /* Enable TX interrupts for the TXQs */
1863         if (!priv->is_lite)
1864                 intrl2_1_mask_clear(priv, 0xffffffff);
1865         else
1866                 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
1867 }
1868
1869 static void rbuf_init(struct bcm_sysport_priv *priv)
1870 {
1871         u32 reg;
1872
1873         reg = rbuf_readl(priv, RBUF_CONTROL);
1874         reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
1875         /* Set a correct RSB format on SYSTEMPORT Lite */
1876         if (priv->is_lite)
1877                 reg &= ~RBUF_RSB_SWAP1;
1878
1879         /* Set a correct RSB format based on host endian */
1880         if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1881                 reg |= RBUF_RSB_SWAP0;
1882         else
1883                 reg &= ~RBUF_RSB_SWAP0;
1884         rbuf_writel(priv, reg, RBUF_CONTROL);
1885 }
1886
1887 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1888 {
1889         intrl2_0_mask_set(priv, 0xffffffff);
1890         intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1891         if (!priv->is_lite) {
1892                 intrl2_1_mask_set(priv, 0xffffffff);
1893                 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1894         }
1895 }
1896
1897 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1898 {
1899         u32 reg;
1900
1901         reg = gib_readl(priv, GIB_CONTROL);
1902         /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
1903         if (netdev_uses_dsa(priv->netdev)) {
1904                 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
1905                 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
1906         }
1907         reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
1908         reg |= 12 << GIB_IPG_LEN_SHIFT;
1909         gib_writel(priv, reg, GIB_CONTROL);
1910 }
1911
1912 static int bcm_sysport_open(struct net_device *dev)
1913 {
1914         struct bcm_sysport_priv *priv = netdev_priv(dev);
1915         struct phy_device *phydev;
1916         unsigned int i;
1917         int ret;
1918
1919         /* Reset UniMAC */
1920         umac_reset(priv);
1921
1922         /* Flush TX and RX FIFOs at TOPCTRL level */
1923         topctrl_flush(priv);
1924
1925         /* Disable the UniMAC RX/TX */
1926         umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
1927
1928         /* Enable RBUF 4-byte alignment and the Receive Status Block */
1929         rbuf_init(priv);
1930
1931         /* Set maximum frame length */
1932         if (!priv->is_lite)
1933                 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1934         else
1935                 gib_set_pad_extension(priv);
1936
1937         /* Apply features again in case we changed them while interface was
1938          * down
1939          */
1940         bcm_sysport_set_features(dev, dev->features);
1941
1942         /* Set MAC address */
1943         umac_set_hw_addr(priv, dev->dev_addr);
1944
1945         phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1946                                 0, priv->phy_interface);
1947         if (!phydev) {
1948                 netdev_err(dev, "could not attach to PHY\n");
1949                 return -ENODEV;
1950         }
1951
1952         /* Reset housekeeping link status */
1953         priv->old_duplex = -1;
1954         priv->old_link = -1;
1955         priv->old_pause = -1;
1956
1957         /* mask all interrupts and request them */
1958         bcm_sysport_mask_all_intrs(priv);
1959
1960         ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
1961         if (ret) {
1962                 netdev_err(dev, "failed to request RX interrupt\n");
1963                 goto out_phy_disconnect;
1964         }
1965
1966         if (!priv->is_lite) {
1967                 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
1968                                   dev->name, dev);
1969                 if (ret) {
1970                         netdev_err(dev, "failed to request TX interrupt\n");
1971                         goto out_free_irq0;
1972                 }
1973         }
1974
1975         /* Initialize both hardware and software ring */
1976         for (i = 0; i < dev->num_tx_queues; i++) {
1977                 ret = bcm_sysport_init_tx_ring(priv, i);
1978                 if (ret) {
1979                         netdev_err(dev, "failed to initialize TX ring %d\n",
1980                                    i);
1981                         goto out_free_tx_ring;
1982                 }
1983         }
1984
1985         /* Initialize linked-list */
1986         tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
1987
1988         /* Initialize RX ring */
1989         ret = bcm_sysport_init_rx_ring(priv);
1990         if (ret) {
1991                 netdev_err(dev, "failed to initialize RX ring\n");
1992                 goto out_free_rx_ring;
1993         }
1994
1995         /* Turn on RDMA */
1996         ret = rdma_enable_set(priv, 1);
1997         if (ret)
1998                 goto out_free_rx_ring;
1999
2000         /* Turn on TDMA */
2001         ret = tdma_enable_set(priv, 1);
2002         if (ret)
2003                 goto out_clear_rx_int;
2004
2005         /* Turn on UniMAC TX/RX */
2006         umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
2007
2008         bcm_sysport_netif_start(dev);
2009
2010         netif_tx_start_all_queues(dev);
2011
2012         return 0;
2013
2014 out_clear_rx_int:
2015         intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
2016 out_free_rx_ring:
2017         bcm_sysport_fini_rx_ring(priv);
2018 out_free_tx_ring:
2019         for (i = 0; i < dev->num_tx_queues; i++)
2020                 bcm_sysport_fini_tx_ring(priv, i);
2021         if (!priv->is_lite)
2022                 free_irq(priv->irq1, dev);
2023 out_free_irq0:
2024         free_irq(priv->irq0, dev);
2025 out_phy_disconnect:
2026         phy_disconnect(phydev);
2027         return ret;
2028 }
2029
2030 static void bcm_sysport_netif_stop(struct net_device *dev)
2031 {
2032         struct bcm_sysport_priv *priv = netdev_priv(dev);
2033
2034         /* stop all software from updating hardware */
2035         netif_tx_disable(dev);
2036         napi_disable(&priv->napi);
2037         cancel_work_sync(&priv->dim.dim.work);
2038         phy_stop(dev->phydev);
2039
2040         /* mask all interrupts */
2041         bcm_sysport_mask_all_intrs(priv);
2042 }
2043
2044 static int bcm_sysport_stop(struct net_device *dev)
2045 {
2046         struct bcm_sysport_priv *priv = netdev_priv(dev);
2047         unsigned int i;
2048         int ret;
2049
2050         bcm_sysport_netif_stop(dev);
2051
2052         /* Disable UniMAC RX */
2053         umac_enable_set(priv, CMD_RX_EN, 0);
2054
2055         ret = tdma_enable_set(priv, 0);
2056         if (ret) {
2057                 netdev_err(dev, "timeout disabling TDMA\n");
2058                 return ret;
2059         }
2060
2061         /* Wait for a maximum packet size to be drained */
2062         usleep_range(2000, 3000);
2063
2064         ret = rdma_enable_set(priv, 0);
2065         if (ret) {
2066                 netdev_err(dev, "timeout disabling RDMA\n");
2067                 return ret;
2068         }
2069
2070         /* Disable UniMAC TX */
2071         umac_enable_set(priv, CMD_TX_EN, 0);
2072
2073         /* Free RX/TX rings SW structures */
2074         for (i = 0; i < dev->num_tx_queues; i++)
2075                 bcm_sysport_fini_tx_ring(priv, i);
2076         bcm_sysport_fini_rx_ring(priv);
2077
2078         free_irq(priv->irq0, dev);
2079         if (!priv->is_lite)
2080                 free_irq(priv->irq1, dev);
2081
2082         /* Disconnect from PHY */
2083         phy_disconnect(dev->phydev);
2084
2085         return 0;
2086 }
2087
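/* Return the index of the RXCHK filter programmed with the classification ID
 * @location, or -EINVAL if no such filter exists.
 */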
2088 static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
2089                                  u64 location)
2090 {
2091         unsigned int index;
2092         u32 reg;
2093
2094         for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2095                 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2096                 reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
2097                 reg &= RXCHK_BRCM_TAG_CID_MASK;
2098                 if (reg == location)
2099                         return index;
2100         }
2101
2102         return -EINVAL;
2103 }
2104
2105 static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
2106                                 struct ethtool_rxnfc *nfc)
2107 {
2108         int index;
2109
2110         /* Error out if this is not a rule that we know about */
2111         index = bcm_sysport_rule_find(priv, nfc->fs.location);
2112         if (index < 0)
2113                 return -EOPNOTSUPP;
2114
2115         nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
2116
2117         return 0;
2118 }
2119
2120 static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
2121                                 struct ethtool_rxnfc *nfc)
2122 {
2123         unsigned int index;
2124         u32 reg;
2125
2126         /* We cannot match locations greater than what the classification ID
2127          * permits (256 entries)
2128          */
2129         if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
2130                 return -E2BIG;
2131
2132         /* We cannot support flows that are not destined for a wake-up */
2133         if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
2134                 return -EOPNOTSUPP;
2135
2136         /* All filters are already in use, we cannot match more rules */
2137         if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
2138             RXCHK_BRCM_TAG_MAX)
2139                 return -ENOSPC;
2140
2141         index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
2142         if (index >= RXCHK_BRCM_TAG_MAX)
2143                 return -ENOSPC;
2144
2145         /* Location is the classification ID, and index is the position
2146          * within one of our 8 possible filters to be programmed
2147          */
2148         reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2149         reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
2150         reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
2151         rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
2152         rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
2153
2154         priv->filters_loc[index] = nfc->fs.location;
2155         set_bit(index, priv->filters);
2156
2157         return 0;
2158 }
2159
2160 static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
2161                                 u64 location)
2162 {
2163         int index;
2164
2165         /* Error out if this is not a rule that we know about */
2166         index = bcm_sysport_rule_find(priv, location);
2167         if (index < 0)
2168                 return -EOPNOTSUPP;
2169
2170         /* No need to disable this filter if it was enabled; this is
2171          * taken care of at suspend time by bcm_sysport_suspend_to_wol
2172          */
2173         clear_bit(index, priv->filters);
2174         priv->filters_loc[index] = 0;
2175
2176         return 0;
2177 }
2178
2179 static int bcm_sysport_get_rxnfc(struct net_device *dev,
2180                                  struct ethtool_rxnfc *nfc, u32 *rule_locs)
2181 {
2182         struct bcm_sysport_priv *priv = netdev_priv(dev);
2183         int ret = -EOPNOTSUPP;
2184
2185         switch (nfc->cmd) {
2186         case ETHTOOL_GRXCLSRULE:
2187                 ret = bcm_sysport_rule_get(priv, nfc);
2188                 break;
2189         default:
2190                 break;
2191         }
2192
2193         return ret;
2194 }
2195
2196 static int bcm_sysport_set_rxnfc(struct net_device *dev,
2197                                  struct ethtool_rxnfc *nfc)
2198 {
2199         struct bcm_sysport_priv *priv = netdev_priv(dev);
2200         int ret = -EOPNOTSUPP;
2201
2202         switch (nfc->cmd) {
2203         case ETHTOOL_SRXCLSRLINS:
2204                 ret = bcm_sysport_rule_set(priv, nfc);
2205                 break;
2206         case ETHTOOL_SRXCLSRLDEL:
2207                 ret = bcm_sysport_rule_del(priv, nfc->fs.location);
2208                 break;
2209         default:
2210                 break;
2211         }
2212
2213         return ret;
2214 }
2215
2216 static const struct ethtool_ops bcm_sysport_ethtool_ops = {
2217         .get_drvinfo            = bcm_sysport_get_drvinfo,
2218         .get_msglevel           = bcm_sysport_get_msglvl,
2219         .set_msglevel           = bcm_sysport_set_msglvl,
2220         .get_link               = ethtool_op_get_link,
2221         .get_strings            = bcm_sysport_get_strings,
2222         .get_ethtool_stats      = bcm_sysport_get_stats,
2223         .get_sset_count         = bcm_sysport_get_sset_count,
2224         .get_wol                = bcm_sysport_get_wol,
2225         .set_wol                = bcm_sysport_set_wol,
2226         .get_coalesce           = bcm_sysport_get_coalesce,
2227         .set_coalesce           = bcm_sysport_set_coalesce,
2228         .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2229         .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2230         .get_rxnfc              = bcm_sysport_get_rxnfc,
2231         .set_rxnfc              = bcm_sysport_set_rxnfc,
2232 };
2233
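/* For DSA-tagged traffic, map the switch port and queue encoded in the SKB
 * queue mapping onto the SYSTEMPORT TX ring set up to inspect that queue.
 */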
2234 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
2235                                     struct net_device *sb_dev)
2236 {
2237         struct bcm_sysport_priv *priv = netdev_priv(dev);
2238         u16 queue = skb_get_queue_mapping(skb);
2239         struct bcm_sysport_tx_ring *tx_ring;
2240         unsigned int q, port;
2241
2242         if (!netdev_uses_dsa(dev))
2243                 return netdev_pick_tx(dev, skb, NULL);
2244
2245         /* DSA tagging layer will have configured the correct queue */
2246         q = BRCM_TAG_GET_QUEUE(queue);
2247         port = BRCM_TAG_GET_PORT(queue);
2248         tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2249
2250         if (unlikely(!tx_ring))
2251                 return netdev_pick_tx(dev, skb, NULL);
2252
2253         return tx_ring->index;
2254 }
2255
2256 static const struct net_device_ops bcm_sysport_netdev_ops = {
2257         .ndo_start_xmit         = bcm_sysport_xmit,
2258         .ndo_tx_timeout         = bcm_sysport_tx_timeout,
2259         .ndo_open               = bcm_sysport_open,
2260         .ndo_stop               = bcm_sysport_stop,
2261         .ndo_set_features       = bcm_sysport_set_features,
2262         .ndo_set_rx_mode        = bcm_sysport_set_rx_mode,
2263         .ndo_set_mac_address    = bcm_sysport_change_mac,
2264 #ifdef CONFIG_NET_POLL_CONTROLLER
2265         .ndo_poll_controller    = bcm_sysport_poll_controller,
2266 #endif
2267         .ndo_get_stats64        = bcm_sysport_get_stats64,
2268         .ndo_select_queue       = bcm_sysport_select_queue,
2269 };
2270
2271 static int bcm_sysport_map_queues(struct notifier_block *nb,
2272                                   struct dsa_notifier_register_info *info)
2273 {
2274         struct bcm_sysport_tx_ring *ring;
2275         struct bcm_sysport_priv *priv;
2276         struct net_device *slave_dev;
2277         unsigned int num_tx_queues;
2278         unsigned int q, qp, port;
2279         struct net_device *dev;
2280
2281         priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
2282         if (priv->netdev != info->master)
2283                 return 0;
2284
2285         dev = info->master;
2286
2287         /* We can't be setting up queue inspection for switches that are
2288          * not directly attached to us
2289          */
2290         if (info->switch_number)
2291                 return 0;
2292
2293         if (dev->netdev_ops != &bcm_sysport_netdev_ops)
2294                 return 0;
2295
2296         port = info->port_number;
2297         slave_dev = info->info.dev;
2298
2299         /* SYSTEMPORT Lite has half as many queues, so we cannot do a 1:1
2300          * mapping; we can only do a 2:1 mapping. Halving the number of
2301          * per-port (slave_dev) network device queues achieves just that.
2302          * This needs to happen now, before any slave network device is
2303          * used, so that it accurately reflects the number of real TX queues.
2304          */
2305         if (priv->is_lite)
2306                 netif_set_real_num_tx_queues(slave_dev,
2307                                              slave_dev->num_tx_queues / 2);
2308
2309         num_tx_queues = slave_dev->real_num_tx_queues;
2310
2311         if (priv->per_port_num_tx_queues &&
2312             priv->per_port_num_tx_queues != num_tx_queues)
2313                 netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
2314
2315         priv->per_port_num_tx_queues = num_tx_queues;
2316
2317         for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
2318              q++) {
2319                 ring = &priv->tx_rings[q];
2320
2321                 if (ring->inspect)
2322                         continue;
2323
2324                 /* Just remember the mapping; the actual programming is
2325                  * done during bcm_sysport_init_tx_ring
2326                  */
2327                 ring->switch_queue = qp;
2328                 ring->switch_port = port;
2329                 ring->inspect = true;
2330                 priv->ring_map[q + port * num_tx_queues] = ring;
2331                 qp++;
2332         }
2333
2334         return 0;
2335 }
2336
2337 static int bcm_sysport_unmap_queues(struct notifier_block *nb,
2338                                     struct dsa_notifier_register_info *info)
2339 {
2340         struct bcm_sysport_tx_ring *ring;
2341         struct bcm_sysport_priv *priv;
2342         struct net_device *slave_dev;
2343         unsigned int num_tx_queues;
2344         struct net_device *dev;
2345         unsigned int q, port;
2346
2347         priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
2348         if (priv->netdev != info->master)
2349                 return 0;
2350
2351         dev = info->master;
2352
2353         if (dev->netdev_ops != &bcm_sysport_netdev_ops)
2354                 return 0;
2355
2356         port = info->port_number;
2357         slave_dev = info->info.dev;
2358
2359         num_tx_queues = slave_dev->real_num_tx_queues;
2360
2361         for (q = 0; q < dev->num_tx_queues; q++) {
2362                 ring = &priv->tx_rings[q];
2363
2364                 if (ring->switch_port != port)
2365                         continue;
2366
2367                 if (!ring->inspect)
2368                         continue;
2369
2370                 ring->inspect = false;
2371                 priv->ring_map[q + port * num_tx_queues] = NULL;
2372         }
2373
2374         return 0;
2375 }
2376
2377 static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
2378                                     unsigned long event, void *ptr)
2379 {
2380         int ret = NOTIFY_DONE;
2381
2382         switch (event) {
2383         case DSA_PORT_REGISTER:
2384                 ret = bcm_sysport_map_queues(nb, ptr);
2385                 break;
2386         case DSA_PORT_UNREGISTER:
2387                 ret = bcm_sysport_unmap_queues(nb, ptr);
2388                 break;
2389         }
2390
2391         return notifier_from_errno(ret);
2392 }
2393
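/* The chip revision is encoded as major.minor in the low 16 bits of REV_CNTL */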
2394 #define REV_FMT "v%2x.%02x"
2395
2396 static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
2397         [SYSTEMPORT] = {
2398                 .is_lite = false,
2399                 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
2400         },
2401         [SYSTEMPORT_LITE] = {
2402                 .is_lite = true,
2403                 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
2404         },
2405 };
2406
2407 static const struct of_device_id bcm_sysport_of_match[] = {
2408         { .compatible = "brcm,systemportlite-v1.00",
2409           .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
2410         { .compatible = "brcm,systemport-v1.00",
2411           .data = &bcm_sysport_params[SYSTEMPORT] },
2412         { .compatible = "brcm,systemport",
2413           .data = &bcm_sysport_params[SYSTEMPORT] },
2414         { /* sentinel */ }
2415 };
2416 MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
2417
2418 static int bcm_sysport_probe(struct platform_device *pdev)
2419 {
2420         const struct bcm_sysport_hw_params *params;
2421         const struct of_device_id *of_id = NULL;
2422         struct bcm_sysport_priv *priv;
2423         struct device_node *dn;
2424         struct net_device *dev;
2425         const void *macaddr;
2426         struct resource *r;
2427         u32 txq, rxq;
2428         int ret;
2429
2430         dn = pdev->dev.of_node;
2431         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2432         of_id = of_match_node(bcm_sysport_of_match, dn);
2433         if (!of_id || !of_id->data)
2434                 return -EINVAL;
2435
2436         /* We need to know fairly quickly what type of adapter we have */
2437         params = of_id->data;
2438
2439         /* Read the Transmit/Receive Queue properties */
2440         if (of_property_read_u32(dn, "systemport,num-txq", &txq))
2441                 txq = TDMA_NUM_RINGS;
2442         if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
2443                 rxq = 1;
2444
2445         /* Sanity check the number of transmit queues */
2446         if (!txq || txq > TDMA_NUM_RINGS)
2447                 return -EINVAL;
2448
2449         dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2450         if (!dev)
2451                 return -ENOMEM;
2452
2453         /* Initialize private members */
2454         priv = netdev_priv(dev);
2455
2456         /* Allocate number of TX rings */
2457         priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2458                                       sizeof(struct bcm_sysport_tx_ring),
2459                                       GFP_KERNEL);
2460         if (!priv->tx_rings)
2461                 return -ENOMEM;
2462
2463         priv->is_lite = params->is_lite;
2464         priv->num_rx_desc_words = params->num_rx_desc_words;
2465
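        /* SYSTEMPORT Lite signals TX completions on the first interrupt line
         * (see bcm_sysport_netif_start), so the Wake-on-LAN interrupt is the
         * second resource rather than the third.
         */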
2466         priv->irq0 = platform_get_irq(pdev, 0);
2467         if (!priv->is_lite) {
2468                 priv->irq1 = platform_get_irq(pdev, 1);
2469                 priv->wol_irq = platform_get_irq(pdev, 2);
2470         } else {
2471                 priv->wol_irq = platform_get_irq(pdev, 1);
2472         }
2473         if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2474                 dev_err(&pdev->dev, "invalid interrupts\n");
2475                 ret = -EINVAL;
2476                 goto err_free_netdev;
2477         }
2478
2479         priv->base = devm_ioremap_resource(&pdev->dev, r);
2480         if (IS_ERR(priv->base)) {
2481                 ret = PTR_ERR(priv->base);
2482                 goto err_free_netdev;
2483         }
2484
2485         priv->netdev = dev;
2486         priv->pdev = pdev;
2487
2488         priv->phy_interface = of_get_phy_mode(dn);
2489         /* Default to GMII interface mode */
2490         if (priv->phy_interface < 0)
2491                 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2492
2493         /* In the case of a fixed PHY, the DT node associated
2494          * with the PHY is the Ethernet MAC DT node.
2495          */
2496         if (of_phy_is_fixed_link(dn)) {
2497                 ret = of_phy_register_fixed_link(dn);
2498                 if (ret) {
2499                         dev_err(&pdev->dev, "failed to register fixed PHY\n");
2500                         goto err_free_netdev;
2501                 }
2502
2503                 priv->phy_dn = dn;
2504         }
2505
2506         /* Initialize netdevice members */
2507         macaddr = of_get_mac_address(dn);
2508         if (IS_ERR(macaddr)) {
2509                 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
2510                 eth_hw_addr_random(dev);
2511         } else {
2512                 ether_addr_copy(dev->dev_addr, macaddr);
2513         }
2514
2515         SET_NETDEV_DEV(dev, &pdev->dev);
2516         dev_set_drvdata(&pdev->dev, dev);
2517         dev->ethtool_ops = &bcm_sysport_ethtool_ops;
2518         dev->netdev_ops = &bcm_sysport_netdev_ops;
2519         netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
2520
2521         dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
2522                          NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2523         dev->hw_features |= dev->features;
2524         dev->vlan_features |= dev->features;
2525
2526         /* Request the WOL interrupt and advertise suspend if available */
2527         priv->wol_irq_disabled = 1;
2528         ret = devm_request_irq(&pdev->dev, priv->wol_irq,
2529                                bcm_sysport_wol_isr, 0, dev->name, priv);
2530         if (!ret)
2531                 device_set_wakeup_capable(&pdev->dev, 1);
2532
2533         /* Set the needed headroom once and for all */
2534         BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
2535         dev->needed_headroom += sizeof(struct bcm_tsb);
2536
2537         /* libphy will adjust the link state accordingly */
2538         netif_carrier_off(dev);
2539
2540         priv->rx_max_coalesced_frames = 1;
2541         u64_stats_init(&priv->syncp);
2542
2543         priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
2544
2545         ret = register_dsa_notifier(&priv->dsa_notifier);
2546         if (ret) {
2547                 dev_err(&pdev->dev, "failed to register DSA notifier\n");
2548                 goto err_deregister_fixed_link;
2549         }
2550
2551         ret = register_netdev(dev);
2552         if (ret) {
2553                 dev_err(&pdev->dev, "failed to register net_device\n");
2554                 goto err_deregister_notifier;
2555         }
2556
2557         priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2558         dev_info(&pdev->dev,
2559                  "Broadcom SYSTEMPORT%s " REV_FMT
2560                  " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
2561                  priv->is_lite ? " Lite" : "",
2562                  (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2563                  priv->irq0, priv->irq1, txq, rxq);
2564
2565         return 0;
2566
2567 err_deregister_notifier:
2568         unregister_dsa_notifier(&priv->dsa_notifier);
2569 err_deregister_fixed_link:
2570         if (of_phy_is_fixed_link(dn))
2571                 of_phy_deregister_fixed_link(dn);
2572 err_free_netdev:
2573         free_netdev(dev);
2574         return ret;
2575 }
2576
2577 static int bcm_sysport_remove(struct platform_device *pdev)
2578 {
2579         struct net_device *dev = dev_get_drvdata(&pdev->dev);
2580         struct bcm_sysport_priv *priv = netdev_priv(dev);
2581         struct device_node *dn = pdev->dev.of_node;
2582
2583         /* Not much to do; ndo_close has been called
2584          * and we use managed allocations
2585          */
2586         unregister_dsa_notifier(&priv->dsa_notifier);
2587         unregister_netdev(dev);
2588         if (of_phy_is_fixed_link(dn))
2589                 of_phy_deregister_fixed_link(dn);
2590         free_netdev(dev);
2591         dev_set_drvdata(&pdev->dev, NULL);
2592
2593         return 0;
2594 }
2595
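/* Configure magic packet detection (with an optional SecureOn password and
 * RXCHK wake-up filters) and verify that the RBUF entered WoL mode.
 */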
2596 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2597 {
2598         struct net_device *ndev = priv->netdev;
2599         unsigned int timeout = 1000;
2600         unsigned int index, i = 0;
2601         u32 reg;
2602
2603         reg = umac_readl(priv, UMAC_MPD_CTRL);
2604         if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2605                 reg |= MPD_EN;
2606         reg &= ~PSW_EN;
2607         if (priv->wolopts & WAKE_MAGICSECURE) {
2608                 /* Program the SecureOn password */
2609                 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2610                             UMAC_PSW_MS);
2611                 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2612                             UMAC_PSW_LS);
2613                 reg |= PSW_EN;
2614         }
2615         umac_writel(priv, reg, UMAC_MPD_CTRL);
2616
2617         if (priv->wolopts & WAKE_FILTER) {
2618                 /* Turn on ACPI matching to steal packets from RBUF */
2619                 reg = rbuf_readl(priv, RBUF_CONTROL);
2620                 if (priv->is_lite)
2621                         reg |= RBUF_ACPI_EN_LITE;
2622                 else
2623                         reg |= RBUF_ACPI_EN;
2624                 rbuf_writel(priv, reg, RBUF_CONTROL);
2625
2626                 /* Enable RXCHK, active filters and Broadcom tag matching */
2627                 reg = rxchk_readl(priv, RXCHK_CONTROL);
2628                 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
2629                          RXCHK_BRCM_TAG_MATCH_SHIFT);
2630                 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2631                         reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
2632                         i++;
2633                 }
2634                 reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
2635                 rxchk_writel(priv, reg, RXCHK_CONTROL);
2636         }
2637
2638         /* Make sure RBUF entered WoL mode as a result */
2639         do {
2640                 reg = rbuf_readl(priv, RBUF_STATUS);
2641                 if (reg & RBUF_WOL_MODE)
2642                         break;
2643
2644                 udelay(10);
2645         } while (timeout-- > 0);
2646
2647         /* Do not leave the UniMAC RBUF matching only MPD packets */
2648         if (!timeout) {
2649                 mpd_enable_set(priv, false);
2650                 netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2651                 return -ETIMEDOUT;
2652         }
2653
2654         /* UniMAC receive needs to be turned on */
2655         umac_enable_set(priv, CMD_RX_EN, 1);
2656
2657         netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2658
2659         return 0;
2660 }
2661
2662 static int __maybe_unused bcm_sysport_suspend(struct device *d)
2663 {
2664         struct net_device *dev = dev_get_drvdata(d);
2665         struct bcm_sysport_priv *priv = netdev_priv(dev);
2666         unsigned int i;
2667         int ret = 0;
2668         u32 reg;
2669
2670         if (!netif_running(dev))
2671                 return 0;
2672
2673         netif_device_detach(dev);
2674
2675         bcm_sysport_netif_stop(dev);
2676
2677         phy_suspend(dev->phydev);
2678
2679         /* Disable UniMAC RX */
2680         umac_enable_set(priv, CMD_RX_EN, 0);
2681
2682         ret = rdma_enable_set(priv, 0);
2683         if (ret) {
2684                 netdev_err(dev, "RDMA timeout!\n");
2685                 return ret;
2686         }
2687
2688         /* Disable RXCHK if enabled */
2689         if (priv->rx_chk_en) {
2690                 reg = rxchk_readl(priv, RXCHK_CONTROL);
2691                 reg &= ~RXCHK_EN;
2692                 rxchk_writel(priv, reg, RXCHK_CONTROL);
2693         }
2694
2695         /* Flush RX pipe */
2696         if (!priv->wolopts)
2697                 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
2698
2699         ret = tdma_enable_set(priv, 0);
2700         if (ret) {
2701                 netdev_err(dev, "TDMA timeout!\n");
2702                 return ret;
2703         }
2704
2705         /* Wait for a packet boundary */
2706         usleep_range(2000, 3000);
2707
2708         umac_enable_set(priv, CMD_TX_EN, 0);
2709
2710         topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2711
2712         /* Free RX/TX rings SW structures */
2713         for (i = 0; i < dev->num_tx_queues; i++)
2714                 bcm_sysport_fini_tx_ring(priv, i);
2715         bcm_sysport_fini_rx_ring(priv);
2716
2717         /* Get prepared for Wake-on-LAN */
2718         if (device_may_wakeup(d) && priv->wolopts)
2719                 ret = bcm_sysport_suspend_to_wol(priv);
2720
2721         return ret;
2722 }
2723
2724 static int __maybe_unused bcm_sysport_resume(struct device *d)
2725 {
2726         struct net_device *dev = dev_get_drvdata(d);
2727         struct bcm_sysport_priv *priv = netdev_priv(dev);
2728         unsigned int i;
2729         int ret;
2730
2731         if (!netif_running(dev))
2732                 return 0;
2733
2734         umac_reset(priv);
2735
2736         /* We may have been suspended and never received a WOL event that
2737          * would turn off MPD detection; take care of that now
2738          */
2739         bcm_sysport_resume_from_wol(priv);
2740
2741         /* Initialize both hardware and software ring */
2742         for (i = 0; i < dev->num_tx_queues; i++) {
2743                 ret = bcm_sysport_init_tx_ring(priv, i);
2744                 if (ret) {
2745                         netdev_err(dev, "failed to initialize TX ring %d\n",
2746                                    i);
2747                         goto out_free_tx_rings;
2748                 }
2749         }
2750
2751         /* Initialize linked-list */
2752         tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2753
2754         /* Initialize RX ring */
2755         ret = bcm_sysport_init_rx_ring(priv);
2756         if (ret) {
2757                 netdev_err(dev, "failed to initialize RX ring\n");
2758                 goto out_free_rx_ring;
2759         }
2760
2761         /* RX pipe enable */
2762         topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2763
2764         ret = rdma_enable_set(priv, 1);
2765         if (ret) {
2766                 netdev_err(dev, "failed to enable RDMA\n");
2767                 goto out_free_rx_ring;
2768         }
2769
2770         /* Restore enabled features */
2771         bcm_sysport_set_features(dev, dev->features);
2772
2773         rbuf_init(priv);
2774
2775         /* Set maximum frame length */
2776         if (!priv->is_lite)
2777                 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2778         else
2779                 gib_set_pad_extension(priv);
2780
2781         /* Set MAC address */
2782         umac_set_hw_addr(priv, dev->dev_addr);
2783
2784         umac_enable_set(priv, CMD_RX_EN, 1);
2785
2786         /* TX pipe enable */
2787         topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2788
2789         umac_enable_set(priv, CMD_TX_EN, 1);
2790
2791         ret = tdma_enable_set(priv, 1);
2792         if (ret) {
2793                 netdev_err(dev, "TDMA timeout!\n");
2794                 goto out_free_rx_ring;
2795         }
2796
2797         phy_resume(dev->phydev);
2798
2799         bcm_sysport_netif_start(dev);
2800
2801         netif_device_attach(dev);
2802
2803         return 0;
2804
2805 out_free_rx_ring:
2806         bcm_sysport_fini_rx_ring(priv);
2807 out_free_tx_rings:
2808         for (i = 0; i < dev->num_tx_queues; i++)
2809                 bcm_sysport_fini_tx_ring(priv, i);
2810         return ret;
2811 }
2812
2813 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
2814                 bcm_sysport_suspend, bcm_sysport_resume);
2815
2816 static struct platform_driver bcm_sysport_driver = {
2817         .probe  = bcm_sysport_probe,
2818         .remove = bcm_sysport_remove,
2819         .driver =  {
2820                 .name = "brcm-systemport",
2821                 .of_match_table = bcm_sysport_of_match,
2822                 .pm = &bcm_sysport_pm_ops,
2823         },
2824 };
2825 module_platform_driver(bcm_sysport_driver);
2826
2827 MODULE_AUTHOR("Broadcom Corporation");
2828 MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
2829 MODULE_ALIAS("platform:brcm-systemport");
2830 MODULE_LICENSE("GPL");