2 * Texas Instruments Ethernet Switch Driver
4 * Copyright (C) 2012 Texas Instruments
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
16 #include <linux/kernel.h>
18 #include <linux/clk.h>
19 #include <linux/timer.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/irqreturn.h>
23 #include <linux/interrupt.h>
24 #include <linux/if_ether.h>
25 #include <linux/etherdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/net_tstamp.h>
28 #include <linux/phy.h>
29 #include <linux/workqueue.h>
30 #include <linux/delay.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/gpio/consumer.h>
34 #include <linux/of_mdio.h>
35 #include <linux/of_net.h>
36 #include <linux/of_device.h>
37 #include <linux/if_vlan.h>
38 #include <linux/kmemleak.h>
39 #include <linux/sys_soc.h>
41 #include <linux/pinctrl/consumer.h>
42 #include <net/pkt_cls.h>
47 #include "davinci_cpdma.h"
49 #include <net/pkt_sched.h>
51 #define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
52 NETIF_MSG_DRV | NETIF_MSG_LINK | \
53 NETIF_MSG_IFUP | NETIF_MSG_INTR | \
54 NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
55 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
56 NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
57 NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
58 NETIF_MSG_RX_STATUS)
60 #define cpsw_info(priv, type, format, ...) \
61 do { \
62 if (netif_msg_##type(priv) && net_ratelimit()) \
63 dev_info(priv->dev, format, ## __VA_ARGS__); \
64 } while (0)
66 #define cpsw_err(priv, type, format, ...) \
67 do { \
68 if (netif_msg_##type(priv) && net_ratelimit()) \
69 dev_err(priv->dev, format, ## __VA_ARGS__); \
70 } while (0)
72 #define cpsw_dbg(priv, type, format, ...) \
73 do { \
74 if (netif_msg_##type(priv) && net_ratelimit()) \
75 dev_dbg(priv->dev, format, ## __VA_ARGS__); \
76 } while (0)
78 #define cpsw_notice(priv, type, format, ...) \
79 do { \
80 if (netif_msg_##type(priv) && net_ratelimit()) \
81 dev_notice(priv->dev, format, ## __VA_ARGS__); \
82 } while (0)
84 #define ALE_ALL_PORTS 0x7
86 #define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
87 #define CPSW_MINOR_VERSION(reg) (reg & 0xff)
88 #define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
90 #define CPSW_VERSION_1 0x19010a
91 #define CPSW_VERSION_2 0x19010c
92 #define CPSW_VERSION_3 0x19010f
93 #define CPSW_VERSION_4 0x190112
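/* Worked example of the version decode above: the CPSW_VERSION_2 ID value
 * 0x190112's sibling 0x19010c yields major (0x19010c >> 8) & 0x7 = 1,
 * minor 0x19010c & 0xff = 12 and RTL (0x19010c >> 11) & 0x1f = 0, which
 * cpsw_ndo_open() logs as "initializing cpsw version 1.12 (0)".
 */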
95 #define HOST_PORT_NUM 0
96 #define CPSW_ALE_PORTS_NUM 3
97 #define SLIVER_SIZE 0x40
99 #define CPSW1_HOST_PORT_OFFSET 0x028
100 #define CPSW1_SLAVE_OFFSET 0x050
101 #define CPSW1_SLAVE_SIZE 0x040
102 #define CPSW1_CPDMA_OFFSET 0x100
103 #define CPSW1_STATERAM_OFFSET 0x200
104 #define CPSW1_HW_STATS 0x400
105 #define CPSW1_CPTS_OFFSET 0x500
106 #define CPSW1_ALE_OFFSET 0x600
107 #define CPSW1_SLIVER_OFFSET 0x700
109 #define CPSW2_HOST_PORT_OFFSET 0x108
110 #define CPSW2_SLAVE_OFFSET 0x200
111 #define CPSW2_SLAVE_SIZE 0x100
112 #define CPSW2_CPDMA_OFFSET 0x800
113 #define CPSW2_HW_STATS 0x900
114 #define CPSW2_STATERAM_OFFSET 0xa00
115 #define CPSW2_CPTS_OFFSET 0xc00
116 #define CPSW2_ALE_OFFSET 0xd00
117 #define CPSW2_SLIVER_OFFSET 0xd80
118 #define CPSW2_BD_OFFSET 0x2000
120 #define CPDMA_RXTHRESH 0x0c0
121 #define CPDMA_RXFREE 0x0e0
122 #define CPDMA_TXHDP 0x00
123 #define CPDMA_RXHDP 0x20
124 #define CPDMA_TXCP 0x40
125 #define CPDMA_RXCP 0x60
127 #define CPSW_POLL_WEIGHT 64
128 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
129 #define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
130 #define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
132 CPSW_RX_VLAN_ENCAP_HDR_SIZE)
134 #define RX_PRIORITY_MAPPING 0x76543210
135 #define TX_PRIORITY_MAPPING 0x33221100
136 #define CPDMA_TX_PRIORITY_MAP 0x76543210
138 #define CPSW_VLAN_AWARE BIT(1)
139 #define CPSW_RX_VLAN_ENCAP BIT(2)
140 #define CPSW_ALE_VLAN_AWARE 1
142 #define CPSW_FIFO_NORMAL_MODE (0 << 16)
143 #define CPSW_FIFO_DUAL_MAC_MODE (1 << 16)
144 #define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16)
146 #define CPSW_INTPACEEN (0x3f << 16)
147 #define CPSW_INTPRESCALE_MASK (0x7FF << 0)
148 #define CPSW_CMINTMAX_CNT 63
149 #define CPSW_CMINTMIN_CNT 2
150 #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
151 #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
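/* With the counts above, the usable interrupt pacing interval is clamped to
 * roughly 16..500 us: CPSW_CMINTMIN_INTVL = (1000 / 63) + 1 = 16 and
 * CPSW_CMINTMAX_INTVL = 1000 / 2 = 500 (see cpsw_set_coalesce() below).
 */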
153 #define cpsw_slave_index(cpsw, priv) \
154 ((cpsw->data.dual_emac) ? priv->emac_port : \
155 cpsw->data.active_slave)
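/* In dual EMAC mode every net_device owns exactly one slave port
 * (priv->emac_port); in switch mode all per-port operations (time stamping,
 * ethtool, MII ioctls) are steered to the slave named by the active_slave
 * platform data / device tree property.
 */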
157 #define CPSW_MAX_QUEUES 8
158 #define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
159 #define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
160 #define CPSW_FIFO_SHAPE_EN_SHIFT 16
161 #define CPSW_FIFO_RATE_EN_SHIFT 20
162 #define CPSW_TC_NUM 4
163 #define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
164 #define CPSW_PCT_MASK 0x7f
166 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
167 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
168 #define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16
169 #define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8
170 #define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0)
171 enum {
172 CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
173 CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
174 CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
175 CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
176 };
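/* Layout of the 32-bit word the switch prepends to received frames when
 * CPSW_RX_VLAN_ENCAP is enabled (decoded in cpsw_rx_vlan_encap() below):
 * bits 31..29 packet priority, bits 27..16 VID, bits 9..8 packet type
 * (VLAN-tagged / reserved / priority-tagged / untagged).
 */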
178 static int debug_level;
179 module_param(debug_level, int, 0);
180 MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
182 static int ale_ageout = 10;
183 module_param(ale_ageout, int, 0);
184 MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
186 static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
187 module_param(rx_packet_max, int, 0);
188 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
190 static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
191 module_param(descs_pool_size, int, 0444);
192 MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
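/* The parameters above can be given on the kernel command line or at module
 * load time; a purely illustrative example, assuming the usual ti_cpsw
 * module name:
 *
 *   modprobe ti_cpsw debug_level=0x3fff descs_pool_size=1024
 */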
194 struct cpsw_wr_regs {
214 struct cpsw_ss_regs {
231 #define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
232 #define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
233 #define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
234 #define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
235 #define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
236 #define CPSW1_TS_CTL 0x14 /* Time Sync Control */
237 #define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
238 #define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
241 #define CPSW2_CONTROL 0x00 /* Control Register */
242 #define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
243 #define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
244 #define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
245 #define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
246 #define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
247 #define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
249 /* CPSW_PORT_V1 and V2 */
250 #define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
251 #define SA_HI 0x24 /* CPGMAC_SL Source Address High */
252 #define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
254 /* CPSW_PORT_V2 only */
255 #define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
256 #define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
257 #define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
258 #define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
259 #define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
260 #define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
261 #define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
262 #define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
264 /* Bit definitions for the CPSW2_CONTROL register */
265 #define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
266 #define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
267 #define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
268 #define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
269 #define TS_107 BIT(15) /* Time Sync Dest IP Address 107 */
270 #define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
271 #define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
272 #define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
273 #define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
274 #define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
275 #define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
276 #define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
277 #define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
278 #define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
279 #define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
280 #define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
281 #define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
282 #define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
284 #define CTRL_V2_TS_BITS \
285 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
286 TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
288 #define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
289 #define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
290 #define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
293 #define CTRL_V3_TS_BITS \
294 (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
295 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
296 TS_LTYPE1_EN)
298 #define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
299 #define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
300 #define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
302 /* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
303 #define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
304 #define TS_SEQ_ID_OFFSET_MASK (0x3f)
305 #define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
306 #define TS_MSG_TYPE_EN_MASK (0xffff)
308 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
309 #define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
311 /* Bit definitions for the CPSW1_TS_CTL register */
312 #define CPSW_V1_TS_RX_EN BIT(0)
313 #define CPSW_V1_TS_TX_EN BIT(4)
314 #define CPSW_V1_MSG_TYPE_OFS 16
316 /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
317 #define CPSW_V1_SEQ_ID_OFS_SHIFT 16
319 #define CPSW_MAX_BLKS_TX 15
320 #define CPSW_MAX_BLKS_TX_SHIFT 4
321 #define CPSW_MAX_BLKS_RX 5
323 struct cpsw_host_regs {
329 u32 cpdma_tx_pri_map;
330 u32 cpdma_rx_chan_map;
333 struct cpsw_sliver_regs {
346 struct cpsw_hw_stats {
348 u32 rxbroadcastframes;
349 u32 rxmulticastframes;
352 u32 rxaligncodeerrors;
353 u32 rxoversizedframes;
355 u32 rxundersizedframes;
360 u32 txbroadcastframes;
361 u32 txmulticastframes;
363 u32 txdeferredframes;
364 u32 txcollisionframes;
365 u32 txsinglecollframes;
366 u32 txmultcollframes;
367 u32 txexcessivecollisions;
368 u32 txlatecollisions;
370 u32 txcarriersenseerrors;
373 u32 octetframes65t127;
374 u32 octetframes128t255;
375 u32 octetframes256t511;
376 u32 octetframes512t1023;
377 u32 octetframes1024tup;
384 struct cpsw_slave_data {
385 struct device_node *phy_node;
386 char phy_id[MII_BUS_ID_SIZE];
388 u8 mac_addr[ETH_ALEN];
389 u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
392 struct cpsw_platform_data {
393 struct cpsw_slave_data *slave_data;
394 u32 ss_reg_ofs; /* Subsystem control register offset */
395 u32 channels; /* number of cpdma channels (symmetric) */
396 u32 slaves; /* number of slave cpgmac ports */
397 u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
398 u32 ale_entries; /* ale table size */
399 u32 bd_ram_size; /* buffer descriptor ram size */
400 u32 mac_control; /* Mac control register */
401 u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
402 bool dual_emac; /* Enable Dual EMAC mode */
407 struct cpsw_sliver_regs __iomem *sliver;
410 struct cpsw_slave_data *data;
411 struct phy_device *phy;
412 struct net_device *ndev;
416 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
418 return readl_relaxed(slave->regs + offset);
421 static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
423 writel_relaxed(val, slave->regs + offset);
427 struct cpdma_chan *ch;
433 struct cpsw_platform_data data;
434 struct napi_struct napi_rx;
435 struct napi_struct napi_tx;
436 struct cpsw_ss_regs __iomem *regs;
437 struct cpsw_wr_regs __iomem *wr_regs;
438 u8 __iomem *hw_stats;
439 struct cpsw_host_regs __iomem *host_port_regs;
444 struct cpsw_slave *slaves;
445 struct cpdma_ctlr *dma;
446 struct cpsw_vector txv[CPSW_MAX_QUEUES];
447 struct cpsw_vector rxv[CPSW_MAX_QUEUES];
448 struct cpsw_ale *ale;
450 bool rx_irq_disabled;
451 bool tx_irq_disabled;
452 u32 irqs_table[IRQ_NUM];
454 int rx_ch_num, tx_ch_num;
460 struct net_device *ndev;
463 u8 mac_addr[ETH_ALEN];
467 int fifo_bw[CPSW_TC_NUM];
470 struct cpsw_common *cpsw;
474 char stat_string[ETH_GSTRING_LEN];
486 #define CPSW_STAT(m) CPSW_STATS, \
487 FIELD_SIZEOF(struct cpsw_hw_stats, m), \
488 offsetof(struct cpsw_hw_stats, m)
489 #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
490 FIELD_SIZEOF(struct cpdma_chan_stats, m), \
491 offsetof(struct cpdma_chan_stats, m)
492 #define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
493 FIELD_SIZEOF(struct cpdma_chan_stats, m), \
494 offsetof(struct cpdma_chan_stats, m)
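/* Each stats table entry below records the counter's type, size and offset;
 * cpsw_get_ethtool_stats() uses the offset to read the value either from the
 * memory-mapped cpsw_hw_stats block or from a cpdma_chan_stats snapshot.
 */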
496 static const struct cpsw_stats cpsw_gstrings_stats[] = {
497 { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
498 { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
499 { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
500 { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
501 { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
502 { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
503 { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
504 { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
505 { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
506 { "Rx Fragments", CPSW_STAT(rxfragments) },
507 { "Rx Octets", CPSW_STAT(rxoctets) },
508 { "Good Tx Frames", CPSW_STAT(txgoodframes) },
509 { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
510 { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
511 { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
512 { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
513 { "Collisions", CPSW_STAT(txcollisionframes) },
514 { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
515 { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
516 { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
517 { "Late Collisions", CPSW_STAT(txlatecollisions) },
518 { "Tx Underrun", CPSW_STAT(txunderrun) },
519 { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
520 { "Tx Octets", CPSW_STAT(txoctets) },
521 { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
522 { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
523 { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
524 { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
525 { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
526 { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
527 { "Net Octets", CPSW_STAT(netoctets) },
528 { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
529 { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
530 { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
533 static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
534 { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
535 { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
536 { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
537 { "misqueued", CPDMA_RX_STAT(misqueued) },
538 { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
539 { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
540 { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
541 { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
542 { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
543 { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
544 { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
545 { "requeue", CPDMA_RX_STAT(requeue) },
546 { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
549 #define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats)
550 #define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats)
552 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
553 #define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
554 #define for_each_slave(priv, func, arg...) \
555 do { \
556 struct cpsw_slave *slave; \
557 struct cpsw_common *cpsw = (priv)->cpsw; \
558 int n; \
559 if (cpsw->data.dual_emac) \
560 (func)((cpsw)->slaves + priv->emac_port, ##arg);\
561 else \
562 for (n = cpsw->data.slaves, \
563 slave = cpsw->slaves; \
564 n; n--) \
565 (func)(slave++, ##arg); \
566 } while (0)
568 static inline int cpsw_get_slave_port(u32 slave_num)
570 return slave_num + 1;
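/* On the switch fabric port 0 is always the host (CPU) port, see
 * HOST_PORT_NUM above, so slave n maps to switch port n + 1.
 */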
573 static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
575 struct cpsw_common *cpsw = priv->cpsw;
577 if (cpsw->data.dual_emac) {
578 struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
579 int slave_port = cpsw_get_slave_port(slave->slave_num);
581 cpsw_ale_add_mcast(cpsw->ale, addr,
582 1 << slave_port | ALE_PORT_HOST,
583 ALE_VLAN, slave->port_vlan, 0);
587 cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
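/* In dual EMAC mode the multicast entry is scoped to the slave's reserved
 * port VLAN and forwarded only to that slave plus the host port; in switch
 * mode it is simply installed for all ports.
 */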
590 static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
592 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
593 struct cpsw_ale *ale = cpsw->ale;
596 if (cpsw->data.dual_emac) {
599 /* Enabling promiscuous mode for one interface will be
600 * common for both interfaces, as they share the same
601 * hardware resource.
602 */
603 for (i = 0; i < cpsw->data.slaves; i++)
604 if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
607 if (!enable && flag) {
609 dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
614 cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
616 dev_dbg(&ndev->dev, "promiscuity enabled\n");
619 cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
620 dev_dbg(&ndev->dev, "promiscuity disabled\n");
624 unsigned long timeout = jiffies + HZ;
626 /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
627 for (i = 0; i <= cpsw->data.slaves; i++) {
628 cpsw_ale_control_set(ale, i,
629 ALE_PORT_NOLEARN, 1);
630 cpsw_ale_control_set(ale, i,
631 ALE_PORT_NO_SA_UPDATE, 1);
634 /* Clear All Untouched entries */
635 cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
638 if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
640 } while (time_after(timeout, jiffies));
641 cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
643 /* Clear all mcast from ALE */
644 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
646 /* Flood All Unicast Packets to Host port */
647 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
648 dev_dbg(&ndev->dev, "promiscuity enabled\n");
650 /* Don't Flood All Unicast Packets to Host port */
651 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
653 /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
654 for (i = 0; i <= cpsw->data.slaves; i++) {
655 cpsw_ale_control_set(ale, i,
656 ALE_PORT_NOLEARN, 0);
657 cpsw_ale_control_set(ale, i,
658 ALE_PORT_NO_SA_UPDATE, 0);
660 dev_dbg(&ndev->dev, "promiscuity disabled\n");
665 static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
667 struct cpsw_priv *priv = netdev_priv(ndev);
669 cpsw_add_mcast(priv, addr);
673 static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
675 struct cpsw_priv *priv = netdev_priv(ndev);
676 struct cpsw_common *cpsw = priv->cpsw;
679 if (cpsw->data.dual_emac) {
680 vid = cpsw->slaves[priv->emac_port].port_vlan;
687 cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
691 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
693 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
695 if (ndev->flags & IFF_PROMISC) {
696 /* Enable promiscuous mode */
697 cpsw_set_promiscious(ndev, true);
698 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
701 /* Disable promiscuous mode */
702 cpsw_set_promiscious(ndev, false);
705 /* Restore allmulti on vlans if necessary */
706 cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
708 __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
711 static void cpsw_intr_enable(struct cpsw_common *cpsw)
713 writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
714 writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
716 cpdma_ctlr_int_ctrl(cpsw->dma, true);
720 static void cpsw_intr_disable(struct cpsw_common *cpsw)
722 writel_relaxed(0, &cpsw->wr_regs->tx_en);
723 writel_relaxed(0, &cpsw->wr_regs->rx_en);
725 cpdma_ctlr_int_ctrl(cpsw->dma, false);
729 static void cpsw_tx_handler(void *token, int len, int status)
731 struct netdev_queue *txq;
732 struct sk_buff *skb = token;
733 struct net_device *ndev = skb->dev;
734 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
736 /* Check whether the queue is stopped due to stalled tx dma; if the
737 * queue is stopped, wake it, since completing this packet freed a tx desc
738 */
739 txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
740 if (unlikely(netif_tx_queue_stopped(txq)))
741 netif_tx_wake_queue(txq);
743 cpts_tx_timestamp(cpsw->cpts, skb);
744 ndev->stats.tx_packets++;
745 ndev->stats.tx_bytes += len;
746 dev_kfree_skb_any(skb);
749 static void cpsw_rx_vlan_encap(struct sk_buff *skb)
751 struct cpsw_priv *priv = netdev_priv(skb->dev);
752 struct cpsw_common *cpsw = priv->cpsw;
753 u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
754 u16 vtag, vid, prio, pkt_type;
756 /* Remove VLAN header encapsulation word */
757 skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
759 pkt_type = (rx_vlan_encap_hdr >>
760 CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
761 CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
762 /* Ignore unknown and priority-tagged packets */
763 if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
764 pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
767 vid = (rx_vlan_encap_hdr >>
768 CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
769 VLAN_VID_MASK;
770 /* Ignore vid 0 and pass packet as is */
771 if (!vid)
772 return;
773 /* Ignore default vlans in dual mac mode */
774 if (cpsw->data.dual_emac &&
775 vid == cpsw->slaves[priv->emac_port].port_vlan)
778 prio = (rx_vlan_encap_hdr >>
779 CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
780 CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
782 vtag = (prio << VLAN_PRIO_SHIFT) | vid;
783 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
785 /* strip vlan tag for VLAN-tagged packet */
786 if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
787 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
788 skb_pull(skb, VLAN_HLEN);
792 static void cpsw_rx_handler(void *token, int len, int status)
794 struct cpdma_chan *ch;
795 struct sk_buff *skb = token;
796 struct sk_buff *new_skb;
797 struct net_device *ndev = skb->dev;
799 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
801 if (cpsw->data.dual_emac) {
802 port = CPDMA_RX_SOURCE_PORT(status);
804 ndev = cpsw->slaves[--port].ndev;
809 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
810 /* In dual emac mode check for all interfaces */
811 if (cpsw->data.dual_emac && cpsw->usage_count &&
813 /* The packet received is for an interface which
814 * is already down while the other interface is up
815 * and running; instead of freeing the skb, which
816 * would shrink the number of rx descriptors in the
817 * DMA engine, requeue it back to cpdma.
823 /* the interface is going down, skbs are purged */
824 dev_kfree_skb_any(skb);
828 new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
830 skb_copy_queue_mapping(new_skb, skb);
832 if (status & CPDMA_RX_VLAN_ENCAP)
833 cpsw_rx_vlan_encap(skb);
834 cpts_rx_timestamp(cpsw->cpts, skb);
835 skb->protocol = eth_type_trans(skb, ndev);
836 netif_receive_skb(skb);
837 ndev->stats.rx_bytes += len;
838 ndev->stats.rx_packets++;
839 kmemleak_not_leak(new_skb);
841 ndev->stats.rx_dropped++;
846 if (netif_dormant(ndev)) {
847 dev_kfree_skb_any(new_skb);
851 ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
852 ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
853 skb_tailroom(new_skb), 0);
854 if (WARN_ON(ret < 0))
855 dev_kfree_skb_any(new_skb);
858 static void cpsw_split_res(struct net_device *ndev)
860 struct cpsw_priv *priv = netdev_priv(ndev);
861 u32 consumed_rate = 0, bigest_rate = 0;
862 struct cpsw_common *cpsw = priv->cpsw;
863 struct cpsw_vector *txv = cpsw->txv;
864 int i, ch_weight, rlim_ch_num = 0;
865 int budget, bigest_rate_ch = 0;
866 u32 ch_rate, max_rate;
869 for (i = 0; i < cpsw->tx_ch_num; i++) {
870 ch_rate = cpdma_chan_get_rate(txv[i].ch);
875 consumed_rate += ch_rate;
878 if (cpsw->tx_ch_num == rlim_ch_num) {
879 max_rate = consumed_rate;
880 } else if (!rlim_ch_num) {
881 ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
883 max_rate = consumed_rate;
885 max_rate = cpsw->speed * 1000;
887 /* if max_rate is less than expected due to reduced link speed,
888 * split proportionally according to the next potential max speed
889 */
890 if (max_rate < consumed_rate)
893 if (max_rate < consumed_rate)
896 ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
897 ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
898 (cpsw->tx_ch_num - rlim_ch_num);
899 bigest_rate = (max_rate - consumed_rate) /
900 (cpsw->tx_ch_num - rlim_ch_num);
903 /* split tx weight/budget */
904 budget = CPSW_POLL_WEIGHT;
905 for (i = 0; i < cpsw->tx_ch_num; i++) {
906 ch_rate = cpdma_chan_get_rate(txv[i].ch);
908 txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
911 if (ch_rate > bigest_rate) {
913 bigest_rate = ch_rate;
916 ch_weight = (ch_rate * 100) / max_rate;
919 cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
921 txv[i].budget = ch_budget;
924 cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
927 budget -= txv[i].budget;
931 txv[bigest_rate_ch].budget += budget;
933 /* split rx budget */
934 budget = CPSW_POLL_WEIGHT;
935 ch_budget = budget / cpsw->rx_ch_num;
936 for (i = 0; i < cpsw->rx_ch_num; i++) {
937 cpsw->rxv[i].budget = ch_budget;
942 cpsw->rxv[0].budget += budget;
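/* A rough example with CPSW_POLL_WEIGHT = 64 on a 1000 Mbit/s link
 * (max_rate = 1000000 kbit/s): with two tx channels and channel 0 capped at
 * 500000 kbit/s, channel 0 gets 500000 * 64 / 1000000 = 32 of the NAPI
 * budget and a cpdma weight of 50%, while the remaining (64 - 32) / 1 = 32
 * goes to the unlimited channel 1.
 */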
945 static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
947 struct cpsw_common *cpsw = dev_id;
949 writel(0, &cpsw->wr_regs->tx_en);
950 cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
952 if (cpsw->quirk_irq) {
953 disable_irq_nosync(cpsw->irqs_table[1]);
954 cpsw->tx_irq_disabled = true;
957 napi_schedule(&cpsw->napi_tx);
961 static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
963 struct cpsw_common *cpsw = dev_id;
965 cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
966 writel(0, &cpsw->wr_regs->rx_en);
968 if (cpsw->quirk_irq) {
969 disable_irq_nosync(cpsw->irqs_table[0]);
970 cpsw->rx_irq_disabled = true;
973 napi_schedule(&cpsw->napi_rx);
977 static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
980 int num_tx, cur_budget, ch;
981 struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
982 struct cpsw_vector *txv;
984 /* process every unprocessed channel */
985 ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
986 for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
987 if (!(ch_map & 0x80))
990 txv = &cpsw->txv[ch];
991 if (unlikely(txv->budget > budget - num_tx))
992 cur_budget = budget - num_tx;
994 cur_budget = txv->budget;
996 num_tx += cpdma_chan_process(txv->ch, cur_budget);
997 if (num_tx >= budget)
1001 if (num_tx < budget) {
1002 napi_complete(napi_tx);
1003 writel(0xff, &cpsw->wr_regs->tx_en);
1009 static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
1011 struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
1014 num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
1015 if (num_tx < budget) {
1016 napi_complete(napi_tx);
1017 writel(0xff, &cpsw->wr_regs->tx_en);
1018 if (cpsw->tx_irq_disabled) {
1019 cpsw->tx_irq_disabled = false;
1020 enable_irq(cpsw->irqs_table[1]);
1027 static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
1030 int num_rx, cur_budget, ch;
1031 struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
1032 struct cpsw_vector *rxv;
1034 /* process every unprocessed channel */
1035 ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
1036 for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
1037 if (!(ch_map & 0x01))
1040 rxv = &cpsw->rxv[ch];
1041 if (unlikely(rxv->budget > budget - num_rx))
1042 cur_budget = budget - num_rx;
1044 cur_budget = rxv->budget;
1046 num_rx += cpdma_chan_process(rxv->ch, cur_budget);
1047 if (num_rx >= budget)
1051 if (num_rx < budget) {
1052 napi_complete_done(napi_rx, num_rx);
1053 writel(0xff, &cpsw->wr_regs->rx_en);
1059 static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
1061 struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
1064 num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
1065 if (num_rx < budget) {
1066 napi_complete_done(napi_rx, num_rx);
1067 writel(0xff, &cpsw->wr_regs->rx_en);
1068 if (cpsw->rx_irq_disabled) {
1069 cpsw->rx_irq_disabled = false;
1070 enable_irq(cpsw->irqs_table[0]);
1077 static inline void soft_reset(const char *module, void __iomem *reg)
1079 unsigned long timeout = jiffies + HZ;
1081 writel_relaxed(1, reg);
1084 } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
1086 WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
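/* Polls the self-clearing reset bit for up to one second; used both for the
 * per-port slivers (soft_reset_slave()) and for the switch subsystem itself
 * (cpsw_init_host_port()).
 */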
1089 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
1090 struct cpsw_priv *priv)
1092 slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
1093 slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
1096 static bool cpsw_shp_is_off(struct cpsw_priv *priv)
1098 struct cpsw_common *cpsw = priv->cpsw;
1099 struct cpsw_slave *slave;
1100 u32 shift, mask, val;
1102 val = readl_relaxed(&cpsw->regs->ptype);
1104 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1105 shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
1112 static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
1114 struct cpsw_common *cpsw = priv->cpsw;
1115 struct cpsw_slave *slave;
1116 u32 shift, mask, val;
1118 val = readl_relaxed(&cpsw->regs->ptype);
1120 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1121 shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
1122 mask = (1 << --fifo) << shift;
1123 val = on ? val | mask : val & ~mask;
1125 writel_relaxed(val, &cpsw->regs->ptype);
1128 static void _cpsw_adjust_link(struct cpsw_slave *slave,
1129 struct cpsw_priv *priv, bool *link)
1131 struct phy_device *phy = slave->phy;
1132 u32 mac_control = 0;
1134 struct cpsw_common *cpsw = priv->cpsw;
1139 slave_port = cpsw_get_slave_port(slave->slave_num);
1142 mac_control = cpsw->data.mac_control;
1144 /* enable forwarding */
1145 cpsw_ale_control_set(cpsw->ale, slave_port,
1146 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1148 if (phy->speed == 1000)
1149 mac_control |= BIT(7); /* GIGABITEN */
1151 mac_control |= BIT(0); /* FULLDUPLEXEN */
1153 /* set speed_in input in case RMII mode is used in 100Mbps */
1154 if (phy->speed == 100)
1155 mac_control |= BIT(15);
1156 /* in band mode only works in 10Mbps RGMII mode */
1157 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
1158 mac_control |= BIT(18); /* In Band mode */
1161 mac_control |= BIT(3);
1164 mac_control |= BIT(4);
1168 if (priv->shp_cfg_speed &&
1169 priv->shp_cfg_speed != slave->phy->speed &&
1170 !cpsw_shp_is_off(priv))
1172 "Speed was changed, CBS shaper speeds are changed!");
1175 /* disable forwarding */
1176 cpsw_ale_control_set(cpsw->ale, slave_port,
1177 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1180 if (mac_control != slave->mac_control) {
1181 phy_print_status(phy);
1182 writel_relaxed(mac_control, &slave->sliver->mac_control);
1185 slave->mac_control = mac_control;
1188 static int cpsw_get_common_speed(struct cpsw_common *cpsw)
1192 for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
1193 if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
1194 speed += cpsw->slaves[i].phy->speed;
1199 static int cpsw_need_resplit(struct cpsw_common *cpsw)
1204 /* re-split resources only in case speed was changed */
1205 speed = cpsw_get_common_speed(cpsw);
1206 if (speed == cpsw->speed || !speed)
1209 cpsw->speed = speed;
1211 for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
1212 ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
1219 /* cases not dependent on speed */
1220 if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
1226 static void cpsw_adjust_link(struct net_device *ndev)
1228 struct cpsw_priv *priv = netdev_priv(ndev);
1229 struct cpsw_common *cpsw = priv->cpsw;
1232 for_each_slave(priv, _cpsw_adjust_link, priv, &link);
1235 if (cpsw_need_resplit(cpsw))
1236 cpsw_split_res(ndev);
1238 netif_carrier_on(ndev);
1239 if (netif_running(ndev))
1240 netif_tx_wake_all_queues(ndev);
1242 netif_carrier_off(ndev);
1243 netif_tx_stop_all_queues(ndev);
1247 static int cpsw_get_coalesce(struct net_device *ndev,
1248 struct ethtool_coalesce *coal)
1250 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1252 coal->rx_coalesce_usecs = cpsw->coal_intvl;
1256 static int cpsw_set_coalesce(struct net_device *ndev,
1257 struct ethtool_coalesce *coal)
1259 struct cpsw_priv *priv = netdev_priv(ndev);
1261 u32 num_interrupts = 0;
1265 struct cpsw_common *cpsw = priv->cpsw;
1267 coal_intvl = coal->rx_coalesce_usecs;
1269 int_ctrl = readl(&cpsw->wr_regs->int_control);
1270 prescale = cpsw->bus_freq_mhz * 4;
1272 if (!coal->rx_coalesce_usecs) {
1273 int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
1277 if (coal_intvl < CPSW_CMINTMIN_INTVL)
1278 coal_intvl = CPSW_CMINTMIN_INTVL;
1280 if (coal_intvl > CPSW_CMINTMAX_INTVL) {
1281 /* Interrupt pacer works with a 4us pulse; we can
1282 * throttle further by dilating the 4us pulse.
1283 */
1284 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
1286 if (addnl_dvdr > 1) {
1287 prescale *= addnl_dvdr;
1288 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
1289 coal_intvl = (CPSW_CMINTMAX_INTVL
1293 coal_intvl = CPSW_CMINTMAX_INTVL;
1297 num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
1298 writel(num_interrupts, &cpsw->wr_regs->rx_imax);
1299 writel(num_interrupts, &cpsw->wr_regs->tx_imax);
1301 int_ctrl |= CPSW_INTPACEEN;
1302 int_ctrl &= (~CPSW_INTPRESCALE_MASK);
1303 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
1306 writel(int_ctrl, &cpsw->wr_regs->int_control);
1308 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
1309 cpsw->coal_intvl = coal_intvl;
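/* Worked example (a 250 MHz bus clock is assumed, purely illustrative):
 * prescale = 250 * 4 = 1000 wr-clock cycles, i.e. the 4us pacing pulse
 * mentioned above; a request for rx_coalesce_usecs = 500 then programs
 * rx_imax/tx_imax = 1000 / 500 = 2, limiting pacing to two interrupts per
 * millisecond per direction.
 */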
1314 static int cpsw_get_sset_count(struct net_device *ndev, int sset)
1316 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1320 return (CPSW_STATS_COMMON_LEN +
1321 (cpsw->rx_ch_num + cpsw->tx_ch_num) *
1328 static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
1334 ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
1335 for (i = 0; i < ch_stats_len; i++) {
1336 line = i % CPSW_STATS_CH_LEN;
1337 snprintf(*p, ETH_GSTRING_LEN,
1338 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
1339 (long)(i / CPSW_STATS_CH_LEN),
1340 cpsw_gstrings_ch_stats[line].stat_string);
1341 *p += ETH_GSTRING_LEN;
1345 static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1347 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1351 switch (stringset) {
1353 for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
1354 memcpy(p, cpsw_gstrings_stats[i].stat_string,
1356 p += ETH_GSTRING_LEN;
1359 cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
1360 cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
1365 static void cpsw_get_ethtool_stats(struct net_device *ndev,
1366 struct ethtool_stats *stats, u64 *data)
1369 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1370 struct cpdma_chan_stats ch_stats;
1373 /* Collect Davinci CPDMA stats for Rx and Tx Channel */
1374 for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
1375 data[l] = readl(cpsw->hw_stats +
1376 cpsw_gstrings_stats[l].stat_offset);
1378 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1379 cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
1380 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
1381 p = (u8 *)&ch_stats +
1382 cpsw_gstrings_ch_stats[i].stat_offset;
1383 data[l] = *(u32 *)p;
1387 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
1388 cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
1389 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
1390 p = (u8 *)&ch_stats +
1391 cpsw_gstrings_ch_stats[i].stat_offset;
1392 data[l] = *(u32 *)p;
1397 static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
1398 struct sk_buff *skb,
1399 struct cpdma_chan *txch)
1401 struct cpsw_common *cpsw = priv->cpsw;
1403 skb_tx_timestamp(skb);
1404 return cpdma_chan_submit(txch, skb, skb->data, skb->len,
1405 priv->emac_port + cpsw->data.dual_emac);
1408 static inline void cpsw_add_dual_emac_def_ale_entries(
1409 struct cpsw_priv *priv, struct cpsw_slave *slave,
1412 struct cpsw_common *cpsw = priv->cpsw;
1413 u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
1415 if (cpsw->version == CPSW_VERSION_1)
1416 slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
1418 slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
1419 cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
1420 port_mask, port_mask, 0);
1421 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1422 port_mask, ALE_VLAN, slave->port_vlan, 0);
1423 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
1424 HOST_PORT_NUM, ALE_VLAN |
1425 ALE_SECURE, slave->port_vlan);
1426 cpsw_ale_control_set(cpsw->ale, slave_port,
1427 ALE_PORT_DROP_UNKNOWN_VLAN, 1);
1430 static void soft_reset_slave(struct cpsw_slave *slave)
1434 snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
1435 soft_reset(name, &slave->sliver->soft_reset);
1438 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1441 struct phy_device *phy;
1442 struct cpsw_common *cpsw = priv->cpsw;
1444 soft_reset_slave(slave);
1446 /* setup priority mapping */
1447 writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
1449 switch (cpsw->version) {
1450 case CPSW_VERSION_1:
1451 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
1452 /* Increase RX FIFO size to 5 for supporting fullduplex
1456 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1457 CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
1459 case CPSW_VERSION_2:
1460 case CPSW_VERSION_3:
1461 case CPSW_VERSION_4:
1462 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
1463 /* Increase RX FIFO size to 5 for supporting fullduplex
1467 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1468 CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
1472 /* setup max packet size, and mac address */
1473 writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
1474 cpsw_set_slave_mac(slave, priv);
1476 slave->mac_control = 0; /* no link yet */
1478 slave_port = cpsw_get_slave_port(slave->slave_num);
1480 if (cpsw->data.dual_emac)
1481 cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
1483 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1484 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1486 if (slave->data->phy_node) {
1487 phy = of_phy_connect(priv->ndev, slave->data->phy_node,
1488 &cpsw_adjust_link, 0, slave->data->phy_if);
1490 dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
1491 slave->data->phy_node,
1496 phy = phy_connect(priv->ndev, slave->data->phy_id,
1497 &cpsw_adjust_link, slave->data->phy_if);
1500 "phy \"%s\" not found on slave %d, err %ld\n",
1501 slave->data->phy_id, slave->slave_num,
1509 phy_attached_info(slave->phy);
1511 phy_start(slave->phy);
1513 /* Configure GMII_SEL register */
1514 cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num);
1517 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
1519 struct cpsw_common *cpsw = priv->cpsw;
1520 const int vlan = cpsw->data.default_vlan;
1523 int unreg_mcast_mask;
1525 reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
1528 writel(vlan, &cpsw->host_port_regs->port_vlan);
1530 for (i = 0; i < cpsw->data.slaves; i++)
1531 slave_write(cpsw->slaves + i, vlan, reg);
1533 if (priv->ndev->flags & IFF_ALLMULTI)
1534 unreg_mcast_mask = ALE_ALL_PORTS;
1536 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1538 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
1539 ALE_ALL_PORTS, ALE_ALL_PORTS,
1543 static void cpsw_init_host_port(struct cpsw_priv *priv)
1547 struct cpsw_common *cpsw = priv->cpsw;
1549 /* soft reset the controller and initialize ale */
1550 soft_reset("cpsw", &cpsw->regs->soft_reset);
1551 cpsw_ale_start(cpsw->ale);
1553 /* switch to vlan aware mode */
1554 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
1555 CPSW_ALE_VLAN_AWARE);
1556 control_reg = readl(&cpsw->regs->control);
1557 control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
1558 writel(control_reg, &cpsw->regs->control);
1559 fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
1560 CPSW_FIFO_NORMAL_MODE;
1561 writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
1563 /* setup host port priority mapping */
1564 writel_relaxed(CPDMA_TX_PRIORITY_MAP,
1565 &cpsw->host_port_regs->cpdma_tx_pri_map);
1566 writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
1568 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
1569 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1571 if (!cpsw->data.dual_emac) {
1572 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
1574 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1575 ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
1579 static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
1581 struct cpsw_common *cpsw = priv->cpsw;
1582 struct sk_buff *skb;
1586 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1587 ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1588 for (i = 0; i < ch_buf_num; i++) {
1589 skb = __netdev_alloc_skb_ip_align(priv->ndev,
1590 cpsw->rx_packet_max,
1593 cpsw_err(priv, ifup, "cannot allocate skb\n");
1597 skb_set_queue_mapping(skb, ch);
1598 ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
1599 skb->data, skb_tailroom(skb),
1602 cpsw_err(priv, ifup,
1603 "cannot submit skb to channel %d rx, error %d\n",
1608 kmemleak_not_leak(skb);
1611 cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
1618 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
1622 slave_port = cpsw_get_slave_port(slave->slave_num);
1626 phy_stop(slave->phy);
1627 phy_disconnect(slave->phy);
1629 cpsw_ale_control_set(cpsw->ale, slave_port,
1630 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1631 soft_reset_slave(slave);
1634 static int cpsw_tc_to_fifo(int tc, int num_tc)
1636 if (tc == num_tc - 1)
1639 return CPSW_FIFO_SHAPERS_NUM - tc;
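/* With CPSW_TC_NUM = 4 this maps the last (best effort) tc3 to FIFO 0 and
 * tc0/tc1/tc2 to FIFOs 3/2/1, so higher priority traffic classes land on
 * higher numbered, shapeable FIFOs.
 */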
1642 static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
1644 struct cpsw_common *cpsw = priv->cpsw;
1645 u32 val = 0, send_pct, shift;
1646 struct cpsw_slave *slave;
1649 if (bw > priv->shp_cfg_speed * 1000)
1652 /* shaping has to stay enabled for highest fifos linearly
1653 * and fifo bw may be no more than the interface can allow
1654 */
1655 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1656 send_pct = slave_read(slave, SEND_PERCENT);
1657 for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
1659 if (i >= fifo || !priv->fifo_bw[i])
1662 dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
1666 if (!priv->fifo_bw[i] && i > fifo) {
1667 dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
1671 shift = (i - 1) * 8;
1673 send_pct &= ~(CPSW_PCT_MASK << shift);
1674 val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
1678 send_pct |= val << shift;
1683 if (priv->fifo_bw[i])
1684 pct += (send_pct >> shift) & CPSW_PCT_MASK;
1690 slave_write(slave, send_pct, SEND_PERCENT);
1691 priv->fifo_bw[fifo] = bw;
1693 dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
1694 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
1698 dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
1702 static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
1704 struct cpsw_common *cpsw = priv->cpsw;
1705 struct cpsw_slave *slave;
1706 u32 tx_in_ctl_rg, val;
1709 ret = cpsw_set_fifo_bw(priv, fifo, bw);
1713 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1714 tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
1715 CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
1718 cpsw_fifo_shp_on(priv, fifo, bw);
1720 val = slave_read(slave, tx_in_ctl_rg);
1721 if (cpsw_shp_is_off(priv)) {
1722 /* disable FIFOs rate limited queues */
1723 val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
1725 /* set type of FIFO queues to normal priority mode */
1726 val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
1728 /* set type of FIFO queues to be rate limited */
1730 val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
1732 priv->shp_cfg_speed = 0;
1735 /* toggle a FIFO rate limited queue */
1737 val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1739 val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1740 slave_write(slave, val, tx_in_ctl_rg);
1742 /* FIFO transmit shape enable */
1743 cpsw_fifo_shp_on(priv, fifo, bw);
1750 * shaping for class A should be set first
1752 static int cpsw_set_cbs(struct net_device *ndev,
1753 struct tc_cbs_qopt_offload *qopt)
1755 struct cpsw_priv *priv = netdev_priv(ndev);
1756 struct cpsw_common *cpsw = priv->cpsw;
1757 struct cpsw_slave *slave;
1762 tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
1764 /* enable channels in backward order, as highest FIFOs must be rate
1765 * limited first and for compliance with CPDMA rate limited channels,
1766 * which are also used in backward order. FIFO0 cannot be rate limited.
1767 */
1768 fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
1770 dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
1774 /* do nothing, it's disabled anyway */
1775 if (!qopt->enable && !priv->fifo_bw[fifo])
1778 /* shapers can be set if link speed is known */
1779 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1780 if (slave->phy && slave->phy->link) {
1781 if (priv->shp_cfg_speed &&
1782 priv->shp_cfg_speed != slave->phy->speed)
1783 prev_speed = priv->shp_cfg_speed;
1785 priv->shp_cfg_speed = slave->phy->speed;
1788 if (!priv->shp_cfg_speed) {
1789 dev_err(priv->dev, "Link speed is not known");
1793 ret = pm_runtime_get_sync(cpsw->dev);
1795 pm_runtime_put_noidle(cpsw->dev);
1799 bw = qopt->enable ? qopt->idleslope : 0;
1800 ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
1802 priv->shp_cfg_speed = prev_speed;
1806 if (bw && prev_speed)
1808 "Speed was changed, CBS shaper speeds are changed!");
1810 pm_runtime_put_sync(cpsw->dev);
1814 static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1818 for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
1819 bw = priv->fifo_bw[fifo];
1823 cpsw_set_fifo_rlimit(priv, fifo, bw);
1827 static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1829 struct cpsw_common *cpsw = priv->cpsw;
1830 u32 tx_prio_map = 0;
1834 if (!priv->mqprio_hw)
1837 for (i = 0; i < 8; i++) {
1838 tc = netdev_get_prio_tc_map(priv->ndev, i);
1839 fifo = CPSW_FIFO_SHAPERS_NUM - tc;
1840 tx_prio_map |= fifo << (4 * i);
1843 tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1844 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1846 slave_write(slave, tx_prio_map, tx_prio_rg);
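/* Example: if user priorities 0..3 were mapped to tc3 and 4..7 to tc0 by a
 * previous mqprio setup, the nibble map rebuilt above is 0x33330000
 * (FIFO 0 for the low priorities, FIFO 3 for the high ones).
 */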
1849 /* restore resources after port reset */
1850 static void cpsw_restore(struct cpsw_priv *priv)
1852 /* restore MQPRIO offload */
1853 for_each_slave(priv, cpsw_mqprio_resume, priv);
1855 /* restore CBS offload */
1856 for_each_slave(priv, cpsw_cbs_resume, priv);
1859 static int cpsw_ndo_open(struct net_device *ndev)
1861 struct cpsw_priv *priv = netdev_priv(ndev);
1862 struct cpsw_common *cpsw = priv->cpsw;
1866 ret = pm_runtime_get_sync(cpsw->dev);
1868 pm_runtime_put_noidle(cpsw->dev);
1872 netif_carrier_off(ndev);
1874 /* Notify the stack of the actual queue counts. */
1875 ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
1877 dev_err(priv->dev, "cannot set real number of tx queues\n");
1881 ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
1883 dev_err(priv->dev, "cannot set real number of rx queues\n");
1887 reg = cpsw->version;
1889 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
1890 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
1891 CPSW_RTL_VERSION(reg));
1893 /* Initialize host and slave ports */
1894 if (!cpsw->usage_count)
1895 cpsw_init_host_port(priv);
1896 for_each_slave(priv, cpsw_slave_open, priv);
1898 /* Add default VLAN */
1899 if (!cpsw->data.dual_emac)
1900 cpsw_add_default_vlan(priv);
1902 cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
1903 ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
1905 /* initialize shared resources for every ndev */
1906 if (!cpsw->usage_count) {
1907 /* disable priority elevation */
1908 writel_relaxed(0, &cpsw->regs->ptype);
1910 /* enable statistics collection on all ports */
1911 writel_relaxed(0x7, &cpsw->regs->stat_port_en);
1913 /* Enable internal fifo flow control */
1914 writel(0x7, &cpsw->regs->flow_control);
1916 napi_enable(&cpsw->napi_rx);
1917 napi_enable(&cpsw->napi_tx);
1919 if (cpsw->tx_irq_disabled) {
1920 cpsw->tx_irq_disabled = false;
1921 enable_irq(cpsw->irqs_table[1]);
1924 if (cpsw->rx_irq_disabled) {
1925 cpsw->rx_irq_disabled = false;
1926 enable_irq(cpsw->irqs_table[0]);
1929 ret = cpsw_fill_rx_channels(priv);
1933 if (cpts_register(cpsw->cpts))
1934 dev_err(priv->dev, "error registering cpts device\n");
1940 /* Enable Interrupt pacing if configured */
1941 if (cpsw->coal_intvl != 0) {
1942 struct ethtool_coalesce coal;
1944 coal.rx_coalesce_usecs = cpsw->coal_intvl;
1945 cpsw_set_coalesce(ndev, &coal);
1948 cpdma_ctlr_start(cpsw->dma);
1949 cpsw_intr_enable(cpsw);
1950 cpsw->usage_count++;
1955 cpdma_ctlr_stop(cpsw->dma);
1956 for_each_slave(priv, cpsw_slave_stop, cpsw);
1957 pm_runtime_put_sync(cpsw->dev);
1958 netif_carrier_off(priv->ndev);
1962 static int cpsw_ndo_stop(struct net_device *ndev)
1964 struct cpsw_priv *priv = netdev_priv(ndev);
1965 struct cpsw_common *cpsw = priv->cpsw;
1967 cpsw_info(priv, ifdown, "shutting down cpsw device\n");
1968 __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
1969 netif_tx_stop_all_queues(priv->ndev);
1970 netif_carrier_off(priv->ndev);
1972 if (cpsw->usage_count <= 1) {
1973 napi_disable(&cpsw->napi_rx);
1974 napi_disable(&cpsw->napi_tx);
1975 cpts_unregister(cpsw->cpts);
1976 cpsw_intr_disable(cpsw);
1977 cpdma_ctlr_stop(cpsw->dma);
1978 cpsw_ale_stop(cpsw->ale);
1980 for_each_slave(priv, cpsw_slave_stop, cpsw);
1982 if (cpsw_need_resplit(cpsw))
1983 cpsw_split_res(ndev);
1985 cpsw->usage_count--;
1986 pm_runtime_put_sync(cpsw->dev);
1990 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1991 struct net_device *ndev)
1993 struct cpsw_priv *priv = netdev_priv(ndev);
1994 struct cpsw_common *cpsw = priv->cpsw;
1995 struct cpts *cpts = cpsw->cpts;
1996 struct netdev_queue *txq;
1997 struct cpdma_chan *txch;
2000 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
2001 cpsw_err(priv, tx_err, "packet pad failed\n");
2002 ndev->stats.tx_dropped++;
2003 return NET_XMIT_DROP;
2006 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2007 cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
2008 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2010 q_idx = skb_get_queue_mapping(skb);
2011 if (q_idx >= cpsw->tx_ch_num)
2012 q_idx = q_idx % cpsw->tx_ch_num;
2014 txch = cpsw->txv[q_idx].ch;
2015 txq = netdev_get_tx_queue(ndev, q_idx);
2016 ret = cpsw_tx_packet_submit(priv, skb, txch);
2017 if (unlikely(ret != 0)) {
2018 cpsw_err(priv, tx_err, "desc submit failed\n");
2022 /* If there are no more free tx descriptors, we need to
2023 * tell the kernel to stop sending us tx frames.
2024 */
2025 if (unlikely(!cpdma_check_free_tx_desc(txch))) {
2026 netif_tx_stop_queue(txq);
2028 /* Barrier, so that stop_queue is visible to other cpus */
2029 smp_mb__after_atomic();
2031 if (cpdma_check_free_tx_desc(txch))
2032 netif_tx_wake_queue(txq);
2035 return NETDEV_TX_OK;
2037 ndev->stats.tx_dropped++;
2038 netif_tx_stop_queue(txq);
2040 /* Barrier, so that stop_queue is visible to other cpus */
2041 smp_mb__after_atomic();
2043 if (cpdma_check_free_tx_desc(txch))
2044 netif_tx_wake_queue(txq);
2046 return NETDEV_TX_BUSY;
2049 #if IS_ENABLED(CONFIG_TI_CPTS)
2051 static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
2053 struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
2056 if (!cpts_is_tx_enabled(cpsw->cpts) &&
2057 !cpts_is_rx_enabled(cpsw->cpts)) {
2058 slave_write(slave, 0, CPSW1_TS_CTL);
2062 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2063 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
2065 if (cpts_is_tx_enabled(cpsw->cpts))
2066 ts_en |= CPSW_V1_TS_TX_EN;
2068 if (cpts_is_rx_enabled(cpsw->cpts))
2069 ts_en |= CPSW_V1_TS_RX_EN;
2071 slave_write(slave, ts_en, CPSW1_TS_CTL);
2072 slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
2075 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
2077 struct cpsw_slave *slave;
2078 struct cpsw_common *cpsw = priv->cpsw;
2081 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
2083 ctrl = slave_read(slave, CPSW2_CONTROL);
2084 switch (cpsw->version) {
2085 case CPSW_VERSION_2:
2086 ctrl &= ~CTRL_V2_ALL_TS_MASK;
2088 if (cpts_is_tx_enabled(cpsw->cpts))
2089 ctrl |= CTRL_V2_TX_TS_BITS;
2091 if (cpts_is_rx_enabled(cpsw->cpts))
2092 ctrl |= CTRL_V2_RX_TS_BITS;
2094 case CPSW_VERSION_3:
2096 ctrl &= ~CTRL_V3_ALL_TS_MASK;
2098 if (cpts_is_tx_enabled(cpsw->cpts))
2099 ctrl |= CTRL_V3_TX_TS_BITS;
2101 if (cpts_is_rx_enabled(cpsw->cpts))
2102 ctrl |= CTRL_V3_RX_TS_BITS;
2106 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
2108 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
2109 slave_write(slave, ctrl, CPSW2_CONTROL);
2110 writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
2113 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2115 struct cpsw_priv *priv = netdev_priv(dev);
2116 struct hwtstamp_config cfg;
2117 struct cpsw_common *cpsw = priv->cpsw;
2118 struct cpts *cpts = cpsw->cpts;
2120 if (cpsw->version != CPSW_VERSION_1 &&
2121 cpsw->version != CPSW_VERSION_2 &&
2122 cpsw->version != CPSW_VERSION_3)
2125 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2128 /* reserved for future extensions */
2132 if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
2135 switch (cfg.rx_filter) {
2136 case HWTSTAMP_FILTER_NONE:
2137 cpts_rx_enable(cpts, 0);
2139 case HWTSTAMP_FILTER_ALL:
2140 case HWTSTAMP_FILTER_NTP_ALL:
2142 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2143 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2144 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2145 cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
2146 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2148 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2149 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2150 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2151 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2152 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2153 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2154 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2155 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2156 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2157 cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
2158 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2164 cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
2166 switch (cpsw->version) {
2167 case CPSW_VERSION_1:
2168 cpsw_hwtstamp_v1(cpsw);
2170 case CPSW_VERSION_2:
2171 case CPSW_VERSION_3:
2172 cpsw_hwtstamp_v2(priv);
2178 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
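/* Note: requests for the narrower V1/V2 filters (e.g.
 * HWTSTAMP_FILTER_PTP_V2_L4_SYNC) are widened to the corresponding *_EVENT
 * filter, and the filter actually programmed is reported back to user space
 * through the copy_to_user() above, as the hwtstamp ABI permits; tools such
 * as linuxptp's hwstamp_ctl can be used to exercise this ioctl.
 */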
2181 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2183 struct cpsw_common *cpsw = ndev_to_cpsw(dev);
2184 struct cpts *cpts = cpsw->cpts;
2185 struct hwtstamp_config cfg;
2187 if (cpsw->version != CPSW_VERSION_1 &&
2188 cpsw->version != CPSW_VERSION_2 &&
2189 cpsw->version != CPSW_VERSION_3)
2193 cfg.tx_type = cpts_is_tx_enabled(cpts) ?
2194 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2195 cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
2196 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
2198 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2201 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2206 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2210 #endif /*CONFIG_TI_CPTS*/
2212 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2214 struct cpsw_priv *priv = netdev_priv(dev);
2215 struct cpsw_common *cpsw = priv->cpsw;
2216 int slave_no = cpsw_slave_index(cpsw, priv);
2218 if (!netif_running(dev))
2223 return cpsw_hwtstamp_set(dev, req);
2225 return cpsw_hwtstamp_get(dev, req);
2228 if (!cpsw->slaves[slave_no].phy)
2230 return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
2233 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
2235 struct cpsw_priv *priv = netdev_priv(ndev);
2236 struct cpsw_common *cpsw = priv->cpsw;
2239 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
2240 ndev->stats.tx_errors++;
2241 cpsw_intr_disable(cpsw);
2242 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
2243 cpdma_chan_stop(cpsw->txv[ch].ch);
2244 cpdma_chan_start(cpsw->txv[ch].ch);
2247 cpsw_intr_enable(cpsw);
2248 netif_trans_update(ndev);
2249 netif_tx_wake_all_queues(ndev);
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_common *cpsw = priv->cpsw;
	int flags = 0;
	u16 vid = 0;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	}

	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);

	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
	for_each_slave(priv, cpsw_set_slave_mac, priv);

	pm_runtime_put(cpsw->dev);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif
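/* Install the ALE VLAN entry together with the matching host-port unicast
 * and broadcast multicast entries for a new VLAN; entries added so far are
 * rolled back if a later step fails.
 */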
static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
					  unsigned short vid)
{
	int ret;
	int unreg_mcast_mask = 0;
	u32 port_mask;
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->data.dual_emac) {
		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = port_mask;
	} else {
		port_mask = ALE_ALL_PORTS;
		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = ALE_ALL_PORTS;
		else
			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
	}

	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 port_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		/* In dual EMAC, reserved VLAN id should not be used for
		 * creating VLAN interfaces as this can break the dual
		 * EMAC port separation
		 */
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan) {
				ret = -EINVAL;
				goto err;
			}
		}
	}

	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan)
				goto err;
		}
	}

	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
				  HOST_PORT_NUM, ALE_VLAN, vid);
	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				  0, ALE_VLAN, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
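/* ndo_set_tx_maxrate handler: validate the requested per-queue rate
 * against the cpdma minimum and the current port speed, program the tx
 * channel shaper and mirror the value into every slave's tx queue before
 * re-splitting channel resources.
 */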
static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if ((ch_rate < min_rate && ch_rate)) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);
	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;
		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(ndev);
	return 0;
}
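/* TC_SETUP_QDISC_MQPRIO handler: build the 8-entry priority-to-fifo map
 * from the mqprio configuration and write it to the slave's TX_PRI_MAP
 * register (offset differs between CPSW v1 and v2+).
 */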
static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			     void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}
2544 static const struct net_device_ops cpsw_netdev_ops = {
2545 .ndo_open = cpsw_ndo_open,
2546 .ndo_stop = cpsw_ndo_stop,
2547 .ndo_start_xmit = cpsw_ndo_start_xmit,
2548 .ndo_set_mac_address = cpsw_ndo_set_mac_address,
2549 .ndo_do_ioctl = cpsw_ndo_ioctl,
2550 .ndo_validate_addr = eth_validate_addr,
2551 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
2552 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
2553 .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc		= cpsw_ndo_setup_tc,
};
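/* ethtool "regs" support: the blob handed to user space is a raw dump of
 * the ALE table, hence the length of ale_entries * ALE_ENTRY_WORDS 32-bit
 * words reported by cpsw_get_regs_len().
 */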
static int cpsw_get_regs_len(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
}

static void cpsw_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* update CPSW IP version */
	regs->version = cpsw->version;

	cpsw_ale_dump(cpsw->ale, reg);
}

static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct platform_device *pdev = to_platform_device(cpsw->dev);

	strlcpy(info->driver, "cpsw", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	return priv->msg_enable;
}

static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	priv->msg_enable = value;
}
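/* Timestamping capability report for ethtool: with CONFIG_TI_CPTS the CPTS
 * PTP clock index and the supported hardware rx filters are advertised;
 * otherwise only software timestamping is reported.
 */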
#if IS_ENABLED(CONFIG_TI_CPTS)
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = cpsw->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif
2642 static int cpsw_get_link_ksettings(struct net_device *ndev,
2643 struct ethtool_link_ksettings *ecmd)
2645 struct cpsw_priv *priv = netdev_priv(ndev);
2646 struct cpsw_common *cpsw = priv->cpsw;
2647 int slave_no = cpsw_slave_index(cpsw, priv);
2649 if (!cpsw->slaves[slave_no].phy)
2652 phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
2656 static int cpsw_set_link_ksettings(struct net_device *ndev,
2657 const struct ethtool_link_ksettings *ecmd)
2659 struct cpsw_priv *priv = netdev_priv(ndev);
2660 struct cpsw_common *cpsw = priv->cpsw;
2661 int slave_no = cpsw_slave_index(cpsw, priv);
2663 if (cpsw->slaves[slave_no].phy)
2664 return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
2670 static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2672 struct cpsw_priv *priv = netdev_priv(ndev);
2673 struct cpsw_common *cpsw = priv->cpsw;
2674 int slave_no = cpsw_slave_index(cpsw, priv);
2679 if (cpsw->slaves[slave_no].phy)
2680 phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
2683 static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2685 struct cpsw_priv *priv = netdev_priv(ndev);
2686 struct cpsw_common *cpsw = priv->cpsw;
2687 int slave_no = cpsw_slave_index(cpsw, priv);
2689 if (cpsw->slaves[slave_no].phy)
2690 return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
2695 static void cpsw_get_pauseparam(struct net_device *ndev,
2696 struct ethtool_pauseparam *pause)
2698 struct cpsw_priv *priv = netdev_priv(ndev);
2700 pause->autoneg = AUTONEG_DISABLE;
2701 pause->rx_pause = priv->rx_pause ? true : false;
2702 pause->tx_pause = priv->tx_pause ? true : false;
2705 static int cpsw_set_pauseparam(struct net_device *ndev,
2706 struct ethtool_pauseparam *pause)
2708 struct cpsw_priv *priv = netdev_priv(ndev);
2711 priv->rx_pause = pause->rx_pause ? true : false;
2712 priv->tx_pause = pause->tx_pause ? true : false;
2714 for_each_slave(priv, _cpsw_adjust_link, priv, &link);
2718 static int cpsw_ethtool_op_begin(struct net_device *ndev)
2720 struct cpsw_priv *priv = netdev_priv(ndev);
2721 struct cpsw_common *cpsw = priv->cpsw;
2724 ret = pm_runtime_get_sync(cpsw->dev);
2726 cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
2727 pm_runtime_put_noidle(cpsw->dev);
2733 static void cpsw_ethtool_op_complete(struct net_device *ndev)
2735 struct cpsw_priv *priv = netdev_priv(ndev);
2738 ret = pm_runtime_put(priv->cpsw->dev);
2740 cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
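/* ethtool channel (multi-queue) handling: up to CPSW_MAX_QUEUES tx and rx
 * cpdma channels are supported, limited to a single one of each when the
 * interrupt quirk is active.
 */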
static void cpsw_get_channels(struct net_device *ndev,
			      struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}
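/* Create or destroy cpdma channels until the requested count is reached.
 * rx channels use their index directly while tx channels are taken from
 * the top of the hardware channel range (vch = 7 - ch).
 */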
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = cpsw_rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	while (*ch < ch_num) {
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);
		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;
		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}

static int cpsw_update_channels(struct cpsw_priv *priv,
				struct ethtool_channels *ch)
{
	int ret;

	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
	if (ret)
		return ret;

	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
	if (ret)
		return ret;

	return 0;
}
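/* The two helpers below pause and resume all data transfer while channel
 * or descriptor counts are changed at runtime: stop the queues and cpdma,
 * reconfigure, then refill rx descriptors and restart.
 */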
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_slave *slave;
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device.
	 * Disable re-using rx descriptors with dormant_on.
	 */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		netif_tx_stop_all_queues(slave->ndev);
		netif_dormant_on(slave->ndev);
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	/* Allow rx packets handling */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_dormant_off(slave->ndev);

	/* After this receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_tx_start_all_queues(slave->ndev);

	return 0;
}

static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);
	ret = cpsw_update_channels(priv, chs);
	if (ret)
		goto err;

	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(slave->ndev,
						   cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(slave->ndev,
						   cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	if (cpsw->usage_count)
		cpsw_split_res(ndev);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	dev_close(ndev);
	return ret;
}
2948 static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
2950 struct cpsw_priv *priv = netdev_priv(ndev);
2951 struct cpsw_common *cpsw = priv->cpsw;
2952 int slave_no = cpsw_slave_index(cpsw, priv);
2954 if (cpsw->slaves[slave_no].phy)
2955 return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
2960 static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
2962 struct cpsw_priv *priv = netdev_priv(ndev);
2963 struct cpsw_common *cpsw = priv->cpsw;
2964 int slave_no = cpsw_slave_index(cpsw, priv);
2966 if (cpsw->slaves[slave_no].phy)
2967 return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
2972 static int cpsw_nway_reset(struct net_device *ndev)
2974 struct cpsw_priv *priv = netdev_priv(ndev);
2975 struct cpsw_common *cpsw = priv->cpsw;
2976 int slave_no = cpsw_slave_index(cpsw, priv);
2978 if (cpsw->slaves[slave_no].phy)
2979 return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
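/* ethtool ring parameters: only the number of rx descriptors can be tuned;
 * the descriptor pool is re-split between channels while data transfer is
 * suspended.
 */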
2984 static void cpsw_get_ringparam(struct net_device *ndev,
2985 struct ethtool_ringparam *ering)
2987 struct cpsw_priv *priv = netdev_priv(ndev);
2988 struct cpsw_common *cpsw = priv->cpsw;
2991 ering->tx_max_pending = 0;
2992 ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
2993 ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
2994 ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
2997 static int cpsw_set_ringparam(struct net_device *ndev,
2998 struct ethtool_ringparam *ering)
3000 struct cpsw_priv *priv = netdev_priv(ndev);
3001 struct cpsw_common *cpsw = priv->cpsw;
3004 /* ignore ering->tx_pending - only rx_pending adjustment is supported */
3006 if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
3007 ering->rx_pending < CPSW_MAX_QUEUES ||
3008 ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
3011 if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
3014 cpsw_suspend_data_pass(ndev);
3016 cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
3018 if (cpsw->usage_count)
3019 cpdma_chan_split_pool(cpsw->dma);
3021 ret = cpsw_resume_data_pass(ndev);
3025 dev_err(&ndev->dev, "cannot set ring params, closing device\n");
3030 static const struct ethtool_ops cpsw_ethtool_ops = {
3031 .get_drvinfo = cpsw_get_drvinfo,
3032 .get_msglevel = cpsw_get_msglevel,
3033 .set_msglevel = cpsw_set_msglevel,
3034 .get_link = ethtool_op_get_link,
3035 .get_ts_info = cpsw_get_ts_info,
3036 .get_coalesce = cpsw_get_coalesce,
3037 .set_coalesce = cpsw_set_coalesce,
3038 .get_sset_count = cpsw_get_sset_count,
3039 .get_strings = cpsw_get_strings,
3040 .get_ethtool_stats = cpsw_get_ethtool_stats,
3041 .get_pauseparam = cpsw_get_pauseparam,
3042 .set_pauseparam = cpsw_set_pauseparam,
3043 .get_wol = cpsw_get_wol,
3044 .set_wol = cpsw_set_wol,
3045 .get_regs_len = cpsw_get_regs_len,
3046 .get_regs = cpsw_get_regs,
3047 .begin = cpsw_ethtool_op_begin,
3048 .complete = cpsw_ethtool_op_complete,
3049 .get_channels = cpsw_get_channels,
3050 .set_channels = cpsw_set_channels,
3051 .get_link_ksettings = cpsw_get_link_ksettings,
3052 .set_link_ksettings = cpsw_set_link_ksettings,
3053 .get_eee = cpsw_get_eee,
3054 .set_eee = cpsw_set_eee,
3055 .nway_reset = cpsw_nway_reset,
3056 .get_ringparam = cpsw_get_ringparam,
	.set_ringparam		= cpsw_set_ringparam,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
	void __iomem		*regs = cpsw->regs;
	int			slave_num = slave->slave_num;
	struct cpsw_slave_data	*data = cpsw->data.slave_data + slave_num;

	slave->data	= data;
	slave->regs	= regs + slave_reg_ofs;
	slave->sliver	= regs + sliver_reg_ofs;
	slave->port_vlan = data->dual_emac_res_vlan;
}
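/* Parse the cpsw device tree node: the switch-wide properties first, then
 * one "slave" child node per external port (phy-handle/phy_id/fixed-link,
 * phy-mode, MAC address and, in dual EMAC mode, the reserved port VLAN).
 */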
static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
		return -EINVAL;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;

	if (of_property_read_bool(node, "dual_emac"))
		data->dual_emac = 1;

	/*
	 * Populate all the child nodes here...
	 */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");
	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		int lenp;
		const __be32 *parp;

		/* This is no slave child node, continue */
		if (strcmp(slave_node->name, "slave"))
			continue;

		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
				return ret;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp+1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				return -EINVAL;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);
		if (slave_data->phy_if < 0) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			return slave_data->phy_if;
		}

no_phy_slave:
		mac_addr = of_get_mac_address(slave_node);
		if (mac_addr) {
			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
		} else {
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				return ret;
		}
		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				slave_data->dual_emac_res_vlan = i+1;
				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves)
			break;
	}

	return 0;
}
static void cpsw_remove_dt(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_platform_data *data = &cpsw->data;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0;

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];

		if (strcmp(slave_node->name, "slave"))
			continue;

		if (of_phy_is_fixed_link(slave_node))
			of_phy_deregister_fixed_link(slave_node);

		of_node_put(slave_data->phy_node);

		i++;
		if (i == data->slaves)
			break;
	}

	of_platform_depopulate(&pdev->dev);
}
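/* In dual EMAC mode a second net_device is created for slave 1; it shares
 * the common cpsw state but has its own MAC address and emac_port.
 */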
static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
	struct cpsw_common		*cpsw = priv->cpsw;
	struct cpsw_platform_data	*data = &cpsw->data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv_sl2;
	int ret = 0;

	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
		return -ENOMEM;
	}

	priv_sl2 = netdev_priv(ndev);
	priv_sl2->cpsw = cpsw;
	priv_sl2->ndev = ndev;
	priv_sl2->dev  = &ndev->dev;
	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);

	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
		       ETH_ALEN);
		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
			 priv_sl2->mac_addr);
	} else {
		eth_random_addr(priv_sl2->mac_addr);
		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
			 priv_sl2->mac_addr);
	}
	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);

	priv_sl2->emac_port = 1;
	cpsw->slaves[1].ndev = ndev;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;

	/* register the network device */
	SET_NETDEV_DEV(ndev, cpsw->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(cpsw->dev, "cpsw: error registering net device\n");
		free_netdev(ndev);
		ret = -ENODEV;
	}

	return ret;
}
3308 static const struct of_device_id cpsw_of_mtable[] = {
3309 { .compatible = "ti,cpsw"},