drivers/net/ethernet/hisilicon/hip04_eth.c
1
2 /* Copyright (c) 2014 Linaro Ltd.
3  * Copyright (c) 2014 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  */
10
11 #include <linux/module.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/ktime.h>
16 #include <linux/of_address.h>
17 #include <linux/phy.h>
18 #include <linux/of_mdio.h>
19 #include <linux/of_net.h>
20 #include <linux/mfd/syscon.h>
21 #include <linux/regmap.h>
22
23 #define PPE_CFG_RX_ADDR                 0x100
24 #define PPE_CFG_POOL_GRP                0x300
25 #define PPE_CFG_RX_BUF_SIZE             0x400
26 #define PPE_CFG_RX_FIFO_SIZE            0x500
27 #define PPE_CURR_BUF_CNT                0xa200
28
29 #define GE_DUPLEX_TYPE                  0x08
30 #define GE_MAX_FRM_SIZE_REG             0x3c
31 #define GE_PORT_MODE                    0x40
32 #define GE_PORT_EN                      0x44
33 #define GE_SHORT_RUNTS_THR_REG          0x50
34 #define GE_TX_LOCAL_PAGE_REG            0x5c
35 #define GE_TRANSMIT_CONTROL_REG         0x60
36 #define GE_CF_CRC_STRIP_REG             0x1b0
37 #define GE_MODE_CHANGE_REG              0x1b4
38 #define GE_RECV_CONTROL_REG             0x1e0
39 #define GE_STATION_MAC_ADDRESS          0x210
40 #define PPE_CFG_CPU_ADD_ADDR            0x580
41 #define PPE_CFG_MAX_FRAME_LEN_REG       0x408
42 #define PPE_CFG_BUS_CTRL_REG            0x424
43 #define PPE_CFG_RX_CTRL_REG             0x428
44 #define PPE_CFG_RX_PKT_MODE_REG         0x438
45 #define PPE_CFG_QOS_VMID_GEN            0x500
46 #define PPE_CFG_RX_PKT_INT              0x538
47 #define PPE_INTEN                       0x600
48 #define PPE_INTSTS                      0x608
49 #define PPE_RINT                        0x604
50 #define PPE_CFG_STS_MODE                0x700
51 #define PPE_HIS_RX_PKT_CNT              0x804
52
53 /* REG_INTERRUPT */
54 #define RCV_INT                         BIT(10)
55 #define RCV_NOBUF                       BIT(8)
56 #define RCV_DROP                        BIT(7)
57 #define TX_DROP                         BIT(6)
58 #define DEF_INT_ERR                     (RCV_NOBUF | RCV_DROP | TX_DROP)
59 #define DEF_INT_MASK                    (RCV_INT | DEF_INT_ERR)
60
61 /* TX descriptor config */
62 #define TX_FREE_MEM                     BIT(0)
63 #define TX_READ_ALLOC_L3                BIT(1)
64 #define TX_FINISH_CACHE_INV             BIT(2)
65 #define TX_CLEAR_WB                     BIT(4)
66 #define TX_L3_CHECKSUM                  BIT(5)
67 #define TX_LOOP_BACK                    BIT(11)
68
69 /* RX error */
70 #define RX_PKT_DROP                     BIT(0)
71 #define RX_L2_ERR                       BIT(1)
72 #define RX_PKT_ERR                      (RX_PKT_DROP | RX_L2_ERR)
73
74 #define SGMII_SPEED_1000                0x08
75 #define SGMII_SPEED_100                 0x07
76 #define SGMII_SPEED_10                  0x06
77 #define MII_SPEED_100                   0x01
78 #define MII_SPEED_10                    0x00
79
80 #define GE_DUPLEX_FULL                  BIT(0)
81 #define GE_DUPLEX_HALF                  0x00
82 #define GE_MODE_CHANGE_EN               BIT(0)
83
84 #define GE_TX_AUTO_NEG                  BIT(5)
85 #define GE_TX_ADD_CRC                   BIT(6)
86 #define GE_TX_SHORT_PAD_THROUGH         BIT(7)
87
88 #define GE_RX_STRIP_CRC                 BIT(0)
89 #define GE_RX_STRIP_PAD                 BIT(3)
90 #define GE_RX_PAD_EN                    BIT(4)
91
92 #define GE_AUTO_NEG_CTL                 BIT(0)
93
94 #define GE_RX_INT_THRESHOLD             BIT(6)
95 #define GE_RX_TIMEOUT                   0x04
96
97 #define GE_RX_PORT_EN                   BIT(1)
98 #define GE_TX_PORT_EN                   BIT(2)
99
100 #define PPE_CFG_STS_RX_PKT_CNT_RC       BIT(12)
101
102 #define PPE_CFG_RX_PKT_ALIGN            BIT(18)
103 #define PPE_CFG_QOS_VMID_MODE           BIT(14)
104 #define PPE_CFG_QOS_VMID_GRP_SHIFT      8
105
106 #define PPE_CFG_RX_FIFO_FSFU            BIT(11)
107 #define PPE_CFG_RX_DEPTH_SHIFT          16
108 #define PPE_CFG_RX_START_SHIFT          0
109 #define PPE_CFG_RX_CTRL_ALIGN_SHIFT     11
110
111 #define PPE_CFG_BUS_LOCAL_REL           BIT(14)
112 #define PPE_CFG_BUS_BIG_ENDIAN          BIT(0)
113
114 #define RX_DESC_NUM                     128
115 #define TX_DESC_NUM                     256
116 #define TX_NEXT(N)                      (((N) + 1) & (TX_DESC_NUM-1))
117 #define RX_NEXT(N)                      (((N) + 1) & (RX_DESC_NUM-1))
118
119 #define GMAC_PPE_RX_PKT_MAX_LEN         379
120 #define GMAC_MAX_PKT_LEN                1516
121 #define GMAC_MIN_PKT_LEN                31
122 #define RX_BUF_SIZE                     1600
123 #define RESET_TIMEOUT                   1000
124 #define TX_TIMEOUT                      (6 * HZ)
125
126 #define DRV_NAME                        "hip04-ether"
127 #define DRV_VERSION                     "v1.0"
128
129 #define HIP04_MAX_TX_COALESCE_USECS     200
130 #define HIP04_MIN_TX_COALESCE_USECS     100
131 #define HIP04_MAX_TX_COALESCE_FRAMES    200
132 #define HIP04_MIN_TX_COALESCE_FRAMES    100
133
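/* TX descriptors are consumed by the hardware; the fields used are
 * programmed big-endian in hip04_mac_start_xmit() and each descriptor is
 * padded out to a 64-byte cache line.
 */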
134 struct tx_desc {
135         u32 send_addr;
136         u32 send_size;
137         u32 next_addr;
138         u32 cfg;
139         u32 wb_addr;
140 } __aligned(64);
141
142 struct rx_desc {
143         u16 reserved_16;
144         u16 pkt_len;
145         u32 reserve1[3];
146         u32 pkt_err;
147         u32 reserve2[4];
148 };
149
150 struct hip04_priv {
151         void __iomem *base;
152         int phy_mode;
153         int chan;
154         unsigned int port;
155         unsigned int speed;
156         unsigned int duplex;
157         unsigned int reg_inten;
158
159         struct napi_struct napi;
160         struct net_device *ndev;
161
162         struct tx_desc *tx_desc;
163         dma_addr_t tx_desc_dma;
164         struct sk_buff *tx_skb[TX_DESC_NUM];
165         dma_addr_t tx_phys[TX_DESC_NUM];
166         unsigned int tx_head;
167
168         int tx_coalesce_frames;
169         int tx_coalesce_usecs;
170         struct hrtimer tx_coalesce_timer;
171
172         unsigned char *rx_buf[RX_DESC_NUM];
173         dma_addr_t rx_phys[RX_DESC_NUM];
174         unsigned int rx_head;
175         unsigned int rx_buf_size;
176
177         struct device_node *phy_node;
178         struct phy_device *phy;
179         struct regmap *map;
180         struct work_struct tx_timeout_task;
181
182         /* written only by tx cleanup */
183         unsigned int tx_tail ____cacheline_aligned_in_smp;
184 };
185
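/* Number of descriptors in flight between the xmit head and the reclaim
 * tail; the queue is stopped once this reaches TX_DESC_NUM - 1.
 */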
186 static inline unsigned int tx_count(unsigned int head, unsigned int tail)
187 {
188         return (head - tail) % (TX_DESC_NUM - 1);
189 }
190
191 static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
192 {
193         struct hip04_priv *priv = netdev_priv(ndev);
194         u32 val;
195
196         priv->speed = speed;
197         priv->duplex = duplex;
198
199         switch (priv->phy_mode) {
200         case PHY_INTERFACE_MODE_SGMII:
201                 if (speed == SPEED_1000)
202                         val = SGMII_SPEED_1000;
203                 else if (speed == SPEED_100)
204                         val = SGMII_SPEED_100;
205                 else
206                         val = SGMII_SPEED_10;
207                 break;
208         case PHY_INTERFACE_MODE_MII:
209                 if (speed == SPEED_100)
210                         val = MII_SPEED_100;
211                 else
212                         val = MII_SPEED_10;
213                 break;
214         default:
215                 netdev_warn(ndev, "unsupported phy mode\n");
216                 val = MII_SPEED_10;
217                 break;
218         }
219         writel_relaxed(val, priv->base + GE_PORT_MODE);
220
221         val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
222         writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
223
224         val = GE_MODE_CHANGE_EN;
225         writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
226 }
227
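/* Drain the PPE for this port: keep reading the current buffer count and
 * RX address registers until the count clears or RESET_TIMEOUT reads pass.
 */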
228 static void hip04_reset_ppe(struct hip04_priv *priv)
229 {
230         u32 val, tmp, timeout = 0;
231
232         do {
233                 regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
234                 regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
235                 if (timeout++ > RESET_TIMEOUT)
236                         break;
237         } while (val & 0xfff);
238 }
239
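/* One-time port setup: read-clear mode for the RX packet counter, buffer
 * pool group and QoS VMID for this port, RX buffer and FIFO sizes via the
 * syscon regmap, and the GMAC frame length, CRC, padding and autoneg bits.
 */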
240 static void hip04_config_fifo(struct hip04_priv *priv)
241 {
242         u32 val;
243
244         val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
245         val |= PPE_CFG_STS_RX_PKT_CNT_RC;
246         writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
247
248         val = BIT(priv->port);
249         regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
250
251         val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
252         val |= PPE_CFG_QOS_VMID_MODE;
253         writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
254
255         val = RX_BUF_SIZE;
256         regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
257
258         val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
259         val |= PPE_CFG_RX_FIFO_FSFU;
260         val |= priv->chan << PPE_CFG_RX_START_SHIFT;
261         regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
262
263         val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
264         writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
265
266         val = PPE_CFG_RX_PKT_ALIGN;
267         writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
268
269         val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIAN;
270         writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
271
272         val = GMAC_PPE_RX_PKT_MAX_LEN;
273         writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
274
275         val = GMAC_MAX_PKT_LEN;
276         writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
277
278         val = GMAC_MIN_PKT_LEN;
279         writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
280
281         val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
282         val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
283         writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
284
285         val = GE_RX_STRIP_CRC;
286         writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
287
288         val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
289         val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
290         writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
291
292         val = GE_AUTO_NEG_CTL;
293         writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
294 }
295
296 static void hip04_mac_enable(struct net_device *ndev)
297 {
298         struct hip04_priv *priv = netdev_priv(ndev);
299         u32 val;
300
301         /* enable tx & rx */
302         val = readl_relaxed(priv->base + GE_PORT_EN);
303         val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
304         writel_relaxed(val, priv->base + GE_PORT_EN);
305
306         /* clear rx int */
307         val = RCV_INT;
308         writel_relaxed(val, priv->base + PPE_RINT);
309
310         /* config recv int */
311         val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
312         writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
313
314         /* enable interrupt */
315         priv->reg_inten = DEF_INT_MASK;
316         writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
317 }
318
319 static void hip04_mac_disable(struct net_device *ndev)
320 {
321         struct hip04_priv *priv = netdev_priv(ndev);
322         u32 val;
323
324         /* disable int */
325         priv->reg_inten &= ~(DEF_INT_MASK);
326         writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
327
328         /* disable tx & rx */
329         val = readl_relaxed(priv->base + GE_PORT_EN);
330         val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
331         writel_relaxed(val, priv->base + GE_PORT_EN);
332 }
333
334 static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
335 {
336         writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
337 }
338
339 static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
340 {
341         regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
342 }
343
344 static u32 hip04_recv_cnt(struct hip04_priv *priv)
345 {
346         return readl(priv->base + PPE_HIS_RX_PKT_CNT);
347 }
348
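/* The station MAC address is split across two registers: bytes 0-1 in the
 * low half of GE_STATION_MAC_ADDRESS, bytes 2-5 in the following word.
 */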
349 static void hip04_update_mac_address(struct net_device *ndev)
350 {
351         struct hip04_priv *priv = netdev_priv(ndev);
352
353         writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
354                        priv->base + GE_STATION_MAC_ADDRESS);
355         writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
356                         (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
357                        priv->base + GE_STATION_MAC_ADDRESS + 4);
358 }
359
360 static int hip04_set_mac_address(struct net_device *ndev, void *addr)
361 {
362         eth_mac_addr(ndev, addr);
363         hip04_update_mac_address(ndev);
364         return 0;
365 }
366
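/* Reclaim completed TX descriptors starting at tx_tail: a non-zero
 * send_addr means the hardware has not written the descriptor back yet, so
 * stop there unless @force (used from ndo_stop). Completed skbs are
 * unmapped, freed and reported to BQL, and the queue is woken if it was
 * stopped. Returns the number of descriptors still outstanding.
 */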
367 static int hip04_tx_reclaim(struct net_device *ndev, bool force)
368 {
369         struct hip04_priv *priv = netdev_priv(ndev);
370         unsigned tx_tail = priv->tx_tail;
371         struct tx_desc *desc;
372         unsigned int bytes_compl = 0, pkts_compl = 0;
373         unsigned int count;
374
375         smp_rmb();
376         count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
377         if (count == 0)
378                 goto out;
379
380         while (count) {
381                 desc = &priv->tx_desc[tx_tail];
382                 if (desc->send_addr != 0) {
383                         if (force)
384                                 desc->send_addr = 0;
385                         else
386                                 break;
387                 }
388
389                 if (priv->tx_phys[tx_tail]) {
390                         dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
391                                          priv->tx_skb[tx_tail]->len,
392                                          DMA_TO_DEVICE);
393                         priv->tx_phys[tx_tail] = 0;
394                 }
395                 pkts_compl++;
396                 bytes_compl += priv->tx_skb[tx_tail]->len;
397                 dev_kfree_skb(priv->tx_skb[tx_tail]);
398                 priv->tx_skb[tx_tail] = NULL;
399                 tx_tail = TX_NEXT(tx_tail);
400                 count--;
401         }
402
403         priv->tx_tail = tx_tail;
404         smp_wmb(); /* Ensure tx_tail visible to xmit */
405
406 out:
407         if (pkts_compl || bytes_compl)
408                 netdev_completed_queue(ndev, pkts_compl, bytes_compl);
409
410         if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
411                 netif_wake_queue(ndev);
412
413         return count;
414 }
415
416 static void hip04_start_tx_timer(struct hip04_priv *priv)
417 {
418         unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;
419
420         /* allow timer to fire after half the time at the earliest */
421         hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
422                                ns, HRTIMER_MODE_REL);
423 }
424
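/* One descriptor per skb; descriptor fields are stored big-endian for the
 * hardware. Once tx_coalesce_frames packets are outstanding, mask the RX
 * interrupt and schedule NAPI so reclaim runs immediately; otherwise arm
 * the coalesce hrtimer.
 */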
425 static netdev_tx_t
426 hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
427 {
428         struct hip04_priv *priv = netdev_priv(ndev);
429         struct net_device_stats *stats = &ndev->stats;
430         unsigned int tx_head = priv->tx_head, count;
431         struct tx_desc *desc = &priv->tx_desc[tx_head];
432         dma_addr_t phys;
433
434         smp_rmb();
435         count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
436         if (count == (TX_DESC_NUM - 1)) {
437                 netif_stop_queue(ndev);
438                 return NETDEV_TX_BUSY;
439         }
440
441         phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
442         if (dma_mapping_error(&ndev->dev, phys)) {
443                 dev_kfree_skb(skb);
444                 return NETDEV_TX_OK;
445         }
446
447         priv->tx_skb[tx_head] = skb;
448         priv->tx_phys[tx_head] = phys;
449         desc->send_addr = cpu_to_be32(phys);
450         desc->send_size = cpu_to_be32(skb->len);
451         desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
452         phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
453         desc->wb_addr = cpu_to_be32(phys);
454         skb_tx_timestamp(skb);
455
456         hip04_set_xmit_desc(priv, phys);
457         priv->tx_head = TX_NEXT(tx_head);
458         count++;
459         netdev_sent_queue(ndev, skb->len);
460
461         stats->tx_bytes += skb->len;
462         stats->tx_packets++;
463
464         /* Ensure tx_head update visible to tx reclaim */
465         smp_wmb();
466
467         /* queue is getting full, better start cleaning up now */
468         if (count >= priv->tx_coalesce_frames) {
469                 if (napi_schedule_prep(&priv->napi)) {
470                         /* disable rx interrupt and timer */
471                         priv->reg_inten &= ~(RCV_INT);
472                         writel_relaxed(DEF_INT_MASK & ~RCV_INT,
473                                        priv->base + PPE_INTEN);
474                         hrtimer_cancel(&priv->tx_coalesce_timer);
475                         __napi_schedule(&priv->napi);
476                 }
477         } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
478                 /* cleanup not pending yet, start a new timer */
479                 hip04_start_tx_timer(priv);
480         }
481
482         return NETDEV_TX_OK;
483 }
484
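/* NAPI poll: the hardware packet counter tells how many RX buffers are
 * filled. Each one becomes an skb via build_skb(), then the ring slot is
 * refilled with a fresh fragment and handed back to the PPE. The RX
 * interrupt is re-enabled and NAPI completed once the ring drains within
 * budget; on budget exhaustion or allocation failure the poll returns
 * without completing. TX reclaim always runs at the end.
 */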
485 static int hip04_rx_poll(struct napi_struct *napi, int budget)
486 {
487         struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
488         struct net_device *ndev = priv->ndev;
489         struct net_device_stats *stats = &ndev->stats;
490         unsigned int cnt = hip04_recv_cnt(priv);
491         struct rx_desc *desc;
492         struct sk_buff *skb;
493         unsigned char *buf;
494         bool last = false;
495         dma_addr_t phys;
496         int rx = 0;
497         int tx_remaining;
498         u16 len;
499         u32 err;
500
501         while (cnt && !last) {
502                 buf = priv->rx_buf[priv->rx_head];
503                 skb = build_skb(buf, priv->rx_buf_size);
504                 if (unlikely(!skb)) {
505                         net_dbg_ratelimited("build_skb failed\n");
506                         goto refill;
507                 }
508
509                 dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
510                                  RX_BUF_SIZE, DMA_FROM_DEVICE);
511                 priv->rx_phys[priv->rx_head] = 0;
512
513                 desc = (struct rx_desc *)skb->data;
514                 len = be16_to_cpu(desc->pkt_len);
515                 err = be32_to_cpu(desc->pkt_err);
516
517                 if (len == 0) {
518                         dev_kfree_skb_any(skb);
519                         last = true;
520                 } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
521                         dev_kfree_skb_any(skb);
522                         stats->rx_dropped++;
523                         stats->rx_errors++;
524                 } else {
525                         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
526                         skb_put(skb, len);
527                         skb->protocol = eth_type_trans(skb, ndev);
528                         napi_gro_receive(&priv->napi, skb);
529                         stats->rx_packets++;
530                         stats->rx_bytes += len;
531                         rx++;
532                 }
533
534 refill:
535                 buf = netdev_alloc_frag(priv->rx_buf_size);
536                 if (!buf)
537                         goto done;
538                 phys = dma_map_single(&ndev->dev, buf,
539                                       RX_BUF_SIZE, DMA_FROM_DEVICE);
540                 if (dma_mapping_error(&ndev->dev, phys))
541                         goto done;
542                 priv->rx_buf[priv->rx_head] = buf;
543                 priv->rx_phys[priv->rx_head] = phys;
544                 hip04_set_recv_desc(priv, phys);
545
546                 priv->rx_head = RX_NEXT(priv->rx_head);
547                 if (rx >= budget)
548                         goto done;
549
550                 if (--cnt == 0)
551                         cnt = hip04_recv_cnt(priv);
552         }
553
554         if (!(priv->reg_inten & RCV_INT)) {
555                 /* enable rx interrupt */
556                 priv->reg_inten |= RCV_INT;
557                 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
558         }
559         napi_complete_done(napi, rx);
560 done:
561         /* clean up tx descriptors and start a new timer if necessary */
562         tx_remaining = hip04_tx_reclaim(ndev, false);
563         if (rx < budget && tx_remaining)
564                 hip04_start_tx_timer(priv);
565
566         return rx;
567 }
568
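/* Ack the raw interrupt status, account RX/TX drop errors, and hand RX
 * work to NAPI with the RX interrupt masked and the coalesce timer
 * cancelled.
 */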
569 static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
570 {
571         struct net_device *ndev = (struct net_device *)dev_id;
572         struct hip04_priv *priv = netdev_priv(ndev);
573         struct net_device_stats *stats = &ndev->stats;
574         u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
575
576         if (!ists)
577                 return IRQ_NONE;
578
579         writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
580
581         if (unlikely(ists & DEF_INT_ERR)) {
582                 if (ists & (RCV_NOBUF | RCV_DROP)) {
583                         stats->rx_errors++;
584                         stats->rx_dropped++;
585                         netdev_err(ndev, "rx drop\n");
586                 }
587                 if (ists & TX_DROP) {
588                         stats->tx_dropped++;
589                         netdev_err(ndev, "tx drop\n");
590                 }
591         }
592
593         if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
594                 /* disable rx interrupt */
595                 priv->reg_inten &= ~(RCV_INT);
596                 writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
597                 hrtimer_cancel(&priv->tx_coalesce_timer);
598                 __napi_schedule(&priv->napi);
599         }
600
601         return IRQ_HANDLED;
602 }
603
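/* TX coalesce timer expired: schedule NAPI so hip04_rx_poll() runs TX
 * reclaim even when no further packets arrive.
 */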
604 static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
605 {
606         struct hip04_priv *priv;
607
608         priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
609
610         if (napi_schedule_prep(&priv->napi)) {
611                 /* disable rx interrupt */
612                 priv->reg_inten &= ~(RCV_INT);
613                 writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
614                 __napi_schedule(&priv->napi);
615         }
616
617         return HRTIMER_NORESTART;
618 }
619
620 static void hip04_adjust_link(struct net_device *ndev)
621 {
622         struct hip04_priv *priv = netdev_priv(ndev);
623         struct phy_device *phy = priv->phy;
624
625         if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
626                 hip04_config_port(ndev, phy->speed, phy->duplex);
627                 phy_print_status(phy);
628         }
629 }
630
631 static int hip04_mac_open(struct net_device *ndev)
632 {
633         struct hip04_priv *priv = netdev_priv(ndev);
634         int i;
635
636         priv->rx_head = 0;
637         priv->tx_head = 0;
638         priv->tx_tail = 0;
639         hip04_reset_ppe(priv);
640
641         for (i = 0; i < RX_DESC_NUM; i++) {
642                 dma_addr_t phys;
643
644                 phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
645                                       RX_BUF_SIZE, DMA_FROM_DEVICE);
646                 if (dma_mapping_error(&ndev->dev, phys))
647                         return -EIO;
648
649                 priv->rx_phys[i] = phys;
650                 hip04_set_recv_desc(priv, phys);
651         }
652
653         if (priv->phy)
654                 phy_start(priv->phy);
655
656         netdev_reset_queue(ndev);
657         netif_start_queue(ndev);
658         hip04_mac_enable(ndev);
659         napi_enable(&priv->napi);
660
661         return 0;
662 }
663
664 static int hip04_mac_stop(struct net_device *ndev)
665 {
666         struct hip04_priv *priv = netdev_priv(ndev);
667         int i;
668
669         napi_disable(&priv->napi);
670         netif_stop_queue(ndev);
671         hip04_mac_disable(ndev);
672         hip04_tx_reclaim(ndev, true);
673         hip04_reset_ppe(priv);
674
675         if (priv->phy)
676                 phy_stop(priv->phy);
677
678         for (i = 0; i < RX_DESC_NUM; i++) {
679                 if (priv->rx_phys[i]) {
680                         dma_unmap_single(&ndev->dev, priv->rx_phys[i],
681                                          RX_BUF_SIZE, DMA_FROM_DEVICE);
682                         priv->rx_phys[i] = 0;
683                 }
684         }
685
686         return 0;
687 }
688
689 static void hip04_timeout(struct net_device *ndev)
690 {
691         struct hip04_priv *priv = netdev_priv(ndev);
692
693         schedule_work(&priv->tx_timeout_task);
694 }
695
696 static void hip04_tx_timeout_task(struct work_struct *work)
697 {
698         struct hip04_priv *priv;
699
700         priv = container_of(work, struct hip04_priv, tx_timeout_task);
701         hip04_mac_stop(priv->ndev);
702         hip04_mac_open(priv->ndev);
703 }
704
705 static int hip04_get_coalesce(struct net_device *netdev,
706                               struct ethtool_coalesce *ec)
707 {
708         struct hip04_priv *priv = netdev_priv(netdev);
709
710         ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
711         ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
712
713         return 0;
714 }
715
716 static int hip04_set_coalesce(struct net_device *netdev,
717                               struct ethtool_coalesce *ec)
718 {
719         struct hip04_priv *priv = netdev_priv(netdev);
720
721         /* Check not supported parameters  */
722         if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
723             (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
724             (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
725             (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
726             (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
727             (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
728             (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
729             (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
730             (ec->tx_max_coalesced_frames_irq) ||
731             (ec->stats_block_coalesce_usecs) ||
732             (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
733                 return -EOPNOTSUPP;
734
735         if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
736              ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
737             (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
738              ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
739                 return -EINVAL;
740
741         priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
742         priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
743
744         return 0;
745 }
746
747 static void hip04_get_drvinfo(struct net_device *netdev,
748                               struct ethtool_drvinfo *drvinfo)
749 {
750         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
751         strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
752 }
753
754 static const struct ethtool_ops hip04_ethtool_ops = {
755         .get_coalesce           = hip04_get_coalesce,
756         .set_coalesce           = hip04_set_coalesce,
757         .get_drvinfo            = hip04_get_drvinfo,
758 };
759
760 static const struct net_device_ops hip04_netdev_ops = {
761         .ndo_open               = hip04_mac_open,
762         .ndo_stop               = hip04_mac_stop,
763         .ndo_start_xmit         = hip04_mac_start_xmit,
764         .ndo_set_mac_address    = hip04_set_mac_address,
765         .ndo_tx_timeout         = hip04_timeout,
766         .ndo_validate_addr      = eth_validate_addr,
767 };
768
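/* The TX descriptor ring lives in coherent DMA memory; RX buffers are page
 * fragments sized with room for struct skb_shared_info so build_skb() can
 * reuse them directly in the receive path.
 */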
769 static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
770 {
771         struct hip04_priv *priv = netdev_priv(ndev);
772         int i;
773
774         priv->tx_desc = dma_alloc_coherent(d,
775                                            TX_DESC_NUM * sizeof(struct tx_desc),
776                                            &priv->tx_desc_dma, GFP_KERNEL);
777         if (!priv->tx_desc)
778                 return -ENOMEM;
779
780         priv->rx_buf_size = RX_BUF_SIZE +
781                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
782         for (i = 0; i < RX_DESC_NUM; i++) {
783                 priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
784                 if (!priv->rx_buf[i])
785                         return -ENOMEM;
786         }
787
788         return 0;
789 }
790
791 static void hip04_free_ring(struct net_device *ndev, struct device *d)
792 {
793         struct hip04_priv *priv = netdev_priv(ndev);
794         int i;
795
796         for (i = 0; i < RX_DESC_NUM; i++)
797                 if (priv->rx_buf[i])
798                         skb_free_frag(priv->rx_buf[i]);
799
800         for (i = 0; i < TX_DESC_NUM; i++)
801                 if (priv->tx_skb[i])
802                         dev_kfree_skb_any(priv->tx_skb[i]);
803
804         dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
805                           priv->tx_desc, priv->tx_desc_dma);
806 }
807
808 static int hip04_mac_probe(struct platform_device *pdev)
809 {
810         struct device *d = &pdev->dev;
811         struct device_node *node = d->of_node;
812         struct of_phandle_args arg;
813         struct net_device *ndev;
814         struct hip04_priv *priv;
815         struct resource *res;
816         int irq;
817         int ret;
818
819         ndev = alloc_etherdev(sizeof(struct hip04_priv));
820         if (!ndev)
821                 return -ENOMEM;
822
823         priv = netdev_priv(ndev);
824         priv->ndev = ndev;
825         platform_set_drvdata(pdev, ndev);
826         SET_NETDEV_DEV(ndev, &pdev->dev);
827
828         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
829         priv->base = devm_ioremap_resource(d, res);
830         if (IS_ERR(priv->base)) {
831                 ret = PTR_ERR(priv->base);
832                 goto init_fail;
833         }
834
835         ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
836         if (ret < 0) {
837                 dev_warn(d, "no port-handle\n");
838                 goto init_fail;
839         }
840
841         priv->port = arg.args[0];
842         priv->chan = arg.args[1] * RX_DESC_NUM;
843
844         hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
845
846         /* BQL will try to keep the TX queue as short as possible, but it can't
847          * be faster than tx_coalesce_usecs, so we need a fast timeout here,
848          * but also long enough to gather up enough frames to ensure we don't
849          * get more interrupts than necessary.
850          * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
851          */
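        /* (16 frames * 1500 bytes * 8 bits) / 1 Gbit/s = 192 us, so the
         * 200 us default below leaves a little headroom.
         */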
852         priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
853         priv->tx_coalesce_usecs = 200;
854         priv->tx_coalesce_timer.function = tx_done;
855
856         priv->map = syscon_node_to_regmap(arg.np);
857         if (IS_ERR(priv->map)) {
858                 dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
859                 ret = PTR_ERR(priv->map);
860                 goto init_fail;
861         }
862
863         priv->phy_mode = of_get_phy_mode(node);
864         if (priv->phy_mode < 0) {
865                 dev_warn(d, "phy-mode not found\n");
866                 ret = -EINVAL;
867                 goto init_fail;
868         }
869
870         irq = platform_get_irq(pdev, 0);
871         if (irq <= 0) {
872                 ret = -EINVAL;
873                 goto init_fail;
874         }
875
876         ret = devm_request_irq(d, irq, hip04_mac_interrupt,
877                                0, pdev->name, ndev);
878         if (ret) {
879                 netdev_err(ndev, "devm_request_irq failed\n");
880                 goto init_fail;
881         }
882
883         priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
884         if (priv->phy_node) {
885                 priv->phy = of_phy_connect(ndev, priv->phy_node,
886                                            &hip04_adjust_link,
887                                            0, priv->phy_mode);
888                 if (!priv->phy) {
889                         ret = -EPROBE_DEFER;
890                         goto init_fail;
891                 }
892         }
893
894         INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
895
896         ndev->netdev_ops = &hip04_netdev_ops;
897         ndev->ethtool_ops = &hip04_ethtool_ops;
898         ndev->watchdog_timeo = TX_TIMEOUT;
899         ndev->priv_flags |= IFF_UNICAST_FLT;
900         ndev->irq = irq;
901         netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
902
903         hip04_reset_ppe(priv);
904         if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
905                 hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
906
907         hip04_config_fifo(priv);
908         eth_random_addr(ndev->dev_addr);
909         hip04_update_mac_address(ndev);
910
911         ret = hip04_alloc_ring(ndev, d);
912         if (ret) {
913                 netdev_err(ndev, "failed to alloc ring\n");
914                 goto alloc_fail;
915         }
916
917         ret = register_netdev(ndev);
918         if (ret)
919                 goto alloc_fail;
922
923         return 0;
924
925 alloc_fail:
926         hip04_free_ring(ndev, d);
927 init_fail:
928         of_node_put(priv->phy_node);
929         free_netdev(ndev);
930         return ret;
931 }
932
933 static int hip04_remove(struct platform_device *pdev)
934 {
935         struct net_device *ndev = platform_get_drvdata(pdev);
936         struct hip04_priv *priv = netdev_priv(ndev);
937         struct device *d = &pdev->dev;
938
939         if (priv->phy)
940                 phy_disconnect(priv->phy);
941
942         unregister_netdev(ndev);
943         hip04_free_ring(ndev, d);
944         free_irq(ndev->irq, ndev);
945         of_node_put(priv->phy_node);
946         cancel_work_sync(&priv->tx_timeout_task);
947         free_netdev(ndev);
948
949         return 0;
950 }
951
952 static const struct of_device_id hip04_mac_match[] = {
953         { .compatible = "hisilicon,hip04-mac" },
954         { }
955 };
956
957 MODULE_DEVICE_TABLE(of, hip04_mac_match);
958
959 static struct platform_driver hip04_mac_driver = {
960         .probe  = hip04_mac_probe,
961         .remove = hip04_remove,
962         .driver = {
963                 .name           = DRV_NAME,
964                 .of_match_table = hip04_mac_match,
965         },
966 };
967 module_platform_driver(hip04_mac_driver);
968
969 MODULE_DESCRIPTION("HiSilicon HIP04 Ethernet driver");
970 MODULE_LICENSE("GPL");