net: hns3: Add enable and process hw errors from PPP
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#include "hclge_err.h"

static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
        { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" },
        { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" },
        { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" },
        { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
        { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
        { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
        { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_igu_com_err_int[] = {
        { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
        { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" },
        { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
        { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
        { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
        { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
        { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow" },
        { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
        { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
        { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ncsi_err_int[] = {
        { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_int0[] = {
        { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" },
        { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" },
        { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" },
        { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" },
        { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" },
        { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" },
        { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" },
        { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" },
        { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" },
        { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" },
        { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" },
        { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" },
        { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" },
        { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" },
        { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" },
        { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" },
        { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" },
        { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" },
        { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" },
        { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" },
        { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" },
        { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" },
        { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" },
        { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" },
        { .int_msk = BIT(27),
                .msg = "flow_director_ad_mem0_ecc_1bit_err" },
        { .int_msk = BIT(28),
                .msg = "flow_director_ad_mem1_ecc_1bit_err" },
        { .int_msk = BIT(29),
                .msg = "rx_vlan_tag_memory_ecc_1bit_err" },
        { .int_msk = BIT(30),
                .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
        { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
        { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
        { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
        { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
        { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
        { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
        { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
        { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
        { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
        { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
        { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
        { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
        { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
        { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
        { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
        { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
        { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_mbit_err" },
        { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
        { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
        { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
        { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
        { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
        { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
        { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
        { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
        { .int_msk = BIT(27),
                .msg = "flow_director_ad_mem0_ecc_mbit_err" },
        { .int_msk = BIT(28),
                .msg = "flow_director_ad_mem1_ecc_mbit_err" },
        { .int_msk = BIT(29),
                .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
        { .int_msk = BIT(30),
                .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_pf_int[] = {
        { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" },
        { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_int2[] = {
        { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" },
        { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" },
        { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" },
        { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" },
        { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" },
        { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" },
        { /* sentinel */ }
};

static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
        { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
        { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
        { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
        { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
        { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
        { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
        { /* sentinel */ }
};

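/* hclge_log_error: log the errors reported in an interrupt status word
 * @dev: pointer to struct device
 * @err_list: sentinel terminated table mapping interrupt bits to messages
 * @err_sts: error interrupt status read from the hardware
 */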
static void hclge_log_error(struct device *dev,
                            const struct hclge_hw_error *err_list,
                            u32 err_sts)
{
        const struct hclge_hw_error *err;
        int i = 0;

        while (err_list[i].msg) {
                err = &err_list[i];
                if (!(err->int_msk & err_sts)) {
                        i++;
                        continue;
                }
                dev_warn(dev, "%s [error status=0x%x] found\n",
                         err->msg, err_sts);
                i++;
        }
}

/* hclge_cmd_query_error: read the error information
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @cmd:  command opcode
 * @flag: flag for extended command structure
 * @w_num: offset for setting the read interrupt type
 * @int_type: select the type of interrupt (RAS-CE/RAS-NFE/RAS-FE etc.)
 *            for which the error info will be read
 *
 * This function queries the error info from the hw register(s) using command
 */
static int hclge_cmd_query_error(struct hclge_dev *hdev,
                                 struct hclge_desc *desc, u32 cmd,
                                 u16 flag, u8 w_num,
                                 enum hclge_err_int_type int_type)
{
        struct device *dev = &hdev->pdev->dev;
        int num = 1;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
        if (flag) {
                desc[0].flag |= cpu_to_le16(flag);
                hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
                num = 2;
        }
        if (w_num)
                desc[0].data[w_num] = cpu_to_le32(int_type);

        ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
        if (ret)
                dev_err(dev, "query error cmd failed (%d)\n", ret);

        return ret;
}

/* hclge_cmd_clear_error: clear the error status
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @desc_src: prefilled descriptor from the previous command for reusing
 * @cmd:  command opcode
 * @flag: flag for extended command structure
 *
 * This function clears the error status in the hw register(s) using command
 */
static int hclge_cmd_clear_error(struct hclge_dev *hdev,
                                 struct hclge_desc *desc,
                                 struct hclge_desc *desc_src,
                                 u32 cmd, u16 flag)
{
        struct device *dev = &hdev->pdev->dev;
        int num = 1;
        int ret, i;

        if (cmd) {
                hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
                if (flag) {
                        desc[0].flag |= cpu_to_le16(flag);
                        hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
                        num = 2;
                }
                if (desc_src) {
                        for (i = 0; i < 6; i++) {
                                desc[0].data[i] = desc_src[0].data[i];
                                if (flag)
                                        desc[1].data[i] = desc_src[1].data[i];
                        }
                }
        } else {
                hclge_cmd_reuse_desc(&desc[0], false);
                if (flag) {
                        desc[0].flag |= cpu_to_le16(flag);
                        hclge_cmd_reuse_desc(&desc[1], false);
                        num = 2;
                }
        }
        ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
        if (ret)
                dev_err(dev, "clear error cmd failed (%d)\n", ret);

        return ret;
}

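/* hclge_enable_common_error: enable/disable COMMON block error interrupts
 * @hdev: pointer to struct hclge_dev
 * @en: true to enable, false to disable
 *
 * Configures the IMP TCM/ITCM4, CMDQ NIC/RoCEE, IMP RD poison and TQP ECC
 * error interrupts with a single two-descriptor command.
 */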
static int hclge_enable_common_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc[2];
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
        desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);

        if (en) {
                /* enable COMMON error interrupts */
                desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
                desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
                                        HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
                desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
                desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN);
                desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
        } else {
                /* disable COMMON error interrupts */
                desc[0].data[0] = 0;
                desc[0].data[2] = 0;
                desc[0].data[3] = 0;
                desc[0].data[4] = 0;
                desc[0].data[5] = 0;
        }
        desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
        desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
                                HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
        desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
        desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK);
        desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);

        ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
        if (ret)
                dev_err(dev,
                        "failed(%d) to enable/disable COMMON err interrupts\n",
                        ret);

        return ret;
}

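/* hclge_enable_ncsi_error: enable/disable NCSI ECC error interrupts
 * @hdev: pointer to struct hclge_dev
 * @en: true to enable, false to disable
 *
 * No-op on devices earlier than revision 0x21.
 */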
static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc;
        int ret;

        if (hdev->pdev->revision < 0x21)
                return 0;

        /* enable/disable NCSI error interrupts */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
        if (en)
                desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
        else
                desc.data[0] = 0;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(dev,
                        "failed(%d) to enable/disable NCSI error interrupts\n",
                        ret);

        return ret;
}

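/* hclge_enable_igu_egu_error: enable/disable IGU/EGU error interrupts
 * @hdev: pointer to struct hclge_dev
 * @en: true to enable, false to disable
 *
 * Configures the IGU common and IGU-EGU TNL interrupts, then the NCSI
 * error interrupts handled as part of the same block.
 */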
static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc;
        int ret;

        /* enable/disable error interrupts */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
        if (en)
                desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
        else
                desc.data[0] = 0;
        desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(dev,
                        "failed(%d) to enable/disable IGU common interrupts\n",
                        ret);
                return ret;
        }

        hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
        if (en)
                desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
        else
                desc.data[0] = 0;
        desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(dev,
                        "failed(%d) to enable/disable IGU-EGU TNL interrupts\n",
                        ret);
                return ret;
        }

        ret = hclge_enable_ncsi_error(hdev, en);
        if (ret)
                dev_err(dev, "fail(%d) to en/disable err int\n", ret);

        return ret;
}

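/* hclge_enable_ppp_error_interrupt: enable/disable one group of PPP interrupts
 * @hdev: pointer to struct hclge_dev
 * @cmd: HCLGE_PPP_CMD0_INT_CMD (MPF ECC INT0/INT1) or
 *       HCLGE_PPP_CMD1_INT_CMD (MPF ECC INT2/INT3)
 * @en: true to enable, false to disable
 */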
static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
                                            bool en)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc[2];
        int ret;

        /* enable/disable PPP error interrupts */
        hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
        desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[1], cmd, false);

        if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
                if (en) {
                        desc[0].data[0] =
                                cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
                        desc[0].data[1] =
                                cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
                } else {
                        desc[0].data[0] = 0;
                        desc[0].data[1] = 0;
                }
                desc[1].data[0] =
                        cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
                desc[1].data[1] =
                        cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
        } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
                if (en) {
                        desc[0].data[0] =
                                cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
                        desc[0].data[1] =
                                cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
                } else {
                        desc[0].data[0] = 0;
                        desc[0].data[1] = 0;
                }
                desc[1].data[0] =
                        cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
                desc[1].data[1] =
                        cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
        }

        ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
        if (ret)
                dev_err(dev,
                        "failed(%d) to enable/disable PPP error interrupts\n",
                        ret);

        return ret;
}

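/* hclge_enable_ppp_error: enable/disable all PPP block error interrupts
 * @hdev: pointer to struct hclge_dev
 * @en: true to enable, false to disable
 */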
static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en)
{
        struct device *dev = &hdev->pdev->dev;
        int ret;

        ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
                                               en);
        if (ret) {
                dev_err(dev,
                        "failed(%d) to enable/disable PPP error intr 0,1\n",
                        ret);
                return ret;
        }

        ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
                                               en);
        if (ret)
                dev_err(dev,
                        "failed(%d) to enable/disable PPP error intr 2,3\n",
                        ret);

        return ret;
}

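/* hclge_process_common_error: query, log and clear COMMON block errors
 * @hdev: pointer to struct hclge_dev
 * @type: type of the interrupt being processed
 */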
static void hclge_process_common_error(struct hclge_dev *hdev,
                                       enum hclge_err_int_type type)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc[2];
        u32 err_sts;
        int ret;

        /* read err sts */
        ret = hclge_cmd_query_error(hdev, &desc[0],
                                    HCLGE_COMMON_ECC_INT_CFG,
                                    HCLGE_CMD_FLAG_NEXT, 0, 0);
        if (ret) {
                dev_err(dev,
                        "failed(=%d) to query COMMON error interrupt status\n",
                        ret);
                return;
        }

        /* log err */
        err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);

        err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);

        err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT)
                   & HCLGE_CMDQ_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);

        if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
                dev_warn(dev, "imp_rd_data_poison_err found\n");

        err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
                   HCLGE_TQP_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);

        err_sts = (le32_to_cpu(desc[0].data[5])) &
                   HCLGE_IMP_ITCM4_ECC_INT_MASK;
        hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);

        /* clear error interrupts */
        desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
        desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
                                HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
        desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
        desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);

        ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
                                    HCLGE_CMD_FLAG_NEXT);
        if (ret)
                dev_err(dev,
                        "failed(%d) to clear COMMON error interrupt status\n",
                        ret);
}

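/* hclge_process_ncsi_error: query, log and clear NCSI ECC errors
 * @hdev: pointer to struct hclge_dev
 * @type: type of the interrupt being processed
 *
 * No-op on devices earlier than revision 0x21.
 */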
static void hclge_process_ncsi_error(struct hclge_dev *hdev,
                                     enum hclge_err_int_type type)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc_rd;
        struct hclge_desc desc_wr;
        u32 err_sts;
        int ret;

        if (hdev->pdev->revision < 0x21)
                return;

        /* read NCSI error status */
        ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
                                    0, 1, HCLGE_NCSI_ERR_INT_TYPE);
        if (ret) {
                dev_err(dev,
                        "failed(=%d) to query NCSI error interrupt status\n",
                        ret);
                return;
        }

        /* log err */
        err_sts = le32_to_cpu(desc_rd.data[0]);
        hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);

        /* clear err int */
        ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
                                    HCLGE_NCSI_INT_CLR, 0);
        if (ret)
                dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
                        ret);
}

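/* hclge_process_igu_egu_error: query, log and clear IGU/EGU errors
 * @hdev: pointer to struct hclge_dev
 * @int_type: type of the interrupt being processed
 *
 * Handles the IGU common and IGU-EGU TNL status registers, then the NCSI
 * errors handled as part of the same block.
 */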
static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
                                        enum hclge_err_int_type int_type)
{
        struct device *dev = &hdev->pdev->dev;
        struct hclge_desc desc_rd;
        struct hclge_desc desc_wr;
        u32 err_sts;
        int ret;

        /* read IGU common err sts */
        ret = hclge_cmd_query_error(hdev, &desc_rd,
                                    HCLGE_IGU_COMMON_INT_QUERY,
                                    0, 1, int_type);
        if (ret) {
                dev_err(dev, "failed(=%d) to query IGU common int status\n",
                        ret);
                return;
        }

        /* log err */
        err_sts = le32_to_cpu(desc_rd.data[0]) &
                                   HCLGE_IGU_COM_INT_MASK;
        hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);

        /* clear err int */
        ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
                                    HCLGE_IGU_COMMON_INT_CLR, 0);
        if (ret) {
                dev_err(dev, "failed(=%d) to clear IGU common int status\n",
                        ret);
                return;
        }

        /* read IGU-EGU TNL err sts */
        ret = hclge_cmd_query_error(hdev, &desc_rd,
                                    HCLGE_IGU_EGU_TNL_INT_QUERY,
                                    0, 1, int_type);
        if (ret) {
                dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
                        ret);
                return;
        }

        /* log err */
        err_sts = le32_to_cpu(desc_rd.data[0]) &
                                   HCLGE_IGU_EGU_TNL_INT_MASK;
        hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);

        /* clear err int */
        ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
                                    HCLGE_IGU_EGU_TNL_INT_CLR, 0);
        if (ret) {
                dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
                        ret);
                return;
        }

        hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
}

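/* hclge_log_and_clear_ppp_error: query, log and clear one group of PPP errors
 * @hdev: pointer to struct hclge_dev
 * @cmd: HCLGE_PPP_CMD0_INT_CMD or HCLGE_PPP_CMD1_INT_CMD
 * @int_type: type of the interrupt being processed
 *
 * Returns 0 on success, -EIO if a command fails or -EINVAL for an unknown
 * command.
 */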
static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
                                         enum hclge_err_int_type int_type)
{
        enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
        struct device *dev = &hdev->pdev->dev;
        const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
        struct hclge_desc desc[2];
        u32 err_sts;
        int ret;

        /* read PPP INT sts */
        ret = hclge_cmd_query_error(hdev, &desc[0], cmd,
                                    HCLGE_CMD_FLAG_NEXT, 5, int_type);
        if (ret) {
                dev_err(dev, "failed(=%d) to query PPP interrupt status\n",
                        ret);
                return -EIO;
        }

        /* log error */
        if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
                hw_err_lst1 = &hclge_ppp_mpf_int0[0];
                hw_err_lst2 = &hclge_ppp_mpf_int1[0];
                hw_err_lst3 = &hclge_ppp_pf_int[0];
        } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
                hw_err_lst1 = &hclge_ppp_mpf_int2[0];
                hw_err_lst2 = &hclge_ppp_mpf_int3[0];
        } else {
                dev_err(dev, "invalid command(=%d)\n", cmd);
                return -EINVAL;
        }

        err_sts = le32_to_cpu(desc[0].data[2]);
        if (err_sts) {
                hclge_log_error(dev, hw_err_lst1, err_sts);
                reset_level = HNAE3_FUNC_RESET;
        }

        err_sts = le32_to_cpu(desc[0].data[3]);
        if (err_sts) {
                hclge_log_error(dev, hw_err_lst2, err_sts);
                reset_level = HNAE3_FUNC_RESET;
        }

        /* hw_err_lst3 (the PF error list) is only assigned for the CMD0
         * query, so only check the PF status there; this also avoids using
         * hw_err_lst3 uninitialized for CMD1.
         */
        if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
                err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
                if (err_sts) {
                        hclge_log_error(dev, hw_err_lst3, err_sts);
                        reset_level = HNAE3_FUNC_RESET;
                }
        }

        /* clear PPP INT */
        ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
                                    HCLGE_CMD_FLAG_NEXT);
        if (ret) {
                dev_err(dev, "failed(=%d) to clear PPP interrupt status\n",
                        ret);
                return -EIO;
        }

        return 0;
}

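/* hclge_process_ppp_error: query, log and clear all PPP block errors
 * @hdev: pointer to struct hclge_dev
 * @int_type: type of the interrupt being processed
 */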
static void hclge_process_ppp_error(struct hclge_dev *hdev,
                                    enum hclge_err_int_type int_type)
{
        struct device *dev = &hdev->pdev->dev;
        int ret;

        /* read PPP INT0,1 sts */
        ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD,
                                            int_type);
        if (ret < 0) {
                dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n",
                        ret);
                return;
        }

        /* read PPP INT2,3 sts */
        ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD,
                                            int_type);
        if (ret < 0)
                dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n",
                        ret);
}

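/* Hardware blocks whose error interrupts this PF can enable and process.
 * .msk is the block's bit within the NFE field of
 * HCLGE_RAS_PF_OTHER_INT_STS_REG.
 */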
static const struct hclge_hw_blk hw_blk[] = {
        { .msk = BIT(0), .name = "IGU_EGU",
          .enable_error = hclge_enable_igu_egu_error,
          .process_error = hclge_process_igu_egu_error, },
        { .msk = BIT(5), .name = "COMMON",
          .enable_error = hclge_enable_common_error,
          .process_error = hclge_process_common_error, },
        { .msk = BIT(1), .name = "PPP",
          .enable_error = hclge_enable_ppp_error,
          .process_error = hclge_process_ppp_error, },
        { /* sentinel */ }
};

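/* hclge_hw_error_set_state: enable or disable error interrupts for all blocks
 * @hdev: pointer to struct hclge_dev
 * @state: true to enable, false to disable
 *
 * Walks hw_blk[] and invokes each block's enable_error callback; returns on
 * the first failure.
 */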
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
{
        struct device *dev = &hdev->pdev->dev;
        int ret = 0;
        int i = 0;

        while (hw_blk[i].name) {
                if (!hw_blk[i].enable_error) {
                        i++;
                        continue;
                }
                ret = hw_blk[i].enable_error(hdev, state);
                if (ret) {
                        dev_err(dev, "fail(%d) to en/disable err int\n", ret);
                        return ret;
                }
                i++;
        }

        return ret;
}

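/* hclge_process_ras_hw_error: handle a RAS error reported for this PF
 * @ae_dev: pointer to struct hnae3_ae_dev
 *
 * Reads the per-function RAS status register, processes the non-fatal errors
 * of every block flagged in the NFE field and returns
 * PCI_ERS_RESULT_NEED_RESET to request a reset.
 */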
pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct device *dev = &hdev->pdev->dev;
        u32 sts, val;
        int i = 0;

        sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);

        /* Processing Non-fatal errors */
        if (sts & HCLGE_RAS_REG_NFE_MASK) {
                val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
                i = 0;
                while (hw_blk[i].name) {
                        if (!(hw_blk[i].msk & val)) {
                                i++;
                                continue;
                        }
                        dev_warn(dev, "%s ras non-fatal error identified\n",
                                 hw_blk[i].name);
                        if (hw_blk[i].process_error)
                                hw_blk[i].process_error(hdev,
                                                         HCLGE_ERR_INT_RAS_NFE);
                        i++;
                }
        }

        return PCI_ERS_RESULT_NEED_RESET;
}