1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2016-2017 Hisilicon Limited. */
/* IMP ITCM0-3 and DTCM0-1 ECC interrupt status bits.
 * Even bits report correctable 1-bit errors, odd bits multi-bit errors.
 */
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
	{ .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" },
	{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" },
	{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" },
	{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" },
	{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" },
	{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" },
	{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" },
	{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
/* IMP ITCM4 ECC interrupt status bits (reported in a separate register word). */
static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = {
	{ .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" },
/* NIC command-queue memory ECC interrupt status bits
 * (depth/tail/head/addr rings, 1-bit and multi-bit variants).
 */
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
	{ .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" },
	{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" },
	{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" },
	{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" },
	{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
	{ .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" },
	{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
	{ .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" },
	{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
	{ .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" },
	{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
/* RoCEE command-queue memory ECC interrupt status bits; mirrors the
 * NIC cmdq table above, bit for bit.
 */
static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = {
	{ .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" },
	{ .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" },
	{ .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" },
	{ .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" },
	{ .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
	{ .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" },
	{ .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
	{ .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" },
	{ .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
	{ .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" },
	{ .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
/* TQP interrupt-memory ECC status bits: bits 0-5 are 1-bit errors,
 * bits 6-11 are the matching multi-bit errors.
 */
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
	{ .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" },
	{ .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" },
	{ .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" },
	{ .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" },
	{ .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" },
	{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
	{ .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
	{ .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
/* IGU common (RX buffer) ECC interrupt status bits.
 * Note: here the mbit error occupies the even bit, unlike most tables.
 */
static const struct hclge_hw_error hclge_igu_com_err_int[] = {
	{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
	{ .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" },
	{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" },
96 static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
97 { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
98 { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
99 { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
100 { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
101 { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
102 { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
/* NCSI TX ECC interrupt status bits (rev >= 0x21 hardware only; see
 * hclge_enable_ncsi_error / hclge_process_ncsi_error).
 */
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
	{ .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
/* PPP multi-PF interrupt word 0: correctable (1-bit) ECC errors in the
 * VLAN/UMV/RSS/FD/multicast table memories.
 */
static const struct hclge_hw_error hclge_ppp_mpf_int0[] = {
	{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" },
	{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" },
	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" },
	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" },
	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" },
	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" },
	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" },
	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" },
	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" },
	{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" },
	{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" },
	{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" },
	{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" },
	{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" },
	{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" },
	{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" },
	{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" },
	{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" },
	{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" },
	{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" },
	{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" },
	{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" },
	{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" },
	{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" },
	{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" },
	{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" },
	{ .int_msk = BIT(27),
	  .msg = "flow_director_ad_mem0_ecc_1bit_err" },
	{ .int_msk = BIT(28),
	  .msg = "flow_director_ad_mem1_ecc_1bit_err" },
	{ .int_msk = BIT(29),
	  .msg = "rx_vlan_tag_memory_ecc_1bit_err" },
	{ .int_msk = BIT(30),
	  .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" },
151 static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
152 { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
153 { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
154 { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
155 { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
156 { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
157 { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
158 { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" },
159 { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
160 { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
161 { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
162 { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
163 { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
164 { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
165 { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
166 { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
167 { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
168 { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
169 { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
170 { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
171 { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
172 { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
173 { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
174 { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
175 { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
176 { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
177 { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
178 { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
179 { .int_msk = BIT(27),
180 .msg = "flow_director_ad_mem0_ecc_mbit_err" },
181 { .int_msk = BIT(28),
182 .msg = "flow_director_ad_mem1_ecc_mbit_err" },
183 { .int_msk = BIT(29),
184 .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
185 { .int_msk = BIT(30),
186 .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
/* PPP per-PF error interrupt status bits. */
static const struct hclge_hw_error hclge_ppp_pf_int[] = {
	{ .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" },
	{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
/* PPP multi-PF interrupt word 2: correctable (1-bit) ECC errors in the
 * HFS/FD/GRO memories.
 */
static const struct hclge_hw_error hclge_ppp_mpf_int2[] = {
	{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" },
	{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" },
	{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" },
	{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" },
	{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" },
/* PPP multi-PF interrupt word 3: multi-bit counterparts of
 * hclge_ppp_mpf_int2 above.
 */
static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
	{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
	{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
	{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
	{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
/* Walk an error table (terminated by an entry with a NULL .msg) and
 * emit a warning for every table bit that is set in the reported status.
 */
static void hclge_log_error(struct device *dev,
			    const struct hclge_hw_error *err_list,
	const struct hclge_hw_error *err;
	/* table is scanned until the NULL-msg sentinel entry */
	while (err_list[i].msg) {
		/* skip table entries whose bit is not asserted */
		if (!(err->int_msk & err_sts)) {
		dev_warn(dev, "%s [error status=0x%x] found\n",
/* hclge_cmd_query_error: read the error information
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @cmd: command opcode
 * @flag: flag for extended command structure
 * @w_num: offset for setting the read interrupt type.
 * @int_type: select which type of the interrupt for which the error
 * info will be read(RAS-CE/RAS-NFE/RAS-FE etc).
 * This function queries the error info from hw register/s using command
static int hclge_cmd_query_error(struct hclge_dev *hdev,
				 struct hclge_desc *desc, u32 cmd,
				 enum hclge_err_int_type int_type)
	struct device *dev = &hdev->pdev->dev;
	/* build a read (query) descriptor pair for this opcode */
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(flag);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	/* select which interrupt type's status is to be read back */
	desc[0].data[w_num] = cpu_to_le32(int_type);
	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	dev_err(dev, "query error cmd failed (%d)\n", ret);
/* hclge_cmd_clear_error: clear the error status
 * @hdev: pointer to struct hclge_dev
 * @desc: descriptor for describing the command
 * @desc_src: prefilled descriptor from the previous command for reusing
 * @cmd: command opcode
 * @flag: flag for extended command structure
 * This function clears the error status in the hw register/s using command
static int hclge_cmd_clear_error(struct hclge_dev *hdev,
				 struct hclge_desc *desc,
				 struct hclge_desc *desc_src,
	struct device *dev = &hdev->pdev->dev;
	/* fresh write descriptors when a new opcode is given */
	hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
	desc[0].flag |= cpu_to_le16(flag);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
	/* copy the previously-read status words so the write-back
	 * clears exactly the bits that were reported
	 */
	for (i = 0; i < 6; i++) {
		desc[0].data[i] = desc_src[0].data[i];
		desc[1].data[i] = desc_src[1].data[i];
	/* no source descriptor: reuse the query descriptors as a write */
	hclge_cmd_reuse_desc(&desc[0], false);
	desc[0].flag |= cpu_to_le16(flag);
	hclge_cmd_reuse_desc(&desc[1], false);
	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
	dev_err(dev, "clear error cmd failed (%d)\n", ret);
/* Enable (en=true) or disable the COMMON-block error interrupts
 * (IMP TCM/ITCM4, NIC/RoCEE cmdq ECC, IMP read-poison, TQP ECC).
 * Returns 0 on success or a negative error code from the firmware cmd.
 */
static int hclge_enable_common_error(struct hclge_dev *hdev, bool en)
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
	/* enable COMMON error interrupts */
	desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
	desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
				HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
	desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
	desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN);
	desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
	/* disable COMMON error interrupts */
	/* second descriptor carries the write masks for the enable bits */
	desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
				HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
	desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
	desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK);
	desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
		"failed(%d) to enable/disable COMMON err interrupts\n",
/* Enable/disable NCSI error interrupts. No-op on hardware older than
 * revision 0x21 (NCSI error reporting not present there).
 */
static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	/* pre-0x21 silicon has no NCSI error interrupt support */
	if (hdev->pdev->revision < 0x21)
	/* enable/disable NCSI error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
	desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		"failed(%d) to enable/disable NCSI error interrupts\n",
/* Enable/disable IGU common and IGU-EGU tunnel error interrupts, then
 * chain into the NCSI enable. Returns the first failing command's code.
 */
static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en)
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc;
	/* enable/disable error interrupts */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
	desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
	desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		"failed(%d) to enable/disable IGU common interrupts\n",
	/* same sequence for the IGU-EGU tunnel interrupt register */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
	desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
	desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		"failed(%d) to enable/disable IGU-EGU TNL interrupts\n",
	/* NCSI enable is folded into the IGU/EGU block */
	ret = hclge_enable_ncsi_error(hdev, en);
		dev_err(dev, "fail(%d) to en/disable err int\n", ret);
/* Enable/disable one pair of PPP error-interrupt words, selected by
 * @cmd: CMD0 covers MPF INT0/INT1, CMD1 covers MPF INT2/INT3.
 */
static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	/* enable/disable PPP error interrupts */
	hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
	if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
	} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
			cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
		"failed(%d) to enable/disable PPP error interrupts\n",
/* Enable/disable all PPP error interrupts (CMD0 word pair 0/1, then
 * CMD1 word pair 2/3).
 */
static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en)
	struct device *dev = &hdev->pdev->dev;
	ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
		"failed(%d) to enable/disable PPP error intr 0,1\n",
	ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
		"failed(%d) to enable/disable PPP error intr 2,3\n",
/* Query, log and clear COMMON-block error status: IMP TCM/ITCM4 ECC,
 * NIC and RoCEE cmdq ECC, IMP read-poison and TQP ECC errors.
 */
static void hclge_process_common_error(struct hclge_dev *hdev,
				       enum hclge_err_int_type type)
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	ret = hclge_cmd_query_error(hdev, &desc[0],
				    HCLGE_COMMON_ECC_INT_CFG,
				    HCLGE_CMD_FLAG_NEXT, 0, 0);
		"failed(=%d) to query COMMON error interrupt status\n",
	/* log each status word against its matching error table */
	err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
	hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);
	err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
	hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);
	/* RoCEE cmdq status lives in the upper half of the same word */
	err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT)
		   & HCLGE_CMDQ_ECC_INT_MASK;
	hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);
	/* bit 0 of data[3] is the single IMP read-poison flag */
	if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
		dev_warn(dev, "imp_rd_data_poison_err found\n");
	err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
		   HCLGE_TQP_ECC_INT_MASK;
	hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);
	err_sts = (le32_to_cpu(desc[0].data[5])) &
		   HCLGE_IMP_ITCM4_ECC_INT_MASK;
	hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);
	/* clear error interrupts */
	desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
	desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
				HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
	desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
	desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);
	ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
				    HCLGE_CMD_FLAG_NEXT);
		"failed(%d) to clear COMMON error interrupt status\n",
560 static void hclge_process_ncsi_error(struct hclge_dev *hdev,
561 enum hclge_err_int_type type)
563 struct device *dev = &hdev->pdev->dev;
564 struct hclge_desc desc_rd;
565 struct hclge_desc desc_wr;
569 if (hdev->pdev->revision < 0x21)
572 /* read NCSI error status */
573 ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
574 0, 1, HCLGE_NCSI_ERR_INT_TYPE);
577 "failed(=%d) to query NCSI error interrupt status\n",
583 err_sts = le32_to_cpu(desc_rd.data[0]);
584 hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);
587 ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
588 HCLGE_NCSI_INT_CLR, 0);
590 dev_err(dev, "failed(=%d) to clear NCSI intrerrupt status\n",
/* Query, log and clear IGU common and IGU-EGU tunnel error status,
 * then process NCSI errors (reported as RAS non-fatal).
 */
static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
					enum hclge_err_int_type int_type)
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc_rd;
	struct hclge_desc desc_wr;
	/* read IGU common err sts */
	ret = hclge_cmd_query_error(hdev, &desc_rd,
				    HCLGE_IGU_COMMON_INT_QUERY,
		dev_err(dev, "failed(=%d) to query IGU common int status\n",
	err_sts = le32_to_cpu(desc_rd.data[0]) &
		   HCLGE_IGU_COM_INT_MASK;
	hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);
	/* write back the read status to clear the reported bits */
	ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
				    HCLGE_IGU_COMMON_INT_CLR, 0);
		dev_err(dev, "failed(=%d) to clear IGU common int status\n",
	/* read IGU-EGU TNL err sts */
	ret = hclge_cmd_query_error(hdev, &desc_rd,
				    HCLGE_IGU_EGU_TNL_INT_QUERY,
		dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
	err_sts = le32_to_cpu(desc_rd.data[0]) &
		   HCLGE_IGU_EGU_TNL_INT_MASK;
	hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);
	ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
				    HCLGE_IGU_EGU_TNL_INT_CLR, 0);
		dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
	/* NCSI errors are handled under the IGU/EGU block */
	hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
/* Query one PPP interrupt word pair (selected by @cmd), log any set
 * bits against the matching tables, and write the status back to clear
 * it. Escalates reset_level to FUNC_RESET when any error was seen.
 */
static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
					 enum hclge_err_int_type int_type)
	enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
	struct device *dev = &hdev->pdev->dev;
	const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
	struct hclge_desc desc[2];
	/* read PPP INT sts */
	ret = hclge_cmd_query_error(hdev, &desc[0], cmd,
				    HCLGE_CMD_FLAG_NEXT, 5, int_type);
		dev_err(dev, "failed(=%d) to query PPP interrupt status\n",
	/* pick the error tables matching this command's word pair;
	 * only CMD0 has a per-PF table (lst3)
	 */
	if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
		hw_err_lst1 = &hclge_ppp_mpf_int0[0];
		hw_err_lst2 = &hclge_ppp_mpf_int1[0];
		hw_err_lst3 = &hclge_ppp_pf_int[0];
	} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
		hw_err_lst1 = &hclge_ppp_mpf_int2[0];
		hw_err_lst2 = &hclge_ppp_mpf_int3[0];
		dev_err(dev, "invalid command(=%d)\n", cmd);
	err_sts = le32_to_cpu(desc[0].data[2]);
		hclge_log_error(dev, hw_err_lst1, err_sts);
		reset_level = HNAE3_FUNC_RESET;
	err_sts = le32_to_cpu(desc[0].data[3]);
		hclge_log_error(dev, hw_err_lst2, err_sts);
		reset_level = HNAE3_FUNC_RESET;
	/* PF-level status: two bits at offset 8 of data[4] */
	err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
		hclge_log_error(dev, hw_err_lst3, err_sts);
		reset_level = HNAE3_FUNC_RESET;
	/* write the read status back to clear it */
	ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
				    HCLGE_CMD_FLAG_NEXT);
		dev_err(dev, "failed(=%d) to clear PPP interrupt status\n",
/* Process all PPP errors: log-and-clear word pair 0/1 (CMD0), then
 * word pair 2/3 (CMD1).
 */
static void hclge_process_ppp_error(struct hclge_dev *hdev,
				    enum hclge_err_int_type int_type)
	struct device *dev = &hdev->pdev->dev;
	/* read PPP INT0,1 sts */
	ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD,
		dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n",
	/* read err PPP INT2,3 sts */
	ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD,
		dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n",
/* Hardware block dispatch table: maps a RAS NFE status bit (.msk) to
 * the block's enable and process callbacks. Scanned by
 * hclge_hw_error_set_state and hclge_process_ras_hw_error.
 */
static const struct hclge_hw_blk hw_blk[] = {
	{ .msk = BIT(0), .name = "IGU_EGU",
	  .enable_error = hclge_enable_igu_egu_error,
	  .process_error = hclge_process_igu_egu_error, },
	{ .msk = BIT(5), .name = "COMMON",
	  .enable_error = hclge_enable_common_error,
	  .process_error = hclge_process_common_error, },
	{ .msk = BIT(1), .name = "PPP",
	  .enable_error = hclge_enable_ppp_error,
	  .process_error = hclge_process_ppp_error, },
/* Enable (state=true) or disable error interrupts for every hardware
 * block listed in hw_blk[]. Blocks without an enable callback are
 * skipped.
 */
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
	struct device *dev = &hdev->pdev->dev;
	/* hw_blk[] is terminated by an entry with a NULL .name */
	while (hw_blk[i].name) {
		if (!hw_blk[i].enable_error) {
		ret = hw_blk[i].enable_error(hdev, state);
			dev_err(dev, "fail(%d) to en/disable err int\n", ret);
/* PCIe AER entry point: read the PF RAS status register, dispatch each
 * asserted non-fatal-error bit to its block's process callback, and
 * report that a reset is needed.
 */
pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
	struct hclge_dev *hdev = ae_dev->priv;
	struct device *dev = &hdev->pdev->dev;
	sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
	/* Processing Non-fatal errors */
	if (sts & HCLGE_RAS_REG_NFE_MASK) {
		/* one status bit per hardware block, see hw_blk[].msk */
		val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
		while (hw_blk[i].name) {
			if (!(hw_blk[i].msk & val)) {
			dev_warn(dev, "%s ras non-fatal error identified\n",
			if (hw_blk[i].process_error)
				hw_blk[i].process_error(hdev,
							HCLGE_ERR_INT_RAS_NFE);
	return PCI_ERS_RESULT_NEED_RESET;