1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
46 /* For now considering MC resources needed for broadcast
47 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
49 #define MC_TBL_SIZE MC_TBL_SZ_512
50 #define MC_BUF_CNT MC_BUF_CNT_128
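/* These are encoded sizes, not raw counts: nix_setup_mcast() below
 * allocates (256UL << MC_TBL_SIZE) MCE entries and (8UL << MC_BUF_CNT)
 * replication buffers, and programs the same encodings into
 * NIX_AF_RX_MCAST_CFG and NIX_AF_RX_MCAST_BUF_CFG.
 */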
53 struct hlist_node node;
58 int rvu_get_nixlf_count(struct rvu *rvu)
60 struct rvu_block *block;
63 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
66 block = &rvu->hw->block[blkaddr];
70 static void nix_mce_list_init(struct nix_mce_list *list, int max)
72 INIT_HLIST_HEAD(&list->head);
77 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
84 idx = mcast->next_free_mce;
85 mcast->next_free_mce += count;
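/* MCE entries are handed out with a simple bump allocator and are never
 * returned to the pool; that is sufficient here since the broadcast
 * lists are carved out once, at init time, by nix_setup_bcast_tables().
 */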
89 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
91 if (blkaddr == BLKADDR_NIX0 && hw->nix0)
97 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
98 int lvl, u16 pcifunc, u16 schq)
100 struct nix_txsch *txsch;
101 struct nix_hw *nix_hw;
103 nix_hw = get_nix_hw(rvu->hw, blkaddr);
107 txsch = &nix_hw->txsch[lvl];
108 /* Check out of bounds */
109 if (schq >= txsch->schq.max)
112 spin_lock(&rvu->rsrc_lock);
113 if (txsch->pfvf_map[schq] != pcifunc) {
114 spin_unlock(&rvu->rsrc_lock);
117 spin_unlock(&rvu->rsrc_lock);
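/* nix_interface_init(): for a CGX-mapped PF/VF, bind the NIXLF to its
 * CGX LMAC channel and pkind, then install an NPC unicast MCAM entry for
 * the PF/VF MAC address, add the PF_FUNC to the broadcast MCE replication
 * list and install a broadcast match entry on its RX channel.
 */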
121 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
123 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 pf = rvu_get_pf(pcifunc);
129 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
133 case NIX_INTF_TYPE_CGX:
134 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
135 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
137 pkind = rvu_npc_get_pkind(rvu, pf);
140 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
143 pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
144 pfvf->tx_chan_base = pfvf->rx_chan_base;
145 pfvf->rx_chan_cnt = 1;
146 pfvf->tx_chan_cnt = 1;
147 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
148 rvu_npc_set_pkind(rvu, pkind, pfvf);
150 case NIX_INTF_TYPE_LBK:
154 /* Install a UCAST forwarding rule in MCAM, matching the MAC address
155 * of the RVU PF/VF this NIXLF is attached to, on its RX channel.
157 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
158 pfvf->rx_chan_base, pfvf->mac_addr);
160 /* Add this PF_FUNC to bcast pkt replication list */
161 err = nix_update_bcast_mce_list(rvu, pcifunc, true);
164 "Bcast list, failed to enable PF_FUNC 0x%x\n",
169 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
170 nixlf, pfvf->rx_chan_base);
175 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
179 /* Remove this PF_FUNC from bcast pkt replication list */
180 err = nix_update_bcast_mce_list(rvu, pcifunc, false);
183 "Bcast list, failed to disable PF_FUNC 0x%x\n",
187 /* Free and disable any MCAM entries used by this NIX LF */
188 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
191 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
192 u64 format, bool v4, u64 *fidx)
194 struct nix_lso_format field = {0};
196 /* IP's Length field */
197 field.layer = NIX_TXLAYER_OL3;
198 /* In IPv4 the length field is at byte offset 2, in IPv6 at offset 4 */
199 field.offset = v4 ? 2 : 4;
200 field.sizem1 = 1; /* i.e. 2 bytes */
201 field.alg = NIX_LSOALG_ADD_PAYLEN;
202 rvu_write64(rvu, blkaddr,
203 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
206 /* No ID field in IPv6 header */
211 field.layer = NIX_TXLAYER_OL3;
213 field.sizem1 = 1; /* i.e. 2 bytes */
214 field.alg = NIX_LSOALG_ADD_SEGNUM;
215 rvu_write64(rvu, blkaddr,
216 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
220 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
221 u64 format, u64 *fidx)
223 struct nix_lso_format field = {0};
225 /* TCP's sequence number field */
226 field.layer = NIX_TXLAYER_OL4;
228 field.sizem1 = 3; /* i.e. 4 bytes */
229 field.alg = NIX_LSOALG_ADD_OFFSET;
230 rvu_write64(rvu, blkaddr,
231 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
234 /* TCP's flags field */
235 field.layer = NIX_TXLAYER_OL4;
237 field.sizem1 = 0; /* not needed */
238 field.alg = NIX_LSOALG_TCP_FLAGS;
239 rvu_write64(rvu, blkaddr,
240 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
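/* nix_setup_lso(): program the two LSO formats used for TSO. Each format
 * is a list of up to eight field operations; the L3 length (and, for
 * IPv4, the ID) and the L4 sequence/flags fields are filled in by the
 * helpers above, and the remaining slots are set to NOP. NIX_AF_LSO_CFG
 * additionally masks PSH/RST/FIN in the first and middle segments.
 */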
244 static void nix_setup_lso(struct rvu *rvu, int blkaddr)
246 u64 cfg, idx, fidx = 0;
249 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
250 /* For TSO, set first and middle segment flags to
251 * mask out PSH, RST & FIN flags in TCP packet
253 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
254 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
255 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
257 /* Configure format fields for TCPv4 segmentation offload */
258 idx = NIX_LSO_FORMAT_IDX_TSOV4;
259 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
260 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
262 /* Set rest of the fields to NOP */
263 for (; fidx < 8; fidx++) {
264 rvu_write64(rvu, blkaddr,
265 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
268 /* Configure format fields for TCPv6 segmentation offload */
269 idx = NIX_LSO_FORMAT_IDX_TSOV6;
271 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
272 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
274 /* Set rest of the fields to NOP */
275 for (; fidx < 8; fidx++) {
276 rvu_write64(rvu, blkaddr,
277 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
281 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
283 kfree(pfvf->rq_bmap);
284 kfree(pfvf->sq_bmap);
285 kfree(pfvf->cq_bmap);
287 qmem_free(rvu->dev, pfvf->rq_ctx);
289 qmem_free(rvu->dev, pfvf->sq_ctx);
291 qmem_free(rvu->dev, pfvf->cq_ctx);
293 qmem_free(rvu->dev, pfvf->rss_ctx);
294 if (pfvf->nix_qints_ctx)
295 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
296 if (pfvf->cq_ints_ctx)
297 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
299 pfvf->rq_bmap = NULL;
300 pfvf->cq_bmap = NULL;
301 pfvf->sq_bmap = NULL;
305 pfvf->rss_ctx = NULL;
306 pfvf->nix_qints_ctx = NULL;
307 pfvf->cq_ints_ctx = NULL;
310 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
311 struct rvu_pfvf *pfvf, int nixlf,
312 int rss_sz, int rss_grps, int hwctx_size)
314 int err, grp, num_indices;
316 /* RSS is not requested for this NIXLF */
319 num_indices = rss_sz * rss_grps;
321 /* Alloc NIX RSS HW context memory and config the base */
322 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
326 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
327 (u64)pfvf->rss_ctx->iova);
329 /* Config full RSS table size, enable RSS and caching */
330 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
331 BIT_ULL(36) | BIT_ULL(4) |
332 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
333 /* Config RSS group offset and sizes */
334 for (grp = 0; grp < rss_grps; grp++)
335 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
336 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
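/* RSS groups are laid out back to back in the indirection table: group
 * 'grp' starts at index rss_sz * grp. This is also why NIX_LF_ALLOC
 * insists on a power-of-two rss_sz; otherwise OFFSET plus the hash adder
 * could spill into the next group.
 */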
340 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
341 struct nix_aq_inst_s *inst)
343 struct admin_queue *aq = block->aq;
344 struct nix_aq_res_s *result;
348 result = (struct nix_aq_res_s *)aq->res->base;
350 /* Get the current head pointer, where this instruction will be appended */
351 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
352 head = (reg >> 4) & AQ_PTR_MASK;
354 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
355 (void *)inst, aq->inst->entry_sz);
356 memset(result, 0, sizeof(*result));
357 /* sync into memory */
360 /* Ring the doorbell and wait for result */
361 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
362 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
370 if (result->compcode != NIX_AQ_COMP_GOOD)
371 /* TODO: Replace this with some error code */
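/* rvu_nix_aq_enq_inst(): validate the requester's NIXLF and queue index
 * for the given context type, build a NIX AQ instruction whose result
 * area holds the context at +128 bytes and the mask at +256 bytes,
 * submit it under aq->lock, then update the RQ/SQ/CQ enable bitmaps for
 * INIT/WRITE ops and copy the context back to the mailbox for READ ops.
 */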
377 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
378 struct nix_aq_enq_rsp *rsp)
380 struct rvu_hwinfo *hw = rvu->hw;
381 u16 pcifunc = req->hdr.pcifunc;
382 int nixlf, blkaddr, rc = 0;
383 struct nix_aq_inst_s inst;
384 struct rvu_block *block;
385 struct admin_queue *aq;
386 struct rvu_pfvf *pfvf;
391 pfvf = rvu_get_pfvf(rvu, pcifunc);
392 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
393 if (!pfvf->nixlf || blkaddr < 0)
394 return NIX_AF_ERR_AF_LF_INVALID;
396 block = &hw->block[blkaddr];
399 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
400 return NIX_AF_ERR_AQ_ENQUEUE;
403 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
405 return NIX_AF_ERR_AF_LF_INVALID;
407 switch (req->ctype) {
408 case NIX_AQ_CTYPE_RQ:
409 /* Check if index exceeds the max number of queues */
410 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
411 rc = NIX_AF_ERR_AQ_ENQUEUE;
413 case NIX_AQ_CTYPE_SQ:
414 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
415 rc = NIX_AF_ERR_AQ_ENQUEUE;
417 case NIX_AQ_CTYPE_CQ:
418 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
419 rc = NIX_AF_ERR_AQ_ENQUEUE;
421 case NIX_AQ_CTYPE_RSS:
422 /* Check if RSS is enabled and qidx is within range */
423 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
424 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
425 (req->qidx >= (256UL << (cfg & 0xF))))
426 rc = NIX_AF_ERR_AQ_ENQUEUE;
428 case NIX_AQ_CTYPE_MCE:
429 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
430 /* Check if index exceeds MCE list length */
431 if (!hw->nix0->mcast.mce_ctx ||
432 (req->qidx >= (256UL << (cfg & 0xF))))
433 rc = NIX_AF_ERR_AQ_ENQUEUE;
435 /* Adding multicast lists for requests from PF/VFs is not
436 * yet supported, so ignore this.
439 rc = NIX_AF_ERR_AQ_ENQUEUE;
442 rc = NIX_AF_ERR_AQ_ENQUEUE;
448 /* Check if the SMQ pointed to by the SQ context belongs to this PF/VF */
449 if (req->ctype == NIX_AQ_CTYPE_SQ &&
450 req->op != NIX_AQ_INSTOP_WRITE) {
451 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
452 pcifunc, req->sq.smq))
453 return NIX_AF_ERR_AQ_ENQUEUE;
456 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
458 inst.cindex = req->qidx;
459 inst.ctype = req->ctype;
461 /* Enqueuing multiple instructions at once is not supported yet,
462 * so always use the first entry in the result memory.
464 inst.res_addr = (u64)aq->res->iova;
466 /* Clean result + context memory */
467 memset(aq->res->base, 0, aq->res->entry_sz);
468 /* Context needs to be written at RES_ADDR + 128 */
469 ctx = aq->res->base + 128;
470 /* Mask needs to be written at RES_ADDR + 256 */
471 mask = aq->res->base + 256;
474 case NIX_AQ_INSTOP_WRITE:
475 if (req->ctype == NIX_AQ_CTYPE_RQ)
476 memcpy(mask, &req->rq_mask,
477 sizeof(struct nix_rq_ctx_s));
478 else if (req->ctype == NIX_AQ_CTYPE_SQ)
479 memcpy(mask, &req->sq_mask,
480 sizeof(struct nix_sq_ctx_s));
481 else if (req->ctype == NIX_AQ_CTYPE_CQ)
482 memcpy(mask, &req->cq_mask,
483 sizeof(struct nix_cq_ctx_s));
484 else if (req->ctype == NIX_AQ_CTYPE_RSS)
485 memcpy(mask, &req->rss_mask,
486 sizeof(struct nix_rsse_s));
487 else if (req->ctype == NIX_AQ_CTYPE_MCE)
488 memcpy(mask, &req->mce_mask,
489 sizeof(struct nix_rx_mce_s));
491 case NIX_AQ_INSTOP_INIT:
492 if (req->ctype == NIX_AQ_CTYPE_RQ)
493 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
494 else if (req->ctype == NIX_AQ_CTYPE_SQ)
495 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
496 else if (req->ctype == NIX_AQ_CTYPE_CQ)
497 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
498 else if (req->ctype == NIX_AQ_CTYPE_RSS)
499 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
500 else if (req->ctype == NIX_AQ_CTYPE_MCE)
501 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
503 case NIX_AQ_INSTOP_NOP:
504 case NIX_AQ_INSTOP_READ:
505 case NIX_AQ_INSTOP_LOCK:
506 case NIX_AQ_INSTOP_UNLOCK:
509 rc = NIX_AF_ERR_AQ_ENQUEUE;
513 spin_lock(&aq->lock);
515 /* Submit the instruction to AQ */
516 rc = nix_aq_enqueue_wait(rvu, block, &inst);
518 spin_unlock(&aq->lock);
522 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
523 if (req->op == NIX_AQ_INSTOP_INIT) {
524 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
525 __set_bit(req->qidx, pfvf->rq_bmap);
526 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
527 __set_bit(req->qidx, pfvf->sq_bmap);
528 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
529 __set_bit(req->qidx, pfvf->cq_bmap);
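/* For WRITE ops the new enable state is the written ena bit wherever the
 * mask selects it, falling back to the currently tracked bitmap state
 * where it does not.
 */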
532 if (req->op == NIX_AQ_INSTOP_WRITE) {
533 if (req->ctype == NIX_AQ_CTYPE_RQ) {
534 ena = (req->rq.ena & req->rq_mask.ena) |
535 (test_bit(req->qidx, pfvf->rq_bmap) &
538 __set_bit(req->qidx, pfvf->rq_bmap);
540 __clear_bit(req->qidx, pfvf->rq_bmap);
542 if (req->ctype == NIX_AQ_CTYPE_SQ) {
543 ena = (req->sq.ena & req->sq_mask.ena) |
544 (test_bit(req->qidx, pfvf->sq_bmap) &
547 __set_bit(req->qidx, pfvf->sq_bmap);
549 __clear_bit(req->qidx, pfvf->sq_bmap);
551 if (req->ctype == NIX_AQ_CTYPE_CQ) {
552 ena = (req->cq.ena & req->cq_mask.ena) |
553 (test_bit(req->qidx, pfvf->cq_bmap) &
556 __set_bit(req->qidx, pfvf->cq_bmap);
558 __clear_bit(req->qidx, pfvf->cq_bmap);
563 /* Copy read context into mailbox */
564 if (req->op == NIX_AQ_INSTOP_READ) {
565 if (req->ctype == NIX_AQ_CTYPE_RQ)
566 memcpy(&rsp->rq, ctx,
567 sizeof(struct nix_rq_ctx_s));
568 else if (req->ctype == NIX_AQ_CTYPE_SQ)
569 memcpy(&rsp->sq, ctx,
570 sizeof(struct nix_sq_ctx_s));
571 else if (req->ctype == NIX_AQ_CTYPE_CQ)
572 memcpy(&rsp->cq, ctx,
573 sizeof(struct nix_cq_ctx_s));
574 else if (req->ctype == NIX_AQ_CTYPE_RSS)
575 memcpy(&rsp->rss, ctx,
576 sizeof(struct nix_rsse_s));
577 else if (req->ctype == NIX_AQ_CTYPE_MCE)
578 memcpy(&rsp->mce, ctx,
579 sizeof(struct nix_rx_mce_s));
583 spin_unlock(&aq->lock);
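/* nix_lf_hwctx_disable(): walk every enabled queue of the requested
 * context type and issue an AQ WRITE with ena left at zero (and only the
 * ena bit set in the mask) so that HW disables each RQ/SQ/CQ context.
 */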
587 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
589 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
590 struct nix_aq_enq_req aq_req;
595 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
596 return NIX_AF_ERR_AQ_ENQUEUE;
598 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
599 aq_req.hdr.pcifunc = req->hdr.pcifunc;
601 if (req->ctype == NIX_AQ_CTYPE_CQ) {
603 aq_req.cq_mask.ena = 1;
604 q_cnt = pfvf->cq_ctx->qsize;
605 bmap = pfvf->cq_bmap;
607 if (req->ctype == NIX_AQ_CTYPE_SQ) {
609 aq_req.sq_mask.ena = 1;
610 q_cnt = pfvf->sq_ctx->qsize;
611 bmap = pfvf->sq_bmap;
613 if (req->ctype == NIX_AQ_CTYPE_RQ) {
615 aq_req.rq_mask.ena = 1;
616 q_cnt = pfvf->rq_ctx->qsize;
617 bmap = pfvf->rq_bmap;
620 aq_req.ctype = req->ctype;
621 aq_req.op = NIX_AQ_INSTOP_WRITE;
623 for (qidx = 0; qidx < q_cnt; qidx++) {
624 if (!test_bit(qidx, bmap))
627 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
630 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
631 (req->ctype == NIX_AQ_CTYPE_CQ) ?
632 "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
640 int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
641 struct nix_aq_enq_req *req,
642 struct nix_aq_enq_rsp *rsp)
644 return rvu_nix_aq_enq_inst(rvu, req, rsp);
647 int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
648 struct hwctx_disable_req *req,
651 return nix_lf_hwctx_disable(rvu, req);
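/* NIX_LF_ALLOC: reset the LF, read per-context sizes from NIX_AF_CONST3,
 * allocate RQ/SQ/CQ (and optionally RSS) context memory plus the CQINT
 * and QINT areas, program their base IOVAs and cached queue counts, and
 * finally hook the LF into NPC via nix_interface_init(). On failure,
 * everything allocated so far is released through nix_ctx_free().
 */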
654 int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
655 struct nix_lf_alloc_req *req,
656 struct nix_lf_alloc_rsp *rsp)
658 int nixlf, qints, hwctx_size, err, rc = 0;
659 struct rvu_hwinfo *hw = rvu->hw;
660 u16 pcifunc = req->hdr.pcifunc;
661 struct rvu_block *block;
662 struct rvu_pfvf *pfvf;
666 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
667 return NIX_AF_ERR_PARAM;
669 pfvf = rvu_get_pfvf(rvu, pcifunc);
670 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
671 if (!pfvf->nixlf || blkaddr < 0)
672 return NIX_AF_ERR_AF_LF_INVALID;
674 block = &hw->block[blkaddr];
675 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
677 return NIX_AF_ERR_AF_LF_INVALID;
679 /* If RSS is being enabled, check if the requested config is valid.
680 * The RSS table size should be a power of two; otherwise
681 * RSS_GRP::OFFSET + adder might go beyond that group, or
682 * the entire table can't be used.
684 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
685 !is_power_of_2(req->rss_sz)))
686 return NIX_AF_ERR_RSS_SIZE_INVALID;
689 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
690 return NIX_AF_ERR_RSS_GRPS_INVALID;
692 /* Reset this NIX LF */
693 err = rvu_lf_reset(rvu, block, nixlf);
695 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
696 block->addr - BLKADDR_NIX0, nixlf);
697 return NIX_AF_ERR_LF_RESET;
700 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
702 /* Alloc NIX RQ HW context memory and config the base */
703 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
704 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
708 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
712 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
713 (u64)pfvf->rq_ctx->iova);
715 /* Set caching and queue count in HW */
716 cfg = BIT_ULL(36) | (req->rq_cnt - 1);
717 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
719 /* Alloc NIX SQ HW context memory and config the base */
720 hwctx_size = 1UL << (ctx_cfg & 0xF);
721 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
725 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
729 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
730 (u64)pfvf->sq_ctx->iova);
731 cfg = BIT_ULL(36) | (req->sq_cnt - 1);
732 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
734 /* Alloc NIX CQ HW context memory and config the base */
735 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
736 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
740 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
744 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
745 (u64)pfvf->cq_ctx->iova);
746 cfg = BIT_ULL(36) | (req->cq_cnt - 1);
747 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
749 /* Initialize receive side scaling (RSS) */
750 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
751 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
752 req->rss_sz, req->rss_grps, hwctx_size);
756 /* Alloc memory for CQINT's HW contexts */
757 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
758 qints = (cfg >> 24) & 0xFFF;
759 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
760 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
764 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
765 (u64)pfvf->cq_ints_ctx->iova);
766 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
768 /* Alloc memory for QINT's HW contexts */
769 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
770 qints = (cfg >> 12) & 0xFFF;
771 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
772 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
776 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
777 (u64)pfvf->nix_qints_ctx->iova);
778 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
780 /* Enable LMTST for this NIX LF */
781 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
783 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC
784 * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's
787 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
792 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
793 cfg |= (u64)pcifunc << 16;
795 cfg |= (u64)req->sso_func << 16;
797 cfg |= (u64)req->xqe_sz << 33;
798 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
800 /* Config Rx pkt length, csum checks and apad enable / disable */
801 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
803 err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
810 nix_ctx_free(rvu, pfvf);
814 /* Set macaddr of this PF/VF */
815 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
817 /* set SQB size info */
818 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
819 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
820 rsp->rx_chan_base = pfvf->rx_chan_base;
821 rsp->tx_chan_base = pfvf->tx_chan_base;
822 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
823 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
824 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
825 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
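/* NIX_LF_FREE: undo NIX_LF_ALLOC. Remove the LF's NPC entries and its
 * broadcast MCE list membership via nix_interface_deinit(), reset the
 * LF in HW and release all of its context memory.
 */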
829 int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
832 struct rvu_hwinfo *hw = rvu->hw;
833 u16 pcifunc = req->hdr.pcifunc;
834 struct rvu_block *block;
835 int blkaddr, nixlf, err;
836 struct rvu_pfvf *pfvf;
838 pfvf = rvu_get_pfvf(rvu, pcifunc);
839 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
840 if (!pfvf->nixlf || blkaddr < 0)
841 return NIX_AF_ERR_AF_LF_INVALID;
843 block = &hw->block[blkaddr];
844 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
846 return NIX_AF_ERR_AF_LF_INVALID;
848 nix_interface_deinit(rvu, pcifunc, nixlf);
850 /* Reset this NIX LF */
851 err = rvu_lf_reset(rvu, block, nixlf);
853 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
854 block->addr - BLKADDR_NIX0, nixlf);
855 return NIX_AF_ERR_LF_RESET;
858 nix_ctx_free(rvu, pfvf);
863 /* Disable shaping of pkts by a scheduler queue
864 * at a given scheduler level.
866 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
869 u64 cir_reg = 0, pir_reg = 0;
873 case NIX_TXSCH_LVL_TL1:
874 cir_reg = NIX_AF_TL1X_CIR(schq);
875 pir_reg = 0; /* PIR not available at TL1 */
877 case NIX_TXSCH_LVL_TL2:
878 cir_reg = NIX_AF_TL2X_CIR(schq);
879 pir_reg = NIX_AF_TL2X_PIR(schq);
881 case NIX_TXSCH_LVL_TL3:
882 cir_reg = NIX_AF_TL3X_CIR(schq);
883 pir_reg = NIX_AF_TL3X_PIR(schq);
885 case NIX_TXSCH_LVL_TL4:
886 cir_reg = NIX_AF_TL4X_CIR(schq);
887 pir_reg = NIX_AF_TL4X_PIR(schq);
893 cfg = rvu_read64(rvu, blkaddr, cir_reg);
894 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
898 cfg = rvu_read64(rvu, blkaddr, pir_reg);
899 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
902 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
905 struct rvu_hwinfo *hw = rvu->hw;
908 /* Reset TL4's SDP link config */
909 if (lvl == NIX_TXSCH_LVL_TL4)
910 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
912 if (lvl != NIX_TXSCH_LVL_TL2)
915 /* Reset TL2's CGX or LBK link config */
916 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
917 rvu_write64(rvu, blkaddr,
918 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
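/* NIX_TXSCH_ALLOC: two passes under rsrc_lock. First verify that every
 * level's request (contiguous plus scattered) fits the free pool, then
 * allocate, record the owner in pfvf_map and reset each queue's link
 * and shaping config before returning the queue numbers in the rsp.
 */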
921 int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
922 struct nix_txsch_alloc_req *req,
923 struct nix_txsch_alloc_rsp *rsp)
925 u16 pcifunc = req->hdr.pcifunc;
926 struct nix_txsch *txsch;
927 int lvl, idx, req_schq;
928 struct rvu_pfvf *pfvf;
929 struct nix_hw *nix_hw;
933 pfvf = rvu_get_pfvf(rvu, pcifunc);
934 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
935 if (!pfvf->nixlf || blkaddr < 0)
936 return NIX_AF_ERR_AF_LF_INVALID;
938 nix_hw = get_nix_hw(rvu->hw, blkaddr);
942 spin_lock(&rvu->rsrc_lock);
943 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
944 txsch = &nix_hw->txsch[lvl];
945 req_schq = req->schq_contig[lvl] + req->schq[lvl];
947 /* There are only 28 TL1s */
948 if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
951 /* Check if request is valid */
952 if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
955 /* If contiguous queues are needed, check for availability */
956 if (req->schq_contig[lvl] &&
957 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
960 /* Check if full request can be accommodated */
961 if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
965 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
966 txsch = &nix_hw->txsch[lvl];
967 rsp->schq_contig[lvl] = req->schq_contig[lvl];
968 rsp->schq[lvl] = req->schq[lvl];
971 /* Alloc contiguous queues first */
972 if (req->schq_contig[lvl]) {
973 schq = rvu_alloc_rsrc_contig(&txsch->schq,
974 req->schq_contig[lvl]);
976 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
977 txsch->pfvf_map[schq] = pcifunc;
978 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
979 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
980 rsp->schq_contig_list[lvl][idx] = schq;
985 /* Alloc non-contiguous queues */
986 for (idx = 0; idx < req->schq[lvl]; idx++) {
987 schq = rvu_alloc_rsrc(&txsch->schq);
988 txsch->pfvf_map[schq] = pcifunc;
989 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
990 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
991 rsp->schq_list[lvl][idx] = schq;
996 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
998 spin_unlock(&rvu->rsrc_lock);
1002 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1004 int blkaddr, nixlf, lvl, schq, err;
1005 struct rvu_hwinfo *hw = rvu->hw;
1006 struct nix_txsch *txsch;
1007 struct nix_hw *nix_hw;
1010 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1012 return NIX_AF_ERR_AF_LF_INVALID;
1014 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1018 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1020 return NIX_AF_ERR_AF_LF_INVALID;
1022 /* Disable TL2/3 queue links before SMQ flush */
1023 spin_lock(&rvu->rsrc_lock);
1024 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1025 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1028 txsch = &nix_hw->txsch[lvl];
1029 for (schq = 0; schq < txsch->schq.max; schq++) {
1030 if (txsch->pfvf_map[schq] != pcifunc)
1032 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1037 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1038 for (schq = 0; schq < txsch->schq.max; schq++) {
1039 if (txsch->pfvf_map[schq] != pcifunc)
1041 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1042 /* Do SMQ flush and set enqueue xoff */
1043 cfg |= BIT_ULL(50) | BIT_ULL(49);
1044 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1046 /* Wait for flush to complete */
1047 err = rvu_poll_reg(rvu, blkaddr,
1048 NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1051 "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1055 /* Now free scheduler queues to free pool */
1056 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1057 txsch = &nix_hw->txsch[lvl];
1058 for (schq = 0; schq < txsch->schq.max; schq++) {
1059 if (txsch->pfvf_map[schq] != pcifunc)
1061 rvu_free_rsrc(&txsch->schq, schq);
1062 txsch->pfvf_map[schq] = 0;
1065 spin_unlock(&rvu->rsrc_lock);
1067 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1068 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1069 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1071 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1076 int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
1077 struct nix_txsch_free_req *req,
1078 struct msg_rsp *rsp)
1080 return nix_txschq_free(rvu, req->hdr.pcifunc);
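/* is_txschq_config_valid(): a TXSCHQ register write from a PF/VF is only
 * accepted if the register offset is legal for that scheduling level,
 * the queue index encoded in the offset is owned by the caller, and any
 * parent queue encoded in the value is owned by the caller as well.
 */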
1083 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1084 int lvl, u64 reg, u64 regval)
1086 u64 regbase = reg & 0xFFFF;
1089 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1092 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1093 /* Check if this schq belongs to this PF/VF or not */
1094 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1097 parent = (regval >> 16) & 0x1FF;
1098 /* Validate MDQ's TL4 parent */
1099 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1100 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1103 /* Validate TL4's TL3 parent */
1104 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1105 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1108 /* Validate TL3's TL2 parent */
1109 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1110 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1113 /* Validate TL2's TL1 parent */
1114 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1115 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1121 int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
1122 struct nix_txschq_config *req,
1123 struct msg_rsp *rsp)
1125 struct rvu_hwinfo *hw = rvu->hw;
1126 u16 pcifunc = req->hdr.pcifunc;
1127 u64 reg, regval, schq_regbase;
1128 struct nix_txsch *txsch;
1129 struct nix_hw *nix_hw;
1130 int blkaddr, idx, err;
1133 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1134 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1135 return NIX_AF_INVAL_TXSCHQ_CFG;
1137 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1139 return NIX_AF_ERR_AF_LF_INVALID;
1141 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1145 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1147 return NIX_AF_ERR_AF_LF_INVALID;
1149 txsch = &nix_hw->txsch[req->lvl];
1150 for (idx = 0; idx < req->num_regs; idx++) {
1151 reg = req->reg[idx];
1152 regval = req->regval[idx];
1153 schq_regbase = reg & 0xFFFF;
1155 if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
1156 txsch->lvl, reg, regval))
1157 return NIX_AF_INVAL_TXSCHQ_CFG;
1159 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1160 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1161 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1163 regval &= ~(0x7FULL << 24);
1164 regval |= ((u64)nixlf << 24);
1167 rvu_write64(rvu, blkaddr, reg, regval);
1169 /* If an SMQ flush was requested, poll for its completion */
1170 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1171 (regval & BIT_ULL(49))) {
1172 err = rvu_poll_reg(rvu, blkaddr,
1173 reg, BIT_ULL(49), true);
1175 return NIX_AF_SMQ_FLUSH_FAILED;
1181 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1182 struct nix_vtag_config *req)
1186 #define NIX_VTAGTYPE_MAX 0x8ull
1187 #define NIX_VTAGSIZE_MASK 0x7ull
1188 #define NIX_VTAGSTRIP_CAP_MASK 0x30ull
1190 if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
1191 req->vtag_size > VTAGSIZE_T8)
1194 regval = rvu_read64(rvu, blkaddr,
1195 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
1197 if (req->rx.strip_vtag && req->rx.capture_vtag)
1198 regval |= BIT_ULL(4) | BIT_ULL(5);
1199 else if (req->rx.strip_vtag)
1200 regval |= BIT_ULL(4);
1202 regval &= ~(BIT_ULL(4) | BIT_ULL(5));
1204 regval &= ~NIX_VTAGSIZE_MASK;
1205 regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
1207 rvu_write64(rvu, blkaddr,
1208 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1212 int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
1213 struct nix_vtag_config *req,
1214 struct msg_rsp *rsp)
1216 struct rvu_hwinfo *hw = rvu->hw;
1217 u16 pcifunc = req->hdr.pcifunc;
1218 int blkaddr, nixlf, err;
1220 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1222 return NIX_AF_ERR_AF_LF_INVALID;
1224 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1226 return NIX_AF_ERR_AF_LF_INVALID;
1228 if (req->cfg_type) {
1229 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1231 return NIX_AF_ERR_PARAM;
1233 /* TODO: handle tx vtag configuration */
1240 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1241 u16 pcifunc, int next, bool eol)
1243 struct nix_aq_enq_req aq_req;
1246 aq_req.hdr.pcifunc = pcifunc;
1247 aq_req.ctype = NIX_AQ_CTYPE_MCE;
1251 /* Forward bcast pkts to RQ0, RSS not needed */
1253 aq_req.mce.index = 0;
1254 aq_req.mce.eol = eol;
1255 aq_req.mce.pf_func = pcifunc;
1256 aq_req.mce.next = next;
1258 /* All fields valid */
1259 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
1261 err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1263 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1264 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1270 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1271 u16 pcifunc, int idx, bool add)
1273 struct mce *mce, *tail = NULL;
1274 bool delete = false;
1276 /* Scan through the current list */
1277 hlist_for_each_entry(mce, &mce_list->head, node) {
1278 /* If already exists, then delete */
1279 if (mce->pcifunc == pcifunc && !add) {
1287 hlist_del(&mce->node);
1296 /* Add a new one to the list, at the tail */
1297 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1301 mce->pcifunc = pcifunc;
1303 hlist_add_head(&mce->node, &mce_list->head);
1305 hlist_add_behind(&mce->node, &tail->node);
1310 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1312 int err = 0, idx, next_idx, count;
1313 struct nix_mce_list *mce_list;
1314 struct mce *mce, *next_mce;
1315 struct nix_mcast *mcast;
1316 struct nix_hw *nix_hw;
1317 struct rvu_pfvf *pfvf;
1320 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1324 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1328 mcast = &nix_hw->mcast;
1330 /* Get this PF/VF func's MCE index */
1331 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1332 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1334 mce_list = &pfvf->bcast_mce_list;
1335 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1337 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1338 __func__, idx, mce_list->max,
1339 pcifunc >> RVU_PFVF_PF_SHIFT);
1343 spin_lock(&mcast->mce_lock);
1345 err = nix_update_mce_list(mce_list, pcifunc, idx, add);
1349 /* Disable MCAM entry in NPC */
1351 if (!mce_list->count)
1353 count = mce_list->count;
1355 /* Dump the updated list to HW */
1356 hlist_for_each_entry(mce, &mce_list->head, node) {
1360 next_mce = hlist_entry(mce->node.next,
1362 next_idx = next_mce->idx;
1364 /* EOL should be set in last MCE */
1365 err = nix_setup_mce(rvu, mce->idx,
1366 NIX_AQ_INSTOP_WRITE, mce->pcifunc,
1367 next_idx, count ? false : true);
1373 spin_unlock(&mcast->mce_lock);
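/* nix_setup_bcast_tables(): reserve one MCE list per CGX-mapped PF,
 * sized for the PF plus all of its VFs, and pre-populate it with dummy
 * entries so that later list updates can always use AQ WRITE.
 */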
1377 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1379 struct nix_mcast *mcast = &nix_hw->mcast;
1380 int err, pf, numvfs, idx;
1381 struct rvu_pfvf *pfvf;
1385 /* Skip PF0 (i.e. AF) */
1386 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1387 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1388 /* If PF is not enabled, nothing to do */
1389 if (!((cfg >> 20) & 0x01))
1391 /* Get numVFs attached to this PF */
1392 numvfs = (cfg >> 12) & 0xFF;
1394 pfvf = &rvu->pf[pf];
1395 /* Save the start MCE */
1396 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1398 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1400 for (idx = 0; idx < (numvfs + 1); idx++) {
1401 /* idx-0 is for PF, followed by VFs */
1402 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1404 /* Add dummy entries now, so that we don't have to check
1405 * whether AQ_OP should be INIT or WRITE later on.
1406 * Will be updated when a NIXLF is attached/detached to
1409 err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1419 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1421 struct nix_mcast *mcast = &nix_hw->mcast;
1422 struct rvu_hwinfo *hw = rvu->hw;
1425 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1426 size = (1ULL << size);
1428 /* Alloc memory for multicast/mirror replication entries */
1429 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1430 (256UL << MC_TBL_SIZE), size);
1434 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1435 (u64)mcast->mce_ctx->iova);
1437 /* Set max list length to the max number of VFs per PF, plus the PF itself */
1438 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1439 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1441 /* Alloc memory for multicast replication buffers */
1442 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
1443 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
1444 (8UL << MC_BUF_CNT), size);
1448 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
1449 (u64)mcast->mcast_buf->iova);
1451 /* Alloc pkind for NIX internal RX multicast/mirror replay */
1452 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
1454 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
1455 BIT_ULL(63) | (mcast->replay_pkind << 24) |
1456 BIT_ULL(20) | MC_BUF_CNT);
1458 spin_lock_init(&mcast->mce_lock);
1460 return nix_setup_bcast_tables(rvu, nix_hw);
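/* nix_setup_txschq(): read the per-level scheduler queue counts from the
 * MDQ/TL*_CONST registers and, for each level, allocate a free-queue
 * bitmap plus a pfvf_map[] recording which PF_FUNC owns each queue.
 */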
1463 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1465 struct nix_txsch *txsch;
1469 /* Get the scheduler queue count of each type and allocate
1470 * a bitmap for each, for alloc/free/attach operations.
1472 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1473 txsch = &nix_hw->txsch[lvl];
1476 case NIX_TXSCH_LVL_SMQ:
1477 reg = NIX_AF_MDQ_CONST;
1479 case NIX_TXSCH_LVL_TL4:
1480 reg = NIX_AF_TL4_CONST;
1482 case NIX_TXSCH_LVL_TL3:
1483 reg = NIX_AF_TL3_CONST;
1485 case NIX_TXSCH_LVL_TL2:
1486 reg = NIX_AF_TL2_CONST;
1488 case NIX_TXSCH_LVL_TL1:
1489 reg = NIX_AF_TL1_CONST;
1492 cfg = rvu_read64(rvu, blkaddr, reg);
1493 txsch->schq.max = cfg & 0xFFFF;
1494 err = rvu_alloc_bitmap(&txsch->schq);
1498 /* Allocate memory for scheduler queues to
1499 * PF/VF pcifunc mapping info.
1501 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
1502 sizeof(u16), GFP_KERNEL);
1503 if (!txsch->pfvf_map)
1509 int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
1510 struct msg_rsp *rsp)
1512 struct rvu_hwinfo *hw = rvu->hw;
1513 u16 pcifunc = req->hdr.pcifunc;
1514 int i, nixlf, blkaddr;
1517 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1519 return NIX_AF_ERR_AF_LF_INVALID;
1521 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1523 return NIX_AF_ERR_AF_LF_INVALID;
1525 /* Get stats count supported by HW */
1526 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1528 /* Reset tx stats */
1529 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
1530 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
1532 /* Reset rx stats */
1533 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
1534 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
1539 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
1544 /* Start X2P bus calibration */
1545 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1546 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
1547 /* Wait for calibration to complete */
1548 err = rvu_poll_reg(rvu, blkaddr,
1549 NIX_AF_STATUS, BIT_ULL(10), false);
1551 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
1555 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
1556 /* Check if CGX devices are ready */
1557 for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
1558 if (status & (BIT_ULL(16 + idx)))
1561 "CGX%d didn't respond to NIX X2P calibration\n", idx);
1565 /* Check if LBK is ready */
1566 if (!(status & BIT_ULL(19))) {
1568 "LBK didn't respond to NIX X2P calibration\n");
1572 /* Clear 'calibrate_x2p' bit */
1573 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1574 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
1575 if (err || (status & 0x3FFULL))
1577 "NIX X2P calibration failed, status 0x%llx\n", status);
1583 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
1588 /* Set admin queue endianness */
1589 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
1592 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
1595 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
1598 /* Do not bypass NDC cache */
1599 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
1601 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
1603 /* Result structure can be followed by RQ/SQ/CQ context at
1604 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
1605 * operation type. Alloc sufficient result memory for all operations.
1607 err = rvu_aq_alloc(rvu, &block->aq,
1608 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
1609 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
1613 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
1614 rvu_write64(rvu, block->addr,
1615 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
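/* rvu_nix_init(): calibrate the X2P bus to confirm CGX/LBK links are up,
 * record link counts from NIX_AF_CONST, initialize the admin queue,
 * restore the CINT timer delay, program the LSO formats and, for NIX0,
 * set up TX schedulers, multicast tables and the default outer
 * L2/IP/TCP/UDP layer definitions used by the HW protocol checker.
 */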
1619 int rvu_nix_init(struct rvu *rvu)
1621 struct rvu_hwinfo *hw = rvu->hw;
1622 struct rvu_block *block;
1626 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
1629 block = &hw->block[blkaddr];
1631 /* Calibrate X2P bus to check if CGX/LBK links are fine */
1632 err = nix_calibrate_x2p(rvu, blkaddr);
1636 /* Set num of links of each type */
1637 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
1638 hw->cgx = (cfg >> 12) & 0xF;
1639 hw->lmac_per_cgx = (cfg >> 8) & 0xF;
1640 hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
1644 /* Initialize admin queue */
1645 err = nix_aq_init(rvu, block);
1649 /* Restore CINT timer delay to HW reset values */
1650 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
1652 /* Configure segmentation offload formats */
1653 nix_setup_lso(rvu, blkaddr);
1655 if (blkaddr == BLKADDR_NIX0) {
1656 hw->nix0 = devm_kzalloc(rvu->dev,
1657 sizeof(struct nix_hw), GFP_KERNEL);
1661 err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
1665 err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
1669 /* Config Outer L2, IP, TCP and UDP's NPC layer info.
1670 * This helps HW protocol checker to identify headers
1671 * and validate length and checksums.
1673 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
1674 (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
1675 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
1676 (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
1677 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
1678 (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
1679 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
1680 (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
1685 void rvu_nix_freemem(struct rvu *rvu)
1687 struct rvu_hwinfo *hw = rvu->hw;
1688 struct rvu_block *block;
1689 struct nix_txsch *txsch;
1690 struct nix_mcast *mcast;
1691 struct nix_hw *nix_hw;
1694 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
1698 block = &hw->block[blkaddr];
1699 rvu_aq_free(rvu, block->aq);
1701 if (blkaddr == BLKADDR_NIX0) {
1702 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1706 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1707 txsch = &nix_hw->txsch[lvl];
1708 kfree(txsch->schq.bmap);
1711 mcast = &nix_hw->mcast;
1712 qmem_free(rvu->dev, mcast->mce_ctx);
1713 qmem_free(rvu->dev, mcast->mcast_buf);