1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
46 /* For now considering MC resources needed for broadcast
47 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
48 */
49 #define MC_TBL_SIZE MC_TBL_SZ_512
50 #define MC_BUF_CNT MC_BUF_CNT_128
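/* Sizing sketch: with one bcast MCE reserved per PF/VF, 256 HWVFs + 12 PFs
 * needs roughly 268 entries; nix_setup_mcast() allocates
 * (256UL << MC_TBL_SIZE) MCE contexts, so MC_TBL_SZ_512 presumably selects
 * a 512-entry table, leaving some headroom.
 */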
53 struct hlist_node node;
58 static void nix_mce_list_init(struct nix_mce_list *list, int max)
60 INIT_HLIST_HEAD(&list->head);
65 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
72 idx = mcast->next_free_mce;
73 mcast->next_free_mce += count;
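/* Note: this is a simple bump allocator; MCE indices are handed out
 * sequentially from next_free_mce and are never returned to a free pool.
 */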
77 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
79 if (blkaddr == BLKADDR_NIX0 && hw->nix0)
85 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
86 int lvl, u16 pcifunc, u16 schq)
88 struct nix_txsch *txsch;
89 struct nix_hw *nix_hw;
91 nix_hw = get_nix_hw(rvu->hw, blkaddr);
95 txsch = &nix_hw->txsch[lvl];
96 /* Check out of bounds */
97 if (schq >= txsch->schq.max)
100 spin_lock(&rvu->rsrc_lock);
101 if (txsch->pfvf_map[schq] != pcifunc) {
102 spin_unlock(&rvu->rsrc_lock);
105 spin_unlock(&rvu->rsrc_lock);
109 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
111 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
116 pf = rvu_get_pf(pcifunc);
117 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
121 case NIX_INTF_TYPE_CGX:
122 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
123 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
125 pkind = rvu_npc_get_pkind(rvu, pf);
128 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
131 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
132 rvu_npc_set_pkind(rvu, pkind, pfvf);
134 case NIX_INTF_TYPE_LBK:
138 /* Add this PF_FUNC to bcast pkt replication list */
139 err = nix_update_bcast_mce_list(rvu, pcifunc, true);
142 "Bcast list, failed to enable PF_FUNC 0x%x\n",
148 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
152 /* Remove this PF_FUNC from bcast pkt replication list */
153 err = nix_update_bcast_mce_list(rvu, pcifunc, false);
156 "Bcast list, failed to disable PF_FUNC 0x%x\n",
161 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
162 u64 format, bool v4, u64 *fidx)
164 struct nix_lso_format field = {0};
166 /* IP's Length field */
167 field.layer = NIX_TXLAYER_OL3;
168 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
169 field.offset = v4 ? 2 : 4;
170 field.sizem1 = 1; /* i.e. 2 bytes */
171 field.alg = NIX_LSOALG_ADD_PAYLEN;
172 rvu_write64(rvu, blkaddr,
173 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
176 /* No ID field in IPv6 header */
181 field.layer = NIX_TXLAYER_OL3;
183 field.sizem1 = 1; /* i.e. 2 bytes */
184 field.alg = NIX_LSOALG_ADD_SEGNUM;
185 rvu_write64(rvu, blkaddr,
186 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
190 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
191 u64 format, u64 *fidx)
193 struct nix_lso_format field = {0};
195 /* TCP's sequence number field */
196 field.layer = NIX_TXLAYER_OL4;
198 field.sizem1 = 3; /* i.e. 4 bytes */
199 field.alg = NIX_LSOALG_ADD_OFFSET;
200 rvu_write64(rvu, blkaddr,
201 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
204 /* TCP's flags field */
205 field.layer = NIX_TXLAYER_OL4;
207 field.sizem1 = 0; /* not needed */
208 field.alg = NIX_LSOALG_TCP_FLAGS;
209 rvu_write64(rvu, blkaddr,
210 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
214 static void nix_setup_lso(struct rvu *rvu, int blkaddr)
216 u64 cfg, idx, fidx = 0;
219 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
220 /* For TSO, set first and middle segment flags to
221 * mask out PSH, RST & FIN flags in TCP packet
222 */
223 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
224 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
225 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
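/* Worked example for the mask above: with TCP flags FIN=0x01, SYN=0x02,
 * RST=0x04, PSH=0x08, ACK=0x10, ..., 0xFFF2 equals ~(FIN | RST | PSH)
 * within the 16-bit field, so first/middle segments keep every flag except
 * FIN, RST and PSH; BIT_ULL(63) presumably enables LSO globally.
 */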
227 /* Configure format fields for TCPv4 segmentation offload */
228 idx = NIX_LSO_FORMAT_IDX_TSOV4;
229 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
230 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
232 /* Set rest of the fields to NOP */
233 for (; fidx < 8; fidx++) {
234 rvu_write64(rvu, blkaddr,
235 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
238 /* Configure format fields for TCPv6 segmentation offload */
239 idx = NIX_LSO_FORMAT_IDX_TSOV6;
241 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
242 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
244 /* Set rest of the fields to NOP */
245 for (; fidx < 8; fidx++) {
246 rvu_write64(rvu, blkaddr,
247 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
251 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
253 kfree(pfvf->rq_bmap);
254 kfree(pfvf->sq_bmap);
255 kfree(pfvf->cq_bmap);
257 qmem_free(rvu->dev, pfvf->rq_ctx);
259 qmem_free(rvu->dev, pfvf->sq_ctx);
261 qmem_free(rvu->dev, pfvf->cq_ctx);
263 qmem_free(rvu->dev, pfvf->rss_ctx);
264 if (pfvf->nix_qints_ctx)
265 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
266 if (pfvf->cq_ints_ctx)
267 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
269 pfvf->rq_bmap = NULL;
270 pfvf->cq_bmap = NULL;
271 pfvf->sq_bmap = NULL;
275 pfvf->rss_ctx = NULL;
276 pfvf->nix_qints_ctx = NULL;
277 pfvf->cq_ints_ctx = NULL;
280 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
281 struct rvu_pfvf *pfvf, int nixlf,
282 int rss_sz, int rss_grps, int hwctx_size)
284 int err, grp, num_indices;
286 /* RSS is not requested for this NIXLF */
289 num_indices = rss_sz * rss_grps;
291 /* Alloc NIX RSS HW context memory and config the base */
292 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
296 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
297 (u64)pfvf->rss_ctx->iova);
299 /* Config full RSS table size, enable RSS and caching */
300 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
301 BIT_ULL(36) | BIT_ULL(4) |
302 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
303 /* Config RSS group offset and sizes */
304 for (grp = 0; grp < rss_grps; grp++)
305 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
306 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
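/* Example of the encoding above (illustrative values): with rss_sz = 64
 * and grp = 2, the size field is ilog2(64) - 1 = 5 and the offset is 128,
 * i.e. each group is a contiguous rss_sz-sized slice of the indirection
 * table.
 */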
310 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
311 struct nix_aq_inst_s *inst)
313 struct admin_queue *aq = block->aq;
314 struct nix_aq_res_s *result;
318 result = (struct nix_aq_res_s *)aq->res->base;
320 /* Get current head pointer where to append this instruction */
321 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
322 head = (reg >> 4) & AQ_PTR_MASK;
324 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
325 (void *)inst, aq->inst->entry_sz);
326 memset(result, 0, sizeof(*result));
327 /* sync into memory */
330 /* Ring the doorbell and wait for result */
331 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
332 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
340 if (result->compcode != NIX_AQ_COMP_GOOD)
341 /* TODO: Replace this with some error code */
347 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
348 struct nix_aq_enq_rsp *rsp)
350 struct rvu_hwinfo *hw = rvu->hw;
351 u16 pcifunc = req->hdr.pcifunc;
352 int nixlf, blkaddr, rc = 0;
353 struct nix_aq_inst_s inst;
354 struct rvu_block *block;
355 struct admin_queue *aq;
356 struct rvu_pfvf *pfvf;
361 pfvf = rvu_get_pfvf(rvu, pcifunc);
362 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
363 if (!pfvf->nixlf || blkaddr < 0)
364 return NIX_AF_ERR_AF_LF_INVALID;
366 block = &hw->block[blkaddr];
369 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
370 return NIX_AF_ERR_AQ_ENQUEUE;
373 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
375 return NIX_AF_ERR_AF_LF_INVALID;
377 switch (req->ctype) {
378 case NIX_AQ_CTYPE_RQ:
379 /* Check if index exceeds max no of queues */
380 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
381 rc = NIX_AF_ERR_AQ_ENQUEUE;
383 case NIX_AQ_CTYPE_SQ:
384 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
385 rc = NIX_AF_ERR_AQ_ENQUEUE;
387 case NIX_AQ_CTYPE_CQ:
388 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
389 rc = NIX_AF_ERR_AQ_ENQUEUE;
391 case NIX_AQ_CTYPE_RSS:
392 /* Check if RSS is enabled and qidx is within range */
393 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
394 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
395 (req->qidx >= (256UL << (cfg & 0xF))))
396 rc = NIX_AF_ERR_AQ_ENQUEUE;
398 case NIX_AQ_CTYPE_MCE:
399 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
400 /* Check if index exceeds MCE list length */
401 if (!hw->nix0->mcast.mce_ctx ||
402 (req->qidx >= (256UL << (cfg & 0xF))))
403 rc = NIX_AF_ERR_AQ_ENQUEUE;
405 /* Adding multicast lists for requests from PF/VFs is not
406 * yet supported, so ignore this.
407 */
409 rc = NIX_AF_ERR_AQ_ENQUEUE;
412 rc = NIX_AF_ERR_AQ_ENQUEUE;
418 /* Check if SQ pointed SMQ belongs to this PF/VF or not */
419 if (req->ctype == NIX_AQ_CTYPE_SQ &&
420 req->op != NIX_AQ_INSTOP_WRITE) {
421 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
422 pcifunc, req->sq.smq))
423 return NIX_AF_ERR_AQ_ENQUEUE;
426 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
428 inst.cindex = req->qidx;
429 inst.ctype = req->ctype;
431 /* Currently we are not supporting enqueuing multiple instructions,
432 * so always choose first entry in result memory.
433 */
434 inst.res_addr = (u64)aq->res->iova;
436 /* Clean result + context memory */
437 memset(aq->res->base, 0, aq->res->entry_sz);
438 /* Context needs to be written at RES_ADDR + 128 */
439 ctx = aq->res->base + 128;
440 /* Mask needs to be written at RES_ADDR + 256 */
441 mask = aq->res->base + 256;
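/* This layout matches the AQ result memory sized in nix_aq_init() below:
 * ALIGN(sizeof(struct nix_aq_res_s), 128) + 256 bytes per entry, i.e. the
 * result at offset 0, the context at +128 and the mask at +256.
 */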
444 case NIX_AQ_INSTOP_WRITE:
445 if (req->ctype == NIX_AQ_CTYPE_RQ)
446 memcpy(mask, &req->rq_mask,
447 sizeof(struct nix_rq_ctx_s));
448 else if (req->ctype == NIX_AQ_CTYPE_SQ)
449 memcpy(mask, &req->sq_mask,
450 sizeof(struct nix_sq_ctx_s));
451 else if (req->ctype == NIX_AQ_CTYPE_CQ)
452 memcpy(mask, &req->cq_mask,
453 sizeof(struct nix_cq_ctx_s));
454 else if (req->ctype == NIX_AQ_CTYPE_RSS)
455 memcpy(mask, &req->rss_mask,
456 sizeof(struct nix_rsse_s));
457 else if (req->ctype == NIX_AQ_CTYPE_MCE)
458 memcpy(mask, &req->mce_mask,
459 sizeof(struct nix_rx_mce_s));
461 case NIX_AQ_INSTOP_INIT:
462 if (req->ctype == NIX_AQ_CTYPE_RQ)
463 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
464 else if (req->ctype == NIX_AQ_CTYPE_SQ)
465 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
466 else if (req->ctype == NIX_AQ_CTYPE_CQ)
467 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
468 else if (req->ctype == NIX_AQ_CTYPE_RSS)
469 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
470 else if (req->ctype == NIX_AQ_CTYPE_MCE)
471 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
473 case NIX_AQ_INSTOP_NOP:
474 case NIX_AQ_INSTOP_READ:
475 case NIX_AQ_INSTOP_LOCK:
476 case NIX_AQ_INSTOP_UNLOCK:
479 rc = NIX_AF_ERR_AQ_ENQUEUE;
483 spin_lock(&aq->lock);
485 /* Submit the instruction to AQ */
486 rc = nix_aq_enqueue_wait(rvu, block, &inst);
488 spin_unlock(&aq->lock);
492 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
493 if (req->op == NIX_AQ_INSTOP_INIT) {
494 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
495 __set_bit(req->qidx, pfvf->rq_bmap);
496 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
497 __set_bit(req->qidx, pfvf->sq_bmap);
498 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
499 __set_bit(req->qidx, pfvf->cq_bmap);
502 if (req->op == NIX_AQ_INSTOP_WRITE) {
503 if (req->ctype == NIX_AQ_CTYPE_RQ) {
504 ena = (req->rq.ena & req->rq_mask.ena) |
505 (test_bit(req->qidx, pfvf->rq_bmap) &
508 __set_bit(req->qidx, pfvf->rq_bmap);
510 __clear_bit(req->qidx, pfvf->rq_bmap);
512 if (req->ctype == NIX_AQ_CTYPE_SQ) {
513 ena = (req->sq.ena & req->sq_mask.ena) |
514 (test_bit(req->qidx, pfvf->sq_bmap) &
517 __set_bit(req->qidx, pfvf->sq_bmap);
519 __clear_bit(req->qidx, pfvf->sq_bmap);
521 if (req->ctype == NIX_AQ_CTYPE_CQ) {
522 ena = (req->cq.ena & req->cq_mask.ena) |
523 (test_bit(req->qidx, pfvf->cq_bmap) &
526 __set_bit(req->qidx, pfvf->cq_bmap);
528 __clear_bit(req->qidx, pfvf->cq_bmap);
533 /* Copy read context into mailbox */
534 if (req->op == NIX_AQ_INSTOP_READ) {
535 if (req->ctype == NIX_AQ_CTYPE_RQ)
536 memcpy(&rsp->rq, ctx,
537 sizeof(struct nix_rq_ctx_s));
538 else if (req->ctype == NIX_AQ_CTYPE_SQ)
539 memcpy(&rsp->sq, ctx,
540 sizeof(struct nix_sq_ctx_s));
541 else if (req->ctype == NIX_AQ_CTYPE_CQ)
542 memcpy(&rsp->cq, ctx,
543 sizeof(struct nix_cq_ctx_s));
544 else if (req->ctype == NIX_AQ_CTYPE_RSS)
545 memcpy(&rsp->rss, ctx,
546 sizeof(struct nix_rsse_s));
547 else if (req->ctype == NIX_AQ_CTYPE_MCE)
548 memcpy(&rsp->mce, ctx,
549 sizeof(struct nix_rx_mce_s));
553 spin_unlock(&aq->lock);
557 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
559 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
560 struct nix_aq_enq_req aq_req;
565 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
566 return NIX_AF_ERR_AQ_ENQUEUE;
568 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
569 aq_req.hdr.pcifunc = req->hdr.pcifunc;
571 if (req->ctype == NIX_AQ_CTYPE_CQ) {
573 aq_req.cq_mask.ena = 1;
574 q_cnt = pfvf->cq_ctx->qsize;
575 bmap = pfvf->cq_bmap;
577 if (req->ctype == NIX_AQ_CTYPE_SQ) {
579 aq_req.sq_mask.ena = 1;
580 q_cnt = pfvf->sq_ctx->qsize;
581 bmap = pfvf->sq_bmap;
583 if (req->ctype == NIX_AQ_CTYPE_RQ) {
585 aq_req.rq_mask.ena = 1;
586 q_cnt = pfvf->rq_ctx->qsize;
587 bmap = pfvf->rq_bmap;
590 aq_req.ctype = req->ctype;
591 aq_req.op = NIX_AQ_INSTOP_WRITE;
593 for (qidx = 0; qidx < q_cnt; qidx++) {
594 if (!test_bit(qidx, bmap))
597 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
600 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
601 (req->ctype == NIX_AQ_CTYPE_CQ) ?
602 "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
610 int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
611 struct nix_aq_enq_req *req,
612 struct nix_aq_enq_rsp *rsp)
614 return rvu_nix_aq_enq_inst(rvu, req, rsp);
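/* Illustrative caller sketch (hypothetical, not part of this driver): a
 * PF/VF driver would fill a struct nix_aq_enq_req and post it over the
 * AF mailbox, e.g.
 *
 *	req.qidx   = 0;
 *	req.ctype  = NIX_AQ_CTYPE_CQ;
 *	req.op     = NIX_AQ_INSTOP_INIT;
 *	req.cq.ena = 1;
 *	(remaining req.cq fields per the desired CQ context)
 *
 * The AF replies with struct nix_aq_enq_rsp, which carries the context
 * read back for NIX_AQ_INSTOP_READ requests.
 */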
617 int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
618 struct hwctx_disable_req *req,
621 return nix_lf_hwctx_disable(rvu, req);
624 int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
625 struct nix_lf_alloc_req *req,
626 struct nix_lf_alloc_rsp *rsp)
628 int nixlf, qints, hwctx_size, err, rc = 0;
629 struct rvu_hwinfo *hw = rvu->hw;
630 u16 pcifunc = req->hdr.pcifunc;
631 struct rvu_block *block;
632 struct rvu_pfvf *pfvf;
636 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
637 return NIX_AF_ERR_PARAM;
639 pfvf = rvu_get_pfvf(rvu, pcifunc);
640 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
641 if (!pfvf->nixlf || blkaddr < 0)
642 return NIX_AF_ERR_AF_LF_INVALID;
644 block = &hw->block[blkaddr];
645 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
647 return NIX_AF_ERR_AF_LF_INVALID;
649 /* If RSS is being enabled, check if requested config is valid.
650 * RSS table size should be power of two, otherwise
651 * RSS_GRP::OFFSET + adder might go beyond that group or
652 * won't be able to use entire table.
653 */
654 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
655 !is_power_of_2(req->rss_sz)))
656 return NIX_AF_ERR_RSS_SIZE_INVALID;
659 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
660 return NIX_AF_ERR_RSS_GRPS_INVALID;
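/* Example: a power-of-two rss_sz such as 64 with, say, rss_grps = 4 passes
 * these checks (provided both are within MAX_RSS_INDIR_TBL_SIZE and
 * MAX_RSS_GROUPS), whereas rss_sz = 300 or rss_grps = 0 with a non-zero
 * rss_sz is rejected with the errors above.
 */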
662 /* Reset this NIX LF */
663 err = rvu_lf_reset(rvu, block, nixlf);
665 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
666 block->addr - BLKADDR_NIX0, nixlf);
667 return NIX_AF_ERR_LF_RESET;
670 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
672 /* Alloc NIX RQ HW context memory and config the base */
673 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
674 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
678 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
682 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
683 (u64)pfvf->rq_ctx->iova);
685 /* Set caching and queue count in HW */
686 cfg = BIT_ULL(36) | (req->rq_cnt - 1);
687 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
689 /* Alloc NIX SQ HW context memory and config the base */
690 hwctx_size = 1UL << (ctx_cfg & 0xF);
691 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
695 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
699 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
700 (u64)pfvf->sq_ctx->iova);
701 cfg = BIT_ULL(36) | (req->sq_cnt - 1);
702 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
704 /* Alloc NIX CQ HW context memory and config the base */
705 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
706 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
710 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
714 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
715 (u64)pfvf->cq_ctx->iova);
716 cfg = BIT_ULL(36) | (req->cq_cnt - 1);
717 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
719 /* Initialize receive side scaling (RSS) */
720 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
721 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
722 req->rss_sz, req->rss_grps, hwctx_size);
726 /* Alloc memory for CQINT's HW contexts */
727 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
728 qints = (cfg >> 24) & 0xFFF;
729 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
730 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
734 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
735 (u64)pfvf->cq_ints_ctx->iova);
736 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
738 /* Alloc memory for QINT's HW contexts */
739 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
740 qints = (cfg >> 12) & 0xFFF;
741 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
742 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
746 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
747 (u64)pfvf->nix_qints_ctx->iova);
748 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
750 /* Enable LMTST for this NIX LF */
751 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
753 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC
754 * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's
755 * PCIFUNC itself.
756 */
757 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
762 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
763 cfg |= (u64)pcifunc << 16;
765 cfg |= (u64)req->sso_func << 16;
767 cfg |= (u64)req->xqe_sz << 33;
768 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
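/* Per the shifts above, cfg appears to carry the NPA PF_FUNC in its low
 * bits, with the SSO PF_FUNC at bit 16 and the XQE size at bit 33; exact
 * field widths are defined by the hardware register spec and are not
 * repeated here.
 */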
770 /* Config Rx pkt length, csum checks and apad enable / disable */
771 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
773 err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
780 nix_ctx_free(rvu, pfvf);
784 /* Set macaddr of this PF/VF */
785 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
787 /* set SQB size info */
788 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
789 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
790 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
791 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
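/* Request sketch (hypothetical values, constraints as enforced above): a
 * minimal NIX_LF_ALLOC request sets rq_cnt/sq_cnt/cq_cnt to non-zero
 * counts, rss_sz to a power of two within MAX_RSS_INDIR_TBL_SIZE (or 0 to
 * skip RSS), rss_grps to a non-zero value within MAX_RSS_GROUPS, and
 * npa_func/sso_func to RVU_DEFAULT_PF_FUNC to use the requester's own
 * PF_FUNC for SQBs and SSO.
 */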
795 int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
798 struct rvu_hwinfo *hw = rvu->hw;
799 u16 pcifunc = req->hdr.pcifunc;
800 struct rvu_block *block;
801 int blkaddr, nixlf, err;
802 struct rvu_pfvf *pfvf;
804 pfvf = rvu_get_pfvf(rvu, pcifunc);
805 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
806 if (!pfvf->nixlf || blkaddr < 0)
807 return NIX_AF_ERR_AF_LF_INVALID;
809 block = &hw->block[blkaddr];
810 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
812 return NIX_AF_ERR_AF_LF_INVALID;
814 nix_interface_deinit(rvu, pcifunc, nixlf);
816 /* Reset this NIX LF */
817 err = rvu_lf_reset(rvu, block, nixlf);
819 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
820 block->addr - BLKADDR_NIX0, nixlf);
821 return NIX_AF_ERR_LF_RESET;
824 nix_ctx_free(rvu, pfvf);
829 /* Disable shaping of pkts by a scheduler queue
830 * at a given scheduler level.
831 */
832 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
835 u64 cir_reg = 0, pir_reg = 0;
839 case NIX_TXSCH_LVL_TL1:
840 cir_reg = NIX_AF_TL1X_CIR(schq);
841 pir_reg = 0; /* PIR not available at TL1 */
843 case NIX_TXSCH_LVL_TL2:
844 cir_reg = NIX_AF_TL2X_CIR(schq);
845 pir_reg = NIX_AF_TL2X_PIR(schq);
847 case NIX_TXSCH_LVL_TL3:
848 cir_reg = NIX_AF_TL3X_CIR(schq);
849 pir_reg = NIX_AF_TL3X_PIR(schq);
851 case NIX_TXSCH_LVL_TL4:
852 cir_reg = NIX_AF_TL4X_CIR(schq);
853 pir_reg = NIX_AF_TL4X_PIR(schq);
859 cfg = rvu_read64(rvu, blkaddr, cir_reg);
860 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
864 cfg = rvu_read64(rvu, blkaddr, pir_reg);
865 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
868 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
871 struct rvu_hwinfo *hw = rvu->hw;
874 /* Reset TL4's SDP link config */
875 if (lvl == NIX_TXSCH_LVL_TL4)
876 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
878 if (lvl != NIX_TXSCH_LVL_TL2)
881 /* Reset TL2's CGX or LBK link config */
882 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
883 rvu_write64(rvu, blkaddr,
884 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
887 int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
888 struct nix_txsch_alloc_req *req,
889 struct nix_txsch_alloc_rsp *rsp)
891 u16 pcifunc = req->hdr.pcifunc;
892 struct nix_txsch *txsch;
893 int lvl, idx, req_schq;
894 struct rvu_pfvf *pfvf;
895 struct nix_hw *nix_hw;
899 pfvf = rvu_get_pfvf(rvu, pcifunc);
900 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
901 if (!pfvf->nixlf || blkaddr < 0)
902 return NIX_AF_ERR_AF_LF_INVALID;
904 nix_hw = get_nix_hw(rvu->hw, blkaddr);
908 spin_lock(&rvu->rsrc_lock);
909 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
910 txsch = &nix_hw->txsch[lvl];
911 req_schq = req->schq_contig[lvl] + req->schq[lvl];
913 /* There are only 28 TL1s */
914 if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
917 /* Check if request is valid */
918 if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
921 /* If contiguous queues are needed, check for availability */
922 if (req->schq_contig[lvl] &&
923 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
926 /* Check if full request can be accommodated */
927 if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
931 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
932 txsch = &nix_hw->txsch[lvl];
933 rsp->schq_contig[lvl] = req->schq_contig[lvl];
934 rsp->schq[lvl] = req->schq[lvl];
937 /* Alloc contiguous queues first */
938 if (req->schq_contig[lvl]) {
939 schq = rvu_alloc_rsrc_contig(&txsch->schq,
940 req->schq_contig[lvl]);
942 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
943 txsch->pfvf_map[schq] = pcifunc;
944 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
945 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
946 rsp->schq_contig_list[lvl][idx] = schq;
951 /* Alloc non-contiguous queues */
952 for (idx = 0; idx < req->schq[lvl]; idx++) {
953 schq = rvu_alloc_rsrc(&txsch->schq);
954 txsch->pfvf_map[schq] = pcifunc;
955 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
956 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
957 rsp->schq_list[lvl][idx] = schq;
962 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
964 spin_unlock(&rvu->rsrc_lock);
968 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
970 int blkaddr, nixlf, lvl, schq, err;
971 struct rvu_hwinfo *hw = rvu->hw;
972 struct nix_txsch *txsch;
973 struct nix_hw *nix_hw;
976 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
978 return NIX_AF_ERR_AF_LF_INVALID;
980 nix_hw = get_nix_hw(rvu->hw, blkaddr);
984 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
986 return NIX_AF_ERR_AF_LF_INVALID;
988 /* Disable TL2/3 queue links before SMQ flush */
989 spin_lock(&rvu->rsrc_lock);
990 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
991 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
994 txsch = &nix_hw->txsch[lvl];
995 for (schq = 0; schq < txsch->schq.max; schq++) {
996 if (txsch->pfvf_map[schq] != pcifunc)
998 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1003 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1004 for (schq = 0; schq < txsch->schq.max; schq++) {
1005 if (txsch->pfvf_map[schq] != pcifunc)
1007 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1008 /* Do SMQ flush and set enqueue xoff */
1009 cfg |= BIT_ULL(50) | BIT_ULL(49);
1010 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1012 /* Wait for flush to complete */
1013 err = rvu_poll_reg(rvu, blkaddr,
1014 NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1017 "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1021 /* Now free scheduler queues to free pool */
1022 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1023 txsch = &nix_hw->txsch[lvl];
1024 for (schq = 0; schq < txsch->schq.max; schq++) {
1025 if (txsch->pfvf_map[schq] != pcifunc)
1027 rvu_free_rsrc(&txsch->schq, schq);
1028 txsch->pfvf_map[schq] = 0;
1031 spin_unlock(&rvu->rsrc_lock);
1033 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1034 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1035 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1037 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1042 int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
1043 struct nix_txsch_free_req *req,
1044 struct msg_rsp *rsp)
1046 return nix_txschq_free(rvu, req->hdr.pcifunc);
1049 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1050 int lvl, u64 reg, u64 regval)
1052 u64 regbase = reg & 0xFFFF;
1055 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1058 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1059 /* Check if this schq belongs to this PF/VF or not */
1060 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1063 parent = (regval >> 16) & 0x1FF;
1064 /* Validate MDQ's TL4 parent */
1065 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1066 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1069 /* Validate TL4's TL3 parent */
1070 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1071 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1074 /* Validate TL3's TL2 parent */
1075 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1076 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1079 /* Validate TL2's TL1 parent */
1080 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1081 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
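/* The parent checks above imply the TX scheduler hierarchy managed here:
 * MDQ -> TL4 -> TL3 -> TL2 -> TL1, where each level's PARENT register must
 * point to a scheduler queue owned by the same PF_FUNC.
 */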
1087 int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
1088 struct nix_txschq_config *req,
1089 struct msg_rsp *rsp)
1091 struct rvu_hwinfo *hw = rvu->hw;
1092 u16 pcifunc = req->hdr.pcifunc;
1093 u64 reg, regval, schq_regbase;
1094 struct nix_txsch *txsch;
1095 struct nix_hw *nix_hw;
1096 int blkaddr, idx, err;
1099 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1100 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1101 return NIX_AF_INVAL_TXSCHQ_CFG;
1103 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1105 return NIX_AF_ERR_AF_LF_INVALID;
1107 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1111 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1113 return NIX_AF_ERR_AF_LF_INVALID;
1115 txsch = &nix_hw->txsch[req->lvl];
1116 for (idx = 0; idx < req->num_regs; idx++) {
1117 reg = req->reg[idx];
1118 regval = req->regval[idx];
1119 schq_regbase = reg & 0xFFFF;
1121 if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
1122 txsch->lvl, reg, regval))
1123 return NIX_AF_INVAL_TXSCHQ_CFG;
1125 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1126 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1127 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1129 regval &= ~(0x7FULL << 24);
1130 regval |= ((u64)nixlf << 24);
1133 rvu_write64(rvu, blkaddr, reg, regval);
1135 /* Check for SMQ flush, if so, poll for its completion */
1136 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1137 (regval & BIT_ULL(49))) {
1138 err = rvu_poll_reg(rvu, blkaddr,
1139 reg, BIT_ULL(49), true);
1141 return NIX_AF_SMQ_FLUSH_FAILED;
1147 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1148 struct nix_vtag_config *req)
1152 #define NIX_VTAGTYPE_MAX 0x8ull
1153 #define NIX_VTAGSIZE_MASK 0x7ull
1154 #define NIX_VTAGSTRIP_CAP_MASK 0x30ull
1156 if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
1157 req->vtag_size > VTAGSIZE_T8)
1160 regval = rvu_read64(rvu, blkaddr,
1161 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
1163 if (req->rx.strip_vtag && req->rx.capture_vtag)
1164 regval |= BIT_ULL(4) | BIT_ULL(5);
1165 else if (req->rx.strip_vtag)
1166 regval |= BIT_ULL(4);
1168 regval &= ~(BIT_ULL(4) | BIT_ULL(5));
1170 regval &= ~NIX_VTAGSIZE_MASK;
1171 regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
1173 rvu_write64(rvu, blkaddr,
1174 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1178 int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
1179 struct nix_vtag_config *req,
1180 struct msg_rsp *rsp)
1182 struct rvu_hwinfo *hw = rvu->hw;
1183 u16 pcifunc = req->hdr.pcifunc;
1184 int blkaddr, nixlf, err;
1186 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1188 return NIX_AF_ERR_AF_LF_INVALID;
1190 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1192 return NIX_AF_ERR_AF_LF_INVALID;
1194 if (req->cfg_type) {
1195 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1197 return NIX_AF_ERR_PARAM;
1199 /* TODO: handle tx vtag configuration */
1206 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1207 u16 pcifunc, int next, bool eol)
1209 struct nix_aq_enq_req aq_req;
1212 aq_req.hdr.pcifunc = pcifunc;
1213 aq_req.ctype = NIX_AQ_CTYPE_MCE;
1217 /* Forward bcast pkts to RQ0, RSS not needed */
1219 aq_req.mce.index = 0;
1220 aq_req.mce.eol = eol;
1221 aq_req.mce.pf_func = pcifunc;
1222 aq_req.mce.next = next;
1224 /* All fields valid */
1225 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
1227 err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1229 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1230 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1236 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1237 u16 pcifunc, int idx, bool add)
1239 struct mce *mce, *tail = NULL;
1240 bool delete = false;
1242 /* Scan through the current list */
1243 hlist_for_each_entry(mce, &mce_list->head, node) {
1244 /* If already exists, then delete */
1245 if (mce->pcifunc == pcifunc && !add) {
1253 hlist_del(&mce->node);
1262 /* Add a new one to the list, at the tail */
1263 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1267 mce->pcifunc = pcifunc;
1269 hlist_add_head(&mce->node, &mce_list->head);
1271 hlist_add_behind(&mce->node, &tail->node);
1276 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1278 int err = 0, idx, next_idx, count;
1279 struct nix_mce_list *mce_list;
1280 struct mce *mce, *next_mce;
1281 struct nix_mcast *mcast;
1282 struct nix_hw *nix_hw;
1283 struct rvu_pfvf *pfvf;
1286 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1290 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1294 mcast = &nix_hw->mcast;
1296 /* Get this PF/VF func's MCE index */
1297 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1298 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1300 mce_list = &pfvf->bcast_mce_list;
1301 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1303 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1304 __func__, idx, mce_list->max,
1305 pcifunc >> RVU_PFVF_PF_SHIFT);
1309 spin_lock(&mcast->mce_lock);
1311 err = nix_update_mce_list(mce_list, pcifunc, idx, add);
1315 /* Disable MCAM entry in NPC */
1317 if (!mce_list->count)
1319 count = mce_list->count;
1321 /* Dump the updated list to HW */
1322 hlist_for_each_entry(mce, &mce_list->head, node) {
1326 next_mce = hlist_entry(mce->node.next,
1328 next_idx = next_mce->idx;
1330 /* EOL should be set in last MCE */
1331 err = nix_setup_mce(rvu, mce->idx,
1332 NIX_AQ_INSTOP_WRITE, mce->pcifunc,
1333 next_idx, count ? false : true);
1339 spin_unlock(&mcast->mce_lock);
1343 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1345 struct nix_mcast *mcast = &nix_hw->mcast;
1346 int err, pf, numvfs, idx;
1347 struct rvu_pfvf *pfvf;
1351 /* Skip PF0 (i.e. AF) */
1352 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1353 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1354 /* If PF is not enabled, nothing to do */
1355 if (!((cfg >> 20) & 0x01))
1357 /* Get numVFs attached to this PF */
1358 numvfs = (cfg >> 12) & 0xFF;
1360 pfvf = &rvu->pf[pf];
1361 /* Save the start MCE */
1362 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1364 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1366 for (idx = 0; idx < (numvfs + 1); idx++) {
1367 /* idx-0 is for PF, followed by VFs */
1368 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1370 /* Add dummy entries now, so that we don't have to check
1371 * for whether AQ_OP should be INIT/WRITE later on.
1372 * Will be updated when a NIXLF is attached/detached to
1373 * these PF/VFs.
1374 */
1375 err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1385 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1387 struct nix_mcast *mcast = &nix_hw->mcast;
1388 struct rvu_hwinfo *hw = rvu->hw;
1391 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1392 size = (1ULL << size);
1394 /* Alloc memory for multicast/mirror replication entries */
1395 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1396 (256UL << MC_TBL_SIZE), size);
1400 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1401 (u64)mcast->mce_ctx->iova);
1403 /* Set max list length equal to max no of VFs per PF + PF itself */
1404 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1405 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1407 /* Alloc memory for multicast replication buffers */
1408 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
1409 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
1410 (8UL << MC_BUF_CNT), size);
1414 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
1415 (u64)mcast->mcast_buf->iova);
1417 /* Alloc pkind for NIX internal RX multicast/mirror replay */
1418 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
1420 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
1421 BIT_ULL(63) | (mcast->replay_pkind << 24) |
1422 BIT_ULL(20) | MC_BUF_CNT);
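/* Note: the replay pkind allocated above is written into MCAST_BUF_CFG
 * (the "replay_pkind << 24" field), presumably so that packets replayed
 * from the multicast/mirror buffers get re-parsed with a known pkind; the
 * buffer pool itself was sized as (8UL << MC_BUF_CNT) buffers earlier in
 * this function.
 */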
1424 spin_lock_init(&mcast->mce_lock);
1426 return nix_setup_bcast_tables(rvu, nix_hw);
1429 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1431 struct nix_txsch *txsch;
1435 /* Get scheduler queue count of each type and alloc
1436 * bitmap for each for alloc/free/attach operations.
1437 */
1438 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1439 txsch = &nix_hw->txsch[lvl];
1442 case NIX_TXSCH_LVL_SMQ:
1443 reg = NIX_AF_MDQ_CONST;
1445 case NIX_TXSCH_LVL_TL4:
1446 reg = NIX_AF_TL4_CONST;
1448 case NIX_TXSCH_LVL_TL3:
1449 reg = NIX_AF_TL3_CONST;
1451 case NIX_TXSCH_LVL_TL2:
1452 reg = NIX_AF_TL2_CONST;
1454 case NIX_TXSCH_LVL_TL1:
1455 reg = NIX_AF_TL1_CONST;
1458 cfg = rvu_read64(rvu, blkaddr, reg);
1459 txsch->schq.max = cfg & 0xFFFF;
1460 err = rvu_alloc_bitmap(&txsch->schq);
1464 /* Allocate memory for scheduler queues to
1465 * PF/VF pcifunc mapping info.
1466 */
1467 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
1468 sizeof(u16), GFP_KERNEL);
1469 if (!txsch->pfvf_map)
1475 int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
1476 struct msg_rsp *rsp)
1478 struct rvu_hwinfo *hw = rvu->hw;
1479 u16 pcifunc = req->hdr.pcifunc;
1480 int i, nixlf, blkaddr;
1483 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1485 return NIX_AF_ERR_AF_LF_INVALID;
1487 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1489 return NIX_AF_ERR_AF_LF_INVALID;
1491 /* Get stats count supported by HW */
1492 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1494 /* Reset tx stats */
1495 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
1496 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
1498 /* Reset rx stats */
1499 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
1500 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
1505 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
1510 /* Start X2P bus calibration */
1511 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1512 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
1513 /* Wait for calibration to complete */
1514 err = rvu_poll_reg(rvu, blkaddr,
1515 NIX_AF_STATUS, BIT_ULL(10), false);
1517 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
1521 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
1522 /* Check if CGX devices are ready */
1523 for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
1524 if (status & (BIT_ULL(16 + idx)))
1527 "CGX%d didn't respond to NIX X2P calibration\n", idx);
1531 /* Check if LBK is ready */
1532 if (!(status & BIT_ULL(19))) {
1534 "LBK didn't respond to NIX X2P calibration\n");
1538 /* Clear 'calibrate_x2p' bit */
1539 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1540 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
1541 if (err || (status & 0x3FFULL))
1543 "NIX X2P calibration failed, status 0x%llx\n", status);
1549 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
1554 /* Set admin queue endianness */
1555 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
1558 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
1561 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
1564 /* Do not bypass NDC cache */
1565 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
1567 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
1569 /* Result structure can be followed by RQ/SQ/CQ context at
1570 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
1571 * operation type. Alloc sufficient result memory for all operations.
1572 */
1573 err = rvu_aq_alloc(rvu, &block->aq,
1574 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
1575 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
1579 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
1580 rvu_write64(rvu, block->addr,
1581 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
1585 int rvu_nix_init(struct rvu *rvu)
1587 struct rvu_hwinfo *hw = rvu->hw;
1588 struct rvu_block *block;
1592 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
1595 block = &hw->block[blkaddr];
1597 /* Calibrate X2P bus to check if CGX/LBK links are fine */
1598 err = nix_calibrate_x2p(rvu, blkaddr);
1602 /* Set num of links of each type */
1603 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
1604 hw->cgx = (cfg >> 12) & 0xF;
1605 hw->lmac_per_cgx = (cfg >> 8) & 0xF;
1606 hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
1610 /* Initialize admin queue */
1611 err = nix_aq_init(rvu, block);
1615 /* Restore CINT timer delay to HW reset values */
1616 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
1618 /* Configure segmentation offload formats */
1619 nix_setup_lso(rvu, blkaddr);
1621 if (blkaddr == BLKADDR_NIX0) {
1622 hw->nix0 = devm_kzalloc(rvu->dev,
1623 sizeof(struct nix_hw), GFP_KERNEL);
1627 err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
1631 err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
1635 /* Config Outer L2, IP, TCP and UDP's NPC layer info.
1636 * This helps HW protocol checker to identify headers
1637 * and validate length and checksums.
1638 */
1639 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
1640 (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
1641 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
1642 (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
1643 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
1644 (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
1645 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
1646 (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
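/* Each NIX_AF_RX_DEF_* value above encodes (layer ID << 8) | (layer type
 * << 4) with a low nibble of 0x0F, which appears to be a layer-type match
 * mask; the exact bit layout is defined by the hardware register spec.
 */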
1651 void rvu_nix_freemem(struct rvu *rvu)
1653 struct rvu_hwinfo *hw = rvu->hw;
1654 struct rvu_block *block;
1655 struct nix_txsch *txsch;
1656 struct nix_mcast *mcast;
1657 struct nix_hw *nix_hw;
1660 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
1664 block = &hw->block[blkaddr];
1665 rvu_aq_free(rvu, block->aq);
1667 if (blkaddr == BLKADDR_NIX0) {
1668 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1672 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1673 txsch = &nix_hw->txsch[lvl];
1674 kfree(txsch->schq.bmap);
1677 mcast = &nix_hw->mcast;
1678 qmem_free(rvu->dev, mcast->mce_ctx);
1679 qmem_free(rvu->dev, mcast->mcast_buf);