Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 20 Jun 2018 22:22:30 +0000 (07:22 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 20 Jun 2018 22:22:30 +0000 (07:22 +0900)
Pull rdma fixes from Jason Gunthorpe:
 "Here are eight fairly small fixes collected over the last two weeks.

  Regression and crashing bug fixes:

   - mlx4/5: Fixes for issues found from various checkers

   - A resource tracking and uverbs regression in the core code

   - qedr: NULL pointer regression found during testing

   - rxe: Various small bugs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/rxe: Fix missing completion for mem_reg work requests
  RDMA/core: Save kernel caller name when creating CQ using ib_create_cq()
  IB/uverbs: Fix ordering of ucontext check in ib_uverbs_write
  IB/mlx4: Fix an error handling path in 'mlx4_ib_rereg_user_mr()'
  RDMA/qedr: Fix NULL pointer dereference when running over iWARP without RDMA-CM
  IB/mlx5: Fix return value check in flow_counters_set_data()
  IB/mlx5: Fix memory leak in mlx5_ib_create_flow
  IB/rxe: avoid double kfree skb

1  2 
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/qedr/verbs.c
include/rdma/ib_verbs.h

index e52dd21519b45ff00268ae33c21816a8b5a96b53,645fc69997bc7422548857b411aa1ef63dc683e7..e3e330f59c2c01216f3e8e90a161c06c73c5e4ec
@@@ -3199,8 -3199,8 +3199,8 @@@ static int flow_counters_set_data(struc
        if (!mcounters->hw_cntrs_hndl) {
                mcounters->hw_cntrs_hndl = mlx5_fc_create(
                        to_mdev(ibcounters->device)->mdev, false);
-               if (!mcounters->hw_cntrs_hndl) {
-                       ret = -ENOMEM;
+               if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+                       ret = PTR_ERR(mcounters->hw_cntrs_hndl);
                        goto free;
                }
                hw_hndl = true;
@@@ -3546,29 -3546,35 +3546,35 @@@ static struct ib_flow *mlx5_ib_create_f
                        return ERR_PTR(-ENOMEM);
  
                err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
-               if (err) {
-                       kfree(ucmd);
-                       return ERR_PTR(err);
-               }
+               if (err)
+                       goto free_ucmd;
        }
  
-       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
-               return ERR_PTR(-ENOMEM);
+       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
  
        if (domain != IB_FLOW_DOMAIN_USER ||
            flow_attr->port > dev->num_ports ||
            (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
-                                 IB_FLOW_ATTR_FLAGS_EGRESS)))
-               return ERR_PTR(-EINVAL);
+                                 IB_FLOW_ATTR_FLAGS_EGRESS))) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
  
        if (is_egress &&
            (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
-            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
-               return ERR_PTR(-EINVAL);
+            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
  
        dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-       if (!dst)
-               return ERR_PTR(-ENOMEM);
+       if (!dst) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
  
        mutex_lock(&dev->flow_db->lock);
  
@@@ -3637,8 -3643,8 +3643,8 @@@ destroy_ft
  unlock:
        mutex_unlock(&dev->flow_db->lock);
        kfree(dst);
+ free_ucmd:
        kfree(ucmd);
-       kfree(handler);
        return ERR_PTR(err);
  }
  
@@@ -5054,7 -5060,7 +5060,7 @@@ mlx5_ib_get_vector_affinity(struct ib_d
  {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
  
 -      return mlx5_get_vector_affinity(dev->mdev, comp_vector);
 +      return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
  }
  
  /* The mlx5_ib_multiport_mutex should be held when calling this function */
index f7ac8fc9b531d7550fb0b41233b55e0bec51b4ff,f9b198455fc9c23a61cdff7e5ed9bf462af8008d..f07b8df96f43954e67d4dfc32148e96a751e6974
@@@ -1614,7 -1614,7 +1614,7 @@@ static int qedr_create_kernel_qp(struc
        qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
                              dev->attr.max_sqe);
  
 -      qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
 +      qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
                                GFP_KERNEL);
        if (!qp->wqe_wr_id) {
                DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
        qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
  
        /* Allocate driver internal RQ array */
 -      qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
 +      qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
                                GFP_KERNEL);
        if (!qp->rqe_wr_id) {
                DP_ERR(dev,
@@@ -1957,6 -1957,9 +1957,9 @@@ int qedr_modify_qp(struct ib_qp *ibqp, 
        }
  
        if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+               if (rdma_protocol_iwarp(&dev->ibdev, 1))
+                       return -EINVAL;
                if (attr_mask & IB_QP_PATH_MTU) {
                        if (attr->path_mtu < IB_MTU_256 ||
                            attr->path_mtu > IB_MTU_4096) {
@@@ -3276,7 -3279,7 +3279,7 @@@ int qedr_post_recv(struct ib_qp *ibqp, 
                                SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
                                          wr->num_sge);
  
 -                      SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
 +                      SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
                                  wr->sg_list[i].lkey);
  
                        RQ_SGE_SET(rqe, wr->sg_list[i].addr,
                        /* First one must include the number
                         * of SGE in the list
                         */
 -                      SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
 +                      SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
                        SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
  
                        RQ_SGE_SET(rqe, 0, 0, flags);
diff --combined include/rdma/ib_verbs.h
index 4c6241bc203931dcc6b74de5be72349e741cb6be,4f71d6a073bae92771e3f6df796672532a637d07..6c003995347a3904cda6e57814c50bcf6c0733a7
@@@ -2093,7 -2093,10 +2093,7 @@@ struct ib_flow_attr 
        u32          flags;
        u8           num_of_specs;
        u8           port;
 -      /* Following are the optional layers according to user request
 -       * struct ib_flow_spec_xxx
 -       * struct ib_flow_spec_yyy
 -       */
 +      union ib_flow_spec flows[];
  };
  
  struct ib_flow {
@@@ -3391,11 -3394,14 +3391,14 @@@ int ib_process_cq_direct(struct ib_cq *
   *
   * Users can examine the cq structure to determine the actual CQ size.
   */
- struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr);
+ struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller);
+ #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
+       __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
  
  /**
   * ib_resize_cq - Modifies the capacity of the CQ.