Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/doorbell.c |   3
-rw-r--r--  drivers/infiniband/hw/mlx5/gsi.c      |  51
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c     | 145
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h  |   7
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c       |   7
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c       | 232
6 files changed, 263 insertions, 182 deletions
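
[Annotation, not part of the patch] The bulk of this series moves QP allocation from the driver into ib_core: the INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp) entry added to mlx5_ib_dev_ops below tells the core how large the driver's QP container is, mlx5_ib_create_qp() changes from returning a struct ib_qp * to filling in a pre-allocated one, and the driver's kfree(qp)/kfree(mqp) calls and manual ib_qp field setup disappear. A minimal sketch of the resulting contract, assuming a simplified driver (the example_* names are illustrative, not upstream symbols):

#include <rdma/ib_verbs.h>

/* Driver QP container. ib_core sizes its allocation from the
 * INIT_RDMA_OBJ_SIZE() entry below and zeroes it before .create_qp runs.
 */
struct example_qp {
	struct ib_qp ibqp;	/* the member named in INIT_RDMA_OBJ_SIZE() */
	u32 hw_qpn;		/* illustrative hardware state */
};

static int example_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
			     struct ib_udata *udata)
{
	struct example_qp *qp = container_of(ibqp, struct example_qp, ibqp);

	/* Program the hardware here. On failure, undo only hardware state
	 * and return an errno; ib_core, not the driver, frees the container.
	 */
	qp->hw_qpn = 0;
	return 0;
}

static const struct ib_device_ops example_dev_ops = {
	.create_qp = example_create_qp,
	INIT_RDMA_OBJ_SIZE(ib_qp, example_qp, ibqp),
};

This is also why the GSI and UMR paths below switch from mlx5_ib_create_qp()/mlx5_ib_destroy_qp() to plain ib_create_qp()/ib_destroy_qp(): kernel-internal users now go through the core, so the container is allocated and freed consistently.
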
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 9ca2e61807ec..6398e2f48579 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -78,7 +78,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
 		list_add(&page->list, &context->db_page_list);
 
 found:
-	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+		  (virt & ~PAGE_MASK);
 	db->u.user_page = page;
 	++page->refcnt;
 
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 7fcad9135276..3ad8f637c589 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -116,8 +116,6 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
 		goto err_free_tx;
 	}
 
-	mutex_lock(&dev->devr.mutex);
-
 	if (dev->devr.ports[port_num - 1].gsi) {
 		mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
 			     port_num);
@@ -147,35 +145,20 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
 		hw_init_attr.cap.max_inline_data = 0;
 	}
 
-	gsi->rx_qp = mlx5_ib_create_qp(pd, &hw_init_attr, NULL);
+	gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
 	if (IS_ERR(gsi->rx_qp)) {
 		mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
 			     PTR_ERR(gsi->rx_qp));
 		ret = PTR_ERR(gsi->rx_qp);
 		goto err_destroy_cq;
 	}
-	gsi->rx_qp->device = pd->device;
-	gsi->rx_qp->pd = pd;
-	gsi->rx_qp->real_qp = gsi->rx_qp;
-
-	gsi->rx_qp->qp_type = hw_init_attr.qp_type;
-	gsi->rx_qp->send_cq = hw_init_attr.send_cq;
-	gsi->rx_qp->recv_cq = hw_init_attr.recv_cq;
-	gsi->rx_qp->event_handler = hw_init_attr.event_handler;
-	spin_lock_init(&gsi->rx_qp->mr_lock);
-	INIT_LIST_HEAD(&gsi->rx_qp->rdma_mrs);
-	INIT_LIST_HEAD(&gsi->rx_qp->sig_mrs);
 
 	dev->devr.ports[attr->port_num - 1].gsi = gsi;
-
-	mutex_unlock(&dev->devr.mutex);
-
 	return 0;
 
 err_destroy_cq:
 	ib_free_cq(gsi->cq);
 err_free_wrs:
-	mutex_unlock(&dev->devr.mutex);
 	kfree(gsi->outstanding_wrs);
 err_free_tx:
 	kfree(gsi->tx_qps);
@@ -190,16 +173,13 @@ int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
 	int qp_index;
 	int ret;
 
-	mutex_lock(&dev->devr.mutex);
-	ret = mlx5_ib_destroy_qp(gsi->rx_qp, NULL);
+	ret = ib_destroy_qp(gsi->rx_qp);
 	if (ret) {
 		mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
 			     ret);
-		mutex_unlock(&dev->devr.mutex);
 		return ret;
 	}
 	dev->devr.ports[port_num - 1].gsi = NULL;
-	mutex_unlock(&dev->devr.mutex);
 	gsi->rx_qp = NULL;
 
 	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
@@ -213,8 +193,6 @@ int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
 
 	kfree(gsi->outstanding_wrs);
 	kfree(gsi->tx_qps);
-	kfree(mqp);
-
 	return 0;
 }
 
@@ -339,23 +317,13 @@ err_destroy_qp:
 	WARN_ON_ONCE(qp);
 }
 
-static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
-{
-	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
-	u16 qp_index;
-
-	mutex_lock(&dev->devr.mutex);
-	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
-		setup_qp(gsi, qp_index);
-	mutex_unlock(&dev->devr.mutex);
-}
-
 int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 			  int attr_mask)
 {
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
 	struct mlx5_ib_qp *mqp = to_mqp(qp);
 	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+	u16 qp_index;
 	int ret;
 
 	mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
@@ -366,8 +334,11 @@ int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 		return ret;
 	}
 
-	if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
-		setup_qps(gsi);
+	if (to_mqp(gsi->rx_qp)->state != IB_QPS_RTS)
+		return 0;
+
+	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+		setup_qp(gsi, qp_index);
 
 	return 0;
 }
@@ -511,8 +482,8 @@ int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
 
 void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
 {
-	if (!gsi)
-		return;
+	u16 qp_index;
 
-	setup_qps(gsi);
+	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+		setup_qp(gsi, qp_index);
 }
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 466f0a521940..8664bcf6d3f5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1184,6 +1184,16 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 			MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
 	}
 
+	if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
+		resp.response_length += sizeof(resp.dci_streams_caps);
+
+		resp.dci_streams_caps.max_log_num_concurent =
+			MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
+
+		resp.dci_streams_caps.max_log_num_errored =
+			MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
+	}
+
 	if (uhw_outlen) {
 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
@@ -2501,6 +2511,13 @@ static void pkey_change_handler(struct work_struct *work)
 		container_of(work, struct mlx5_ib_port_resources,
 			     pkey_change_work);
 
+	if (!ports->gsi)
+		/*
+		 * We got this event before device was fully configured
+		 * and MAD registration code wasn't called/finished yet.
+		 */
+		return;
+
 	mlx5_ib_gsi_pkey_change(ports->gsi);
 }
 
@@ -2795,33 +2812,16 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
 		return -EOPNOTSUPP;
 
-	mutex_init(&devr->mutex);
-
-	devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
-	if (!devr->p0)
-		return -ENOMEM;
-
-	devr->p0->device = ibdev;
-	devr->p0->uobject = NULL;
-	atomic_set(&devr->p0->usecnt, 0);
-
-	ret = mlx5_ib_alloc_pd(devr->p0, NULL);
-	if (ret)
-		goto error0;
+	devr->p0 = ib_alloc_pd(ibdev, 0);
+	if (IS_ERR(devr->p0))
+		return PTR_ERR(devr->p0);
 
-	devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
-	if (!devr->c0) {
-		ret = -ENOMEM;
+	devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+	if (IS_ERR(devr->c0)) {
+		ret = PTR_ERR(devr->c0);
 		goto error1;
 	}
 
-	devr->c0->device = &dev->ib_dev;
-	atomic_set(&devr->c0->usecnt, 0);
-
-	ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
-	if (ret)
-		goto err_create_cq;
-
 	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
 	if (ret)
 		goto error2;
@@ -2836,45 +2836,22 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
 	attr.srq_type = IB_SRQT_XRC;
 	attr.ext.cq = devr->c0;
 
-	devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
-	if (!devr->s0) {
-		ret = -ENOMEM;
-		goto error4;
-	}
-
-	devr->s0->device = &dev->ib_dev;
-	devr->s0->pd = devr->p0;
-	devr->s0->srq_type = IB_SRQT_XRC;
-	devr->s0->ext.cq = devr->c0;
-	ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
-	if (ret)
+	devr->s0 = ib_create_srq(devr->p0, &attr);
+	if (IS_ERR(devr->s0)) {
+		ret = PTR_ERR(devr->s0);
 		goto err_create;
-
-	atomic_inc(&devr->s0->ext.cq->usecnt);
-	atomic_inc(&devr->p0->usecnt);
-	atomic_set(&devr->s0->usecnt, 0);
+	}
 
 	memset(&attr, 0, sizeof(attr));
 	attr.attr.max_sge = 1;
 	attr.attr.max_wr = 1;
 	attr.srq_type = IB_SRQT_BASIC;
 
-	devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
-	if (!devr->s1) {
-		ret = -ENOMEM;
-		goto error5;
-	}
-
-	devr->s1->device = &dev->ib_dev;
-	devr->s1->pd = devr->p0;
-	devr->s1->srq_type = IB_SRQT_BASIC;
-	devr->s1->ext.cq = devr->c0;
-	ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
-	if (ret)
+	devr->s1 = ib_create_srq(devr->p0, &attr);
+	if (IS_ERR(devr->s1)) {
+		ret = PTR_ERR(devr->s1);
 		goto error6;
-
-	atomic_inc(&devr->p0->usecnt);
-	atomic_set(&devr->s1->usecnt, 0);
+	}
 
 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
 		INIT_WORK(&devr->ports[port].pkey_change_work,
@@ -2883,23 +2860,15 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
 	return 0;
 
 error6:
-	kfree(devr->s1);
-error5:
-	mlx5_ib_destroy_srq(devr->s0, NULL);
+	ib_destroy_srq(devr->s0);
 err_create:
-	kfree(devr->s0);
-error4:
 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
 error3:
 	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
 error2:
-	mlx5_ib_destroy_cq(devr->c0, NULL);
-err_create_cq:
-	kfree(devr->c0);
+	ib_destroy_cq(devr->c0);
error1:
-	mlx5_ib_dealloc_pd(devr->p0, NULL);
-error0:
-	kfree(devr->p0);
+	ib_dealloc_pd(devr->p0);
 	return ret;
 }
 
@@ -2908,20 +2877,21 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
 	struct mlx5_ib_resources *devr = &dev->devr;
 	int port;
 
-	mlx5_ib_destroy_srq(devr->s1, NULL);
-	kfree(devr->s1);
-	mlx5_ib_destroy_srq(devr->s0, NULL);
-	kfree(devr->s0);
-	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
-	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
-	mlx5_ib_destroy_cq(devr->c0, NULL);
-	kfree(devr->c0);
-	mlx5_ib_dealloc_pd(devr->p0, NULL);
-	kfree(devr->p0);
-
-	/* Make sure no change P_Key work items are still executing */
+	/*
+	 * Make sure no change P_Key work items are still executing.
+	 *
+	 * At this stage, the mlx5_ib_event should be unregistered
+	 * and it ensures that no new works are added.
+	 */
 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
 		cancel_work_sync(&devr->ports[port].pkey_change_work);
+
+	ib_destroy_srq(devr->s1);
+	ib_destroy_srq(devr->s0);
+	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
+	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
+	ib_destroy_cq(devr->c0);
+	ib_dealloc_pd(devr->p0);
 }
 
@@ -3799,6 +3769,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
 	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
 	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
 };
@@ -4061,7 +4032,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
 
 	if (dev->umrc.qp)
-		mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
+		ib_destroy_qp(dev->umrc.qp);
 	if (dev->umrc.cq)
 		ib_free_cq(dev->umrc.cq);
 	if (dev->umrc.pd)
@@ -4114,23 +4085,17 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 	init_attr->cap.max_send_sge = 1;
 	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
 	init_attr->port_num = 1;
-	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
+	qp = ib_create_qp(pd, init_attr);
 	if (IS_ERR(qp)) {
 		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
 		ret = PTR_ERR(qp);
 		goto error_3;
 	}
-	qp->device = &dev->ib_dev;
-	qp->real_qp = qp;
-	qp->uobject = NULL;
-	qp->qp_type = MLX5_IB_QPT_REG_UMR;
-	qp->send_cq = init_attr->send_cq;
-	qp->recv_cq = init_attr->recv_cq;
 
 	attr->qp_state = IB_QPS_INIT;
 	attr->port_num = 1;
-	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
-				IB_QP_PORT, NULL);
+	ret = ib_modify_qp(qp, attr,
+			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
 	if (ret) {
 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
 		goto error_4;
@@ -4140,7 +4105,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 	attr->qp_state = IB_QPS_RTR;
 	attr->path_mtu = IB_MTU_256;
 
-	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
+	ret = ib_modify_qp(qp, attr, IB_QP_STATE);
 	if (ret) {
 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
 		goto error_4;
@@ -4148,7 +4113,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 
 	memset(attr, 0, sizeof(*attr));
 	attr->qp_state = IB_QPS_RTS;
-	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
+	ret = ib_modify_qp(qp, attr, IB_QP_STATE);
 	if (ret) {
 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
 		goto error_4;
@@ -4171,7 +4136,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 	return 0;
 
 error_4:
-	mlx5_ib_destroy_qp(qp, NULL);
+	ib_destroy_qp(qp);
 	dev->umrc.qp = NULL;
 
 error_3:
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 585fb00bdce8..bf20a388eabe 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -795,8 +795,6 @@ struct mlx5_ib_resources {
 	struct ib_srq *s0;
 	struct ib_srq *s1;
 	struct mlx5_ib_port_resources ports[2];
-	/* Protects changes to the port resources */
-	struct mutex mutex;
 };
 
 struct mlx5_ib_counters {
@@ -1221,9 +1219,8 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 			  const struct ib_recv_wr **bad_wr);
 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
-				struct ib_qp_init_attr *init_attr,
-				struct ib_udata *udata);
+int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+		      struct ib_udata *udata);
 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		      struct ib_udata *udata);
 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3f1c5a4f158b..3be36ebbf67a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -995,7 +995,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
 {
 	const size_t xlt_chunk_align =
-		MLX5_UMR_MTT_ALIGNMENT / sizeof(ent_size);
+		MLX5_UMR_MTT_ALIGNMENT / ent_size;
 	size_t size;
 	void *res = NULL;
 
@@ -1024,7 +1024,7 @@ static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
 
 	if (size > MLX5_SPARE_UMR_CHUNK) {
 		size = MLX5_SPARE_UMR_CHUNK;
-		*nents = get_order(size) / ent_size;
+		*nents = size / ent_size;
 		res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
 					       get_order(size));
 		if (res)
@@ -1226,7 +1226,8 @@ int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 	orig_sg_length = sg.length;
 
 	cur_mtt = mtt;
-	rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
+	rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter,
+			     mr->umem->sgt_append.sgt.nents,
 			     BIT(mr->page_shift)) {
 		if (cur_mtt == (void *)mtt + sg.length) {
 			dma_sync_single_for_device(ddev, sg.addr, sg.length,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a77db29f8391..b2fca110346c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1906,7 +1906,6 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
 static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			     struct mlx5_create_qp_params *params)
 {
-	struct mlx5_ib_create_qp *ucmd = params->ucmd;
 	struct ib_qp_init_attr *attr = params->attr;
 	u32 uidx = params->uidx;
 	struct mlx5_ib_resources *devr = &dev->devr;
@@ -1926,8 +1925,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (!in)
 		return -ENOMEM;
 
-	if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
-		MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
@@ -1982,6 +1979,167 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	return 0;
 }
 
+static int create_dci(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+		      struct mlx5_ib_qp *qp,
+		      struct mlx5_create_qp_params *params)
+{
+	struct ib_qp_init_attr *init_attr = params->attr;
+	struct mlx5_ib_create_qp *ucmd = params->ucmd;
+	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+	struct ib_udata *udata = params->udata;
+	u32 uidx = params->uidx;
+	struct mlx5_ib_resources *devr = &dev->devr;
+	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_ib_cq *send_cq;
+	struct mlx5_ib_cq *recv_cq;
+	unsigned long flags;
+	struct mlx5_ib_qp_base *base;
+	int ts_format;
+	int mlx5_st;
+	void *qpc;
+	u32 *in;
+	int err;
+
+	spin_lock_init(&qp->sq.lock);
+	spin_lock_init(&qp->rq.lock);
+
+	mlx5_st = to_mlx5_st(qp->type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
+	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+	base = &qp->trans_qp.base;
+
+	qp->has_rq = qp_has_rq(init_attr);
+	err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
+	if (err) {
+		mlx5_ib_dbg(dev, "err %d\n", err);
+		return err;
+	}
+
+	if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+	    ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+		return -EINVAL;
+
+	if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+		return -EINVAL;
+
+	ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+				     to_mcq(init_attr->recv_cq));
+
+	if (ts_format < 0)
+		return ts_format;
+
+	err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+			      &inlen, base, ucmd);
+	if (err)
+		return err;
+
+	if (MLX5_CAP_GEN(mdev, ece_support))
+		MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+	MLX5_SET(qpc, qpc, st, mlx5_st);
+	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+	MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
+
+	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+		MLX5_SET(qpc, qpc, wq_signature, 1);
+
+	if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+		MLX5_SET(qpc, qpc, cd_master, 1);
+	if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+		MLX5_SET(qpc, qpc, cd_slave_send, 1);
+	if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE)
+		configure_requester_scat_cqe(dev, qp, init_attr, qpc);
+
+	if (qp->rq.wqe_cnt) {
+		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+	}
+
+	if (qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) {
+		MLX5_SET(qpc, qpc, log_num_dci_stream_channels,
+			 ucmd->dci_streams.log_num_concurent);
+		MLX5_SET(qpc, qpc, log_num_dci_errored_streams,
+			 ucmd->dci_streams.log_num_errored);
+	}
+
+	MLX5_SET(qpc, qpc, ts_format, ts_format);
+	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
+
+	MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+
+	/* Set default resources */
+	if (init_attr->srq) {
+		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+			 to_msrq(init_attr->srq)->msrq.srqn);
+	} else {
+		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+			 to_msrq(devr->s1)->msrq.srqn);
+	}
+
+	if (init_attr->send_cq)
+		MLX5_SET(qpc, qpc, cqn_snd,
+			 to_mcq(init_attr->send_cq)->mcq.cqn);
+
+	if (init_attr->recv_cq)
+		MLX5_SET(qpc, qpc, cqn_rcv,
+			 to_mcq(init_attr->recv_cq)->mcq.cqn);
+
+	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+	/* 0xffffff means we ask to work with cqe version 0 */
+	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+		MLX5_SET(qpc, qpc, user_index, uidx);
+
+	if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+		MLX5_SET(qpc, qpc, end_padding_mode,
+			 MLX5_WQ_END_PAD_MODE_ALIGN);
+		/* Special case to clean flag */
+		qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
+	}
+
+	err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
+	kvfree(in);
+	if (err)
+		goto err_create;
+
+	base->container_mibqp = qp;
+	base->mqp.event = mlx5_ib_qp_event;
+	if (MLX5_CAP_GEN(mdev, ece_support))
+		params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+	get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+		&send_cq, &recv_cq);
+	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+	mlx5_ib_lock_cqs(send_cq, recv_cq);
+	/* Maintain device to QPs access, needed for further handling via reset
+	 * flow
+	 */
+	list_add_tail(&qp->qps_list, &dev->qp_list);
+	/* Maintain CQ to QPs access, needed for further handling via reset flow
+	 */
+	if (send_cq)
+		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+	if (recv_cq)
+		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+	mlx5_ib_unlock_cqs(send_cq, recv_cq);
+	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+	return 0;
+
+err_create:
+	destroy_qp(dev, qp, base, udata);
+	return err;
+}
+
 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			  struct mlx5_ib_qp *qp,
 			  struct mlx5_create_qp_params *params)
@@ -2512,7 +2670,6 @@ static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	}
 
 	qp->state = IB_QPS_RESET;
-	rdma_restrack_no_track(&qp->ibqp.res);
 	return 0;
 }
 
@@ -2653,6 +2810,9 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
 
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_DCI_STREAM,
+			    MLX5_CAP_GEN(mdev, log_max_dci_stream_channels),
+			    qp);
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
@@ -2847,6 +3007,10 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	switch (qp->type) {
 	case MLX5_IB_QPT_DCT:
 		err = create_dct(dev, pd, qp, params);
+		rdma_restrack_no_track(&qp->ibqp.res);
+		break;
+	case MLX5_IB_QPT_DCI:
+		err = create_dci(dev, pd, qp, params);
 		break;
 	case IB_QPT_XRC_TGT:
 		err = create_xrc_tgt_qp(dev, qp, params);
@@ -2854,6 +3018,10 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	case IB_QPT_GSI:
 		err = mlx5_ib_create_gsi(pd, qp, params->attr);
 		break;
+	case MLX5_IB_QPT_HW_GSI:
+	case MLX5_IB_QPT_REG_UMR:
+		rdma_restrack_no_track(&qp->ibqp.res);
+		fallthrough;
 	default:
 		if (params->udata)
 			err = create_user_qp(dev, pd, qp, params);
@@ -2942,7 +3110,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 	}
 
 	kfree(mqp->dct.in);
-	kfree(mqp);
 	return 0;
 }
 
@@ -2980,25 +3147,23 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
 	return ret ? 0 : -EINVAL;
 }
 
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
-				struct ib_udata *udata)
+int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+		      struct ib_udata *udata)
 {
 	struct mlx5_create_qp_params params = {};
-	struct mlx5_ib_dev *dev;
-	struct mlx5_ib_qp *qp;
+	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+	struct mlx5_ib_qp *qp = to_mqp(ibqp);
+	struct ib_pd *pd = ibqp->pd;
 	enum ib_qp_type type;
 	int err;
 
-	dev = pd ? to_mdev(pd->device) :
-		   to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
-
 	err = check_qp_type(dev, attr, &type);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	err = check_valid_flow(dev, pd, attr, udata);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	params.udata = udata;
 	params.uidx = MLX5_IB_DEFAULT_UIDX;
@@ -3008,49 +3173,43 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 	if (udata) {
 		err = process_udata_size(dev, &params);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 
 		err = check_ucmd_data(dev, &params);
 		if (err)
-			return ERR_PTR(err);
+			return err;
 
 		params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
 		if (!params.ucmd)
-			return ERR_PTR(-ENOMEM);
+			return -ENOMEM;
 
 		err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
 		if (err)
			goto free_ucmd;
 	}
 
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp) {
-		err = -ENOMEM;
-		goto free_ucmd;
-	}
-
 	mutex_init(&qp->mutex);
 	qp->type = type;
 	if (udata) {
 		err = process_vendor_flags(dev, qp, params.ucmd, attr);
 		if (err)
-			goto free_qp;
+			goto free_ucmd;
 
 		err = get_qp_uidx(qp, &params);
 		if (err)
-			goto free_qp;
+			goto free_ucmd;
 	}
 	err = process_create_flags(dev, qp, attr);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	err = check_qp_attr(dev, qp, attr);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	err = create_qp(dev, pd, qp, &params);
 	if (err)
-		goto free_qp;
+		goto free_ucmd;
 
 	kfree(params.ucmd);
 	params.ucmd = NULL;
@@ -3065,7 +3224,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 	if (err)
 		goto destroy_qp;
 
-	return &qp->ibqp;
+	return 0;
 
 destroy_qp:
 	switch (qp->type) {
@@ -3076,22 +3235,12 @@ destroy_qp:
 		mlx5_ib_destroy_gsi(qp);
 		break;
 	default:
-		/*
-		 * These lines below are temp solution till QP allocation
-		 * will be moved to be under IB/core responsiblity.
-		 */
-		qp->ibqp.send_cq = attr->send_cq;
-		qp->ibqp.recv_cq = attr->recv_cq;
-		qp->ibqp.pd = pd;
 		destroy_qp_common(dev, qp, udata);
 	}
 
-	qp = NULL;
-free_qp:
-	kfree(qp);
 free_ucmd:
 	kfree(params.ucmd);
-	return ERR_PTR(err);
+	return err;
 }
 
 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -3106,9 +3255,6 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 		return mlx5_ib_destroy_dct(mqp);
 
 	destroy_qp_common(dev, mqp, udata);
-
-	kfree(mqp);
-
 	return 0;
 }
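
[Annotation, not part of the patch] The DCI-streams part of the series reports two log2-encoded capabilities (dci_streams_caps.max_log_num_concurent and .max_log_num_errored) from mlx5_ib_query_device() and, when userspace sets the MLX5_QP_FLAG_DCI_STREAM vendor flag, programs ucmd->dci_streams into the QP context in create_dci(). A hedged sketch of the bounds check a consumer of this ABI would apply, assuming the uapi struct layouts implied by the field names above (example_check_dci_streams() is illustrative, not upstream code; the spelling "concurent" matches the actual field name):

/* Both sides exchange log2 values, so a request is valid only if each
 * requested exponent is no larger than the one the device reported.
 */
static int example_check_dci_streams(const struct mlx5_ib_query_device_resp *resp,
				     const struct mlx5_ib_create_qp *ucmd)
{
	if (ucmd->dci_streams.log_num_concurent >
	    resp->dci_streams_caps.max_log_num_concurent)
		return -EINVAL;
	if (ucmd->dci_streams.log_num_errored >
	    resp->dci_streams_caps.max_log_num_errored)
		return -EINVAL;
	return 0;
}

In the kernel path above, the flag itself is gated by passing MLX5_CAP_GEN(mdev, log_max_dci_stream_channels) as the capability argument of process_vendor_flag(), so MLX5_QP_FLAG_DCI_STREAM is rejected outright on devices without the feature.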