Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c            487
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h             20
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c      4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c           6
-rw-r--r--  drivers/net/mdio/mdio-bcm-unimac.c                    93
-rw-r--r--  drivers/net/phy/Kconfig                                 1
-rw-r--r--  drivers/net/phy/marvell-88q2xxx.c                     640
-rw-r--r--  drivers/net/phy/mxl-gpy.c                              20
-rw-r--r--  drivers/net/phy/phy-c45.c                               3
9 files changed, 993 insertions, 281 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6f415425dc14..9968d67e6c77 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4211,8 +4211,12 @@ static int bnxt_alloc_vnics(struct bnxt *bp) int num_vnics = 1; #ifdef CONFIG_RFS_ACCEL - if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS) - num_vnics += bp->rx_nr_rings; + if (bp->flags & BNXT_FLAG_RFS) { + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + num_vnics++; + else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + num_vnics += bp->rx_nr_rings; + } #endif if (BNXT_CHIP_TYPE_NITRO_A0(bp)) @@ -4229,6 +4233,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp) static void bnxt_init_vnics(struct bnxt *bp) { + struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; int i; for (i = 0; i < bp->nr_vnics; i++) { @@ -4242,7 +4247,7 @@ static void bnxt_init_vnics(struct bnxt *bp) vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; if (bp->vnic_info[i].rss_hash_key) { - if (!i) { + if (i == BNXT_VNIC_DEFAULT) { u8 *key = (void *)vnic->rss_hash_key; int k; @@ -4268,8 +4273,7 @@ static void bnxt_init_vnics(struct bnxt *bp) bp->toeplitz_prefix |= key[k]; } } else { - memcpy(vnic->rss_hash_key, - bp->vnic_info[0].rss_hash_key, + memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, HW_HASH_KEY_SIZE); } } @@ -5000,6 +5004,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) { + struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; int i, j, rc, size, arr_size; void *bnapi; @@ -5128,8 +5133,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) if (rc) goto alloc_mem_err; - bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | - BNXT_VNIC_UCAST_FLAG; + vnic0->flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | + BNXT_VNIC_UCAST_FLAG; + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) + bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= + BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; + rc = bnxt_alloc_vnic_attributes(bp); if (rc) goto alloc_mem_err; @@ -5776,6 +5785,29 @@ void bnxt_fill_ipv6_mask(__be32 mask[4]) mask[i] = cpu_to_be32(~0); } +static void +bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, + struct hwrm_cfa_ntuple_filter_alloc_input *req, + u16 rxq) +{ + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + struct bnxt_vnic_info *vnic; + u32 enables; + + vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); + enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; + req->enables |= cpu_to_le32(enables); + req->rfs_ring_tbl_idx = cpu_to_le16(rxq); + } else { + u32 flags; + + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; + req->flags |= cpu_to_le32(flags); + req->dst_id = cpu_to_le16(rxq); + } +} + int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { @@ -5785,7 +5817,6 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct flow_keys *keys = &fltr->fkeys; struct bnxt_l2_filter *l2_fltr; struct bnxt_vnic_info *vnic; - u32 flags = 0; int rc; rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); @@ -5796,16 +5827,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req->l2_filter_id = l2_fltr->base.filter_id; if (fltr->base.flags & BNXT_ACT_DROP) { - flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP; + req->flags = + cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 
{ - flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; - req->dst_id = cpu_to_le16(fltr->base.rxq); + bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq); } else { vnic = &bp->vnic_info[fltr->base.rxq + 1]; req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } - req->flags = cpu_to_le32(flags); - req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); req->ethertype = htons(ETH_P_IP); req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; @@ -6085,7 +6115,10 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, for (i = 0; i < tbl_size; i++) { u16 ring_id, j; - j = bp->rss_indir_tbl[i]; + if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) + j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); + else + j = bp->rss_indir_tbl[i]; rxr = &bp->rx_ring[j]; ring_id = rxr->rx_ring_struct.fw_ring_id; @@ -6178,7 +6211,7 @@ exit: static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct hwrm_vnic_rss_qcfg_output *resp; struct hwrm_vnic_rss_qcfg_input *req; @@ -6282,6 +6315,7 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { + struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input *req; unsigned int ring = 0, grp_idx; @@ -6311,8 +6345,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { - req->rss_rule = - cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); + req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); @@ -6409,7 +6442,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; - if (vnic_id == 0) + if (vnic_id == BNXT_VNIC_DEFAULT) req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); resp = hwrm_req_hold(bp, req); @@ -7043,6 +7076,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_hw_ring_grps = le32_to_cpu(resp->alloc_hw_ring_grps); hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); + hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); cp = le16_to_cpu(resp->alloc_cmpl_rings); stats = le16_to_cpu(resp->alloc_stat_ctx); hw_resc->resv_irqs = cp; @@ -7098,8 +7132,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) static bool bnxt_rfs_supported(struct bnxt *bp); static struct hwrm_func_cfg_input * -__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, int vnics) +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_cfg_input *req; u32 enables = 0; @@ -7108,52 +7141,42 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return NULL; req->fid = cpu_to_le16(0xffff); - enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - req->num_tx_rings = cpu_to_le16(tx_rings); + enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + req->num_tx_rings = cpu_to_le16(hwr->tx); if (BNXT_NEW_RM(bp)) { - enables |= rx_rings ? 
FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; - enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; - enables |= tx_rings + ring_grps ? + enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; + enables |= hwr->cp_p5 ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; - enables |= rx_rings ? - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; } else { - enables |= cp_rings ? + enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; - enables |= ring_grps ? - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; - } - enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; - - req->num_rx_rings = cpu_to_le16(rx_rings); + enables |= hwr->grp ? + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + } + enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : + 0; + req->num_rx_rings = cpu_to_le16(hwr->rx); + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); - - req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); - req->num_msix = cpu_to_le16(cp_rings); - req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); + req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); + req->num_msix = cpu_to_le16(hwr->cp); } else { - req->num_cmpl_rings = cpu_to_le16(cp_rings); - req->num_hw_ring_grps = cpu_to_le16(ring_grps); - req->num_rsscos_ctxs = cpu_to_le16(1); - if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && - bnxt_rfs_supported(bp)) - req->num_rsscos_ctxs = - cpu_to_le16(ring_grps + 1); + req->num_cmpl_rings = cpu_to_le16(hwr->cp); + req->num_hw_ring_grps = cpu_to_le16(hwr->grp); } - req->num_stat_ctxs = cpu_to_le16(stats); - req->num_vnics = cpu_to_le16(vnics); + req->num_stat_ctxs = cpu_to_le16(hwr->stat); + req->num_vnics = cpu_to_le16(hwr->vnic); } req->enables = cpu_to_le32(enables); return req; } static struct hwrm_func_vf_cfg_input * -__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, int vnics) +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_vf_cfg_input *req; u32 enables = 0; @@ -7161,51 +7184,46 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) return NULL; - enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; - enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - enables |= tx_rings + ring_grps ? + enables |= hwr->cp_p5 ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; } else { - enables |= cp_rings ? - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; - enables |= ring_grps ? + enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + enables |= hwr->grp ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; } - enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req->num_tx_rings = cpu_to_le16(tx_rings); - req->num_rx_rings = cpu_to_le16(rx_rings); + req->num_tx_rings = cpu_to_le16(hwr->tx); + req->num_rx_rings = cpu_to_le16(hwr->rx); + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); - - req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); - req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); + req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); } else { - req->num_cmpl_rings = cpu_to_le16(cp_rings); - req->num_hw_ring_grps = cpu_to_le16(ring_grps); - req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); + req->num_cmpl_rings = cpu_to_le16(hwr->cp); + req->num_hw_ring_grps = cpu_to_le16(hwr->grp); } - req->num_stat_ctxs = cpu_to_le16(stats); - req->num_vnics = cpu_to_le16(vnics); + req->num_stat_ctxs = cpu_to_le16(hwr->stat); + req->num_vnics = cpu_to_le16(hwr->vnic); req->enables = cpu_to_le32(enables); return req; } static int -bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, int vnics) +bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_cfg_input *req; int rc; - req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); if (!req) return -ENOMEM; @@ -7219,25 +7237,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return rc; if (bp->hwrm_spec_code < 0x10601) - bp->hw_resc.resv_tx_rings = tx_rings; + bp->hw_resc.resv_tx_rings = hwr->tx; return bnxt_hwrm_get_rings(bp); } static int -bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, int vnics) +bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_vf_cfg_input *req; int rc; if (!BNXT_NEW_RM(bp)) { - bp->hw_resc.resv_tx_rings = tx_rings; + bp->hw_resc.resv_tx_rings = hwr->tx; return 0; } - req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); if (!req) return -ENOMEM; @@ -7248,15 +7264,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return bnxt_hwrm_get_rings(bp); } -static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, - int cp, int stat, int vnic) +static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { if (BNXT_PF(bp)) - return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, - vnic); + return bnxt_hwrm_reserve_pf_rings(bp, hwr); else - return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, - vnic); + return bnxt_hwrm_reserve_vf_rings(bp, hwr); } int bnxt_nq_rings_in_use(struct bnxt *bp) @@ -7299,6 +7312,24 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp) return cp + ulp_stat; } +static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + if (!hwr->grp) + return 0; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); + + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + rss_ctx *= hwr->vnic; + return rss_ctx; + } + if (BNXT_VF(bp)) + return BNXT_VF_MAX_RSS_CTX; + if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) + return hwr->grp + 1; + 
return 1; +} + /* Check if a default RSS map needs to be setup. This function is only * used on older firmware that does not require reserving RX rings. */ @@ -7314,13 +7345,24 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) } } +static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) +{ + if (bp->flags & BNXT_FLAG_RFS) { + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + return 2; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return rx_rings + 1; + } + return 1; +} + static bool bnxt_need_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int cp = bnxt_cp_rings_in_use(bp); int nq = bnxt_nq_rings_in_use(bp); int rx = bp->rx_nr_rings, stat; - int vnic = 1, grp = rx; + int vnic, grp = rx; if (hw_resc->resv_tx_rings != bp->tx_nr_rings && bp->hwrm_spec_code >= 0x10601) @@ -7335,9 +7377,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) bnxt_check_rss_tbl_no_rmgr(bp); return false; } - if ((bp->flags & BNXT_FLAG_RFS) && - !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) - vnic = rx + 1; + + vnic = bnxt_get_total_vnics(bp, rx); + if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; stat = bnxt_get_func_stat_ctxs(bp); @@ -7352,47 +7394,65 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) return false; } -static int __bnxt_reserve_rings(struct bnxt *bp) +static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - int cp = bnxt_nq_rings_in_use(bp); - int tx = bp->tx_nr_rings; - int rx = bp->rx_nr_rings; - int grp, rx_rings, rc; - int vnic = 1, stat; + + hwr->tx = hw_resc->resv_tx_rings; + if (BNXT_NEW_RM(bp)) { + hwr->rx = hw_resc->resv_rx_rings; + hwr->cp = hw_resc->resv_irqs; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hwr->cp_p5 = hw_resc->resv_cp_rings; + hwr->grp = hw_resc->resv_hw_ring_grps; + hwr->vnic = hw_resc->resv_vnics; + hwr->stat = hw_resc->resv_stat_ctxs; + hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; + } +} + +static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && + hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); +} + +static int __bnxt_reserve_rings(struct bnxt *bp) +{ + struct bnxt_hw_rings hwr = {0}; + int rx_rings, rc; bool sh = false; int tx_cp; if (!bnxt_need_reserve_rings(bp)) return 0; + hwr.cp = bnxt_nq_rings_in_use(bp); + hwr.tx = bp->tx_nr_rings; + hwr.rx = bp->rx_nr_rings; if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - if ((bp->flags & BNXT_FLAG_RFS) && - !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) - vnic = rx + 1; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hwr.cp_p5 = hwr.rx + hwr.tx; + + hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); + if (bp->flags & BNXT_FLAG_AGG_RINGS) - rx <<= 1; - grp = bp->rx_nr_rings; - stat = bnxt_get_func_stat_ctxs(bp); + hwr.rx <<= 1; + hwr.grp = bp->rx_nr_rings; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); + hwr.stat = bnxt_get_func_stat_ctxs(bp); - rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); + rc = bnxt_hwrm_reserve_rings(bp, &hwr); if (rc) return rc; - tx = hw_resc->resv_tx_rings; - if (BNXT_NEW_RM(bp)) { - rx = hw_resc->resv_rx_rings; - cp = hw_resc->resv_irqs; - grp = hw_resc->resv_hw_ring_grps; - vnic = hw_resc->resv_vnics; - stat = hw_resc->resv_stat_ctxs; - } + bnxt_copy_reserved_rings(bp, &hwr); - rx_rings = rx; + rx_rings = hwr.rx; if (bp->flags & BNXT_FLAG_AGG_RINGS) { - if (rx >= 2) { - rx_rings = rx >> 1; + if (hwr.rx >= 2) { + rx_rings = hwr.rx >> 1; } else { if (netif_running(bp->dev)) return -ENOMEM; @@ 
-7404,17 +7464,17 @@ static int __bnxt_reserve_rings(struct bnxt *bp) bnxt_set_ring_params(bp); } } - rx_rings = min_t(int, rx_rings, grp); - cp = min_t(int, cp, bp->cp_nr_rings); - if (stat > bnxt_get_ulp_stat_ctxs(bp)) - stat -= bnxt_get_ulp_stat_ctxs(bp); - cp = min_t(int, cp, stat); - rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); + rx_rings = min_t(int, rx_rings, hwr.grp); + hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); + if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) + hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); + hwr.cp = min_t(int, hwr.cp, hwr.stat); + rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); if (bp->flags & BNXT_FLAG_AGG_RINGS) - rx = rx_rings << 1; - tx_cp = bnxt_num_tx_to_cp(bp, tx); - cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; - bp->tx_nr_rings = tx; + hwr.rx = rx_rings << 1; + tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); + hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; + bp->tx_nr_rings = hwr.tx; /* If we cannot reserve all the RX rings, reset the RSS map only * if absolutely necessary @@ -7431,9 +7491,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp) } } bp->rx_nr_rings = rx_rings; - bp->cp_nr_rings = cp; + bp->cp_nr_rings = hwr.cp; - if (!tx || !rx || !cp || !grp || !vnic || !stat) + if (!bnxt_rings_ok(bp, &hwr)) return -ENOMEM; if (!netif_is_rxfh_configured(bp->dev)) @@ -7442,9 +7502,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) return rc; } -static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, - int vnics) +static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_vf_cfg_input *req; u32 flags; @@ -7452,8 +7510,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, if (!BNXT_NEW_RM(bp)) return 0; - req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | @@ -7467,15 +7524,12 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return hwrm_req_send_silent(bp, req); } -static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, - int vnics) +static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_cfg_input *req; u32 flags; - req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | @@ -7493,20 +7547,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return hwrm_req_send_silent(bp, req); } -static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, - int vnics) +static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { if (bp->hwrm_spec_code < 0x10801) return 0; if (BNXT_PF(bp)) - return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, - ring_grps, cp_rings, stats, - vnics); + return bnxt_hwrm_check_pf_rings(bp, hwr); - return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + return bnxt_hwrm_check_vf_rings(bp, hwr); } static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) @@ -8952,6 +9001,10 @@ 
static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; if (flags & + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; + + if (flags & CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; @@ -9819,10 +9872,28 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) return __bnxt_setup_vnic(bp, vnic_id); } +static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id, + u16 start_rx_ring_idx, int rx_rings) +{ + int rc; + + rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", + vnic_id, rc); + return rc; + } + return bnxt_setup_vnic(bp, vnic_id); +} + static int bnxt_alloc_rfs_vnics(struct bnxt *bp) { int i, rc = 0; + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0, + bp->rx_nr_rings); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) return 0; @@ -9838,14 +9909,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) vnic->flags |= BNXT_VNIC_RFS_FLAG; if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; - rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", - vnic_id, rc); - break; - } - rc = bnxt_setup_vnic(bp, vnic_id); - if (rc) + if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1)) break; } return rc; @@ -9886,7 +9950,7 @@ static bool bnxt_mc_list_updated(struct bnxt *, u32 *); static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; int rc = 0; unsigned int rx_nr_rings = bp->rx_nr_rings; @@ -9915,7 +9979,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rx_nr_rings--; /* default vnic 0 */ - rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); + rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); goto err_out; @@ -9924,7 +9988,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (BNXT_VF(bp)) bnxt_hwrm_func_qcfg(bp); - rc = bnxt_setup_vnic(bp, 0); + rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT); if (rc) goto err_out; if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) @@ -11215,6 +11279,7 @@ static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) hw_resc->resv_rx_rings = 0; hw_resc->resv_hw_ring_grps = 0; hw_resc->resv_vnics = 0; + hw_resc->resv_rsscos_ctxs = 0; if (!fw_reset) { bp->tx_nr_rings = 0; bp->rx_nr_rings = 0; @@ -11583,7 +11648,7 @@ static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); - l2_fltr = bp->vnic_info[0].l2_filters[0]; + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; atomic_inc(&l2_fltr->refcnt); ntp_fltr->l2_fltr = l2_fltr; if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { @@ -12137,8 +12202,8 @@ void bnxt_get_ring_err_stats(struct bnxt *bp, static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct net_device *dev = bp->dev; - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; struct netdev_hw_addr *ha; u8 *haddr; int mc_count = 0; @@ -12172,7 +12237,7 @@ static 
bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) static bool bnxt_uc_list_updated(struct bnxt *bp) { struct net_device *dev = bp->dev; - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct netdev_hw_addr *ha; int off = 0; @@ -12199,7 +12264,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) if (!test_bit(BNXT_STATE_OPEN, &bp->state)) return; - vnic = &bp->vnic_info[0]; + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; mask = vnic->rx_mask; mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | @@ -12230,7 +12295,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) static int bnxt_cfg_rx_mode(struct bnxt *bp) { struct net_device *dev = bp->dev; - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct netdev_hw_addr *ha; int i, off = 0, rc; bool uc_update; @@ -12342,21 +12407,32 @@ static bool bnxt_rfs_supported(struct bnxt *bp) /* If runtime conditions support RFS */ static bool bnxt_rfs_capable(struct bnxt *bp) { - int vnics, max_vnics, max_rss_ctxs; + struct bnxt_hw_rings hwr = {0}; + int max_vnics, max_rss_ctxs; + hwr.rss_ctx = 1; + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + /* 2 VNICS: default + Ntuple */ + hwr.vnic = 2; + hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * + hwr.vnic; + goto check_reserve_vnic; + } if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) return bnxt_rfs_supported(bp); if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false; - vnics = 1 + bp->rx_nr_rings; + hwr.vnic = 1 + bp->rx_nr_rings; +check_reserve_vnic: max_vnics = bnxt_get_max_func_vnics(bp); max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); - /* RSS contexts not a limiting factor */ - if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) - max_rss_ctxs = max_vnics; - if (vnics > max_vnics || vnics > max_rss_ctxs) { + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)) + hwr.rss_ctx = hwr.vnic; + + if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { if (bp->rx_nr_rings > 1) netdev_warn(bp->dev, "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", @@ -12367,15 +12443,19 @@ static bool bnxt_rfs_capable(struct bnxt *bp) if (!BNXT_NEW_RM(bp)) return true; - if (vnics == bp->hw_resc.resv_vnics) + if (hwr.vnic == bp->hw_resc.resv_vnics && + hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) return true; - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); - if (vnics <= bp->hw_resc.resv_vnics) + bnxt_hwrm_reserve_rings(bp, &hwr); + if (hwr.vnic <= bp->hw_resc.resv_vnics && + hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) return true; netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); + hwr.vnic = 1; + hwr.rss_ctx = 0; + bnxt_hwrm_reserve_rings(bp, &hwr); return false; } @@ -12414,14 +12494,24 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, return features; } +static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, + bool link_re_init, u32 flags, bool update_tpa) +{ + bnxt_close_nic(bp, irq_re_init, link_re_init); + bp->flags = flags; + if (update_tpa) + bnxt_set_ring_params(bp); + return bnxt_open_nic(bp, irq_re_init, link_re_init); +} + static int bnxt_set_features(struct net_device *dev, netdev_features_t features) { + bool update_tpa = false, update_ntuple = false; struct bnxt *bp = netdev_priv(dev); u32 flags = 
bp->flags; u32 changes; int rc = 0; bool re_init = false; - bool update_tpa = false; flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; if (features & NETIF_F_GRO_HW) @@ -12452,6 +12542,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (changes & ~BNXT_FLAG_TPA) re_init = true; + if (changes & BNXT_FLAG_RFS) + update_ntuple = true; + if (flags != bp->flags) { u32 old_flags = bp->flags; @@ -12462,14 +12555,12 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) return rc; } - if (re_init) { - bnxt_close_nic(bp, false, false); - bp->flags = flags; - if (update_tpa) - bnxt_set_ring_params(bp); + if (update_ntuple) + return bnxt_reinit_features(bp, true, false, flags, update_tpa); + + if (re_init) + return bnxt_reinit_features(bp, false, false, flags, update_tpa); - return bnxt_open_nic(bp, false, false); - } if (update_tpa) { bp->flags = flags; rc = bnxt_set_tpa(bp, @@ -13299,9 +13390,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp) { int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; - int tx_rings_needed, stats; + struct bnxt_hw_rings hwr = {0}; int rx_rings = rx; - int cp, vnics; if (tcs) tx_sets = tcs; @@ -13314,26 +13404,27 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; - tx_rings_needed = tx * tx_sets + tx_xdp; - if (max_tx < tx_rings_needed) + hwr.rx = rx_rings; + hwr.tx = tx * tx_sets + tx_xdp; + if (max_tx < hwr.tx) return -ENOMEM; - vnics = 1; - if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == - BNXT_FLAG_RFS) - vnics += rx; + hwr.vnic = bnxt_get_total_vnics(bp, rx); - tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp); - cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; - if (max_cp < cp) + tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); + hwr.cp = sh ? 
max_t(int, tx_cp, rx) : tx_cp + rx; + if (max_cp < hwr.cp) return -ENOMEM; - stats = cp; + hwr.stat = hwr.cp; if (BNXT_NEW_RM(bp)) { - cp += bnxt_get_ulp_msix_num(bp); - stats += bnxt_get_ulp_stat_ctxs(bp); + hwr.cp += bnxt_get_ulp_msix_num(bp); + hwr.stat += bnxt_get_ulp_stat_ctxs(bp); + hwr.grp = rx; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); } - return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, - stats, vnics); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hwr.cp_p5 = hwr.tx + rx; + return bnxt_hwrm_check_rings(bp, &hwr); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -14059,7 +14150,7 @@ u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, if (skb) return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; - vnic = &bp->vnic_info[0]; + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); } @@ -14154,7 +14245,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u32 flags; if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { - l2_fltr = bp->vnic_info[0].l2_filters[0]; + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; atomic_inc(&l2_fltr->refcnt); } else { struct bnxt_l2_key key; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 60bdd0673ec8..dd849e715c9b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1213,6 +1213,9 @@ struct bnxt_ring_grp_info { u16 cp_fw_ring_id; }; +#define BNXT_VNIC_DEFAULT 0 +#define BNXT_VNIC_NTUPLE 1 + struct bnxt_vnic_info { u16 fw_vnic_id; /* returned by Chimp during alloc */ #define BNXT_MAX_CTX_PER_VNIC 8 @@ -1252,11 +1255,24 @@ struct bnxt_vnic_info { #define BNXT_VNIC_MCAST_FLAG 4 #define BNXT_VNIC_UCAST_FLAG 8 #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 +#define BNXT_VNIC_NTUPLE_FLAG 0x20 +}; + +struct bnxt_hw_rings { + int tx; + int rx; + int grp; + int cp; + int cp_p5; + int stat; + int vnic; + int rss_ctx; }; struct bnxt_hw_resc { u16 min_rsscos_ctxs; u16 max_rsscos_ctxs; + u16 resv_rsscos_ctxs; u16 min_cp_rings; u16 max_cp_rings; u16 resv_cp_rings; @@ -2314,12 +2330,16 @@ struct bnxt { #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36) #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37) #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38) + #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39) u32 fw_dbg_cap; #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) #define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \ ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC)) +#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \ + (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3)) + u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index dfb5944683f8..1d240a27455a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1314,7 +1314,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, if (!new_fltr) return -ENOMEM; - l2_fltr = bp->vnic_info[0].l2_filters[0]; + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; atomic_inc(&l2_fltr->refcnt); new_fltr->l2_fltr = l2_fltr; fmasks = &new_fltr->fmasks; @@ -1763,7 +1763,7 @@ static int bnxt_get_rxfh(struct net_device *dev, if (!bp->vnic_info) return 0; - vnic = &bp->vnic_info[0]; + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; if (rxfh->indir && bp->rss_indir_tbl) { 
tbl_size = bnxt_get_rxfh_indir_size(dev); for (i = 0; i < tbl_size; i++) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index cbbe004621bc..9ada89355747 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -476,6 +476,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv) ppd.wait_func = bcmgenet_mii_wait; ppd.wait_func_data = priv; ppd.bus_name = "bcmgenet MII bus"; + /* Pass a reference to our "main" clock which is used for MDIO + * transfers + */ + ppd.clk = priv->clk; /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD * and is 2 * 32-bits word long, 8 bytes total. @@ -674,7 +678,5 @@ void bcmgenet_mii_exit(struct net_device *dev) if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); - clk_prepare_enable(priv->clk); platform_device_unregister(priv->mii_pdev); - clk_disable_unprepare(priv->clk); } diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index 68f8ee0ec8ba..6fe08427fdd4 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -94,6 +94,10 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) int ret; u32 cmd; + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + /* Prepare the read operation */ cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT); unimac_mdio_writel(priv, cmd, MDIO_CMD); @@ -103,7 +107,7 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) ret = priv->wait_func(priv->wait_func_data); if (ret) - return ret; + goto out; cmd = unimac_mdio_readl(priv, MDIO_CMD); @@ -112,10 +116,15 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) * that condition here and ignore the MDIO controller read failure * indication. 
*/ - if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL)) - return -EIO; + if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL)) { + ret = -EIO; + goto out; + } - return cmd & 0xffff; + ret = cmd & 0xffff; +out: + clk_disable_unprepare(priv->clk); + return ret; } static int unimac_mdio_write(struct mii_bus *bus, int phy_id, @@ -123,6 +132,11 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, { struct unimac_mdio_priv *priv = bus->priv; u32 cmd; + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; /* Prepare the write operation */ cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | @@ -131,7 +145,10 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, unimac_mdio_start(priv); - return priv->wait_func(priv->wait_func_data); + ret = priv->wait_func(priv->wait_func_data); + clk_disable_unprepare(priv->clk); + + return ret; } /* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with @@ -178,14 +195,19 @@ static int unimac_mdio_reset(struct mii_bus *bus) return 0; } -static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv) +static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv) { unsigned long rate; u32 reg, div; + int ret; /* Keep the hardware default values */ if (!priv->clk_freq) - return; + return 0; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; if (!priv->clk) rate = 250000000; @@ -195,7 +217,8 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv) div = (rate / (2 * priv->clk_freq)) - 1; if (div & ~MDIO_CLK_DIV_MASK) { pr_warn("Incorrect MDIO clock frequency, ignoring\n"); - return; + ret = 0; + goto out; } /* The MDIO clock is the reference clock (typically 250Mhz) divided by @@ -205,6 +228,9 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv) reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT); reg |= div << MDIO_CLK_DIV_SHIFT; unimac_mdio_writel(priv, reg, MDIO_CFG); +out: + clk_disable_unprepare(priv->clk); + return ret; } static int unimac_mdio_probe(struct platform_device *pdev) @@ -235,24 +261,12 @@ static int unimac_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - priv->clk = devm_clk_get_optional(&pdev->dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - - ret = clk_prepare_enable(priv->clk); - if (ret) - return ret; - if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq)) priv->clk_freq = 0; - unimac_mdio_clk_set(priv); - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - ret = -ENOMEM; - goto out_clk_disable; - } + if (!priv->mii_bus) + return -ENOMEM; bus = priv->mii_bus; bus->priv = priv; @@ -261,17 +275,29 @@ static int unimac_mdio_probe(struct platform_device *pdev) priv->wait_func = pdata->wait_func; priv->wait_func_data = pdata->wait_func_data; bus->phy_mask = ~pdata->phy_mask; + priv->clk = pdata->clk; } else { bus->name = "unimac MII bus"; priv->wait_func_data = priv; priv->wait_func = unimac_mdio_poll; + priv->clk = devm_clk_get_optional(&pdev->dev, NULL); + } + + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto out_mdio_free; } + bus->parent = &pdev->dev; bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; bus->reset = unimac_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); + ret = unimac_mdio_clk_set(priv); + if (ret) + goto out_mdio_free; + ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); @@ -286,8 +312,6 @@ static int unimac_mdio_probe(struct platform_device *pdev) 
out_mdio_free: mdiobus_free(bus); -out_clk_disable: - clk_disable_unprepare(priv->clk); return ret; } @@ -297,34 +321,17 @@ static void unimac_mdio_remove(struct platform_device *pdev) mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); - clk_disable_unprepare(priv->clk); -} - -static int __maybe_unused unimac_mdio_suspend(struct device *d) -{ - struct unimac_mdio_priv *priv = dev_get_drvdata(d); - - clk_disable_unprepare(priv->clk); - - return 0; } static int __maybe_unused unimac_mdio_resume(struct device *d) { struct unimac_mdio_priv *priv = dev_get_drvdata(d); - int ret; - ret = clk_prepare_enable(priv->clk); - if (ret) - return ret; - - unimac_mdio_clk_set(priv); - - return 0; + return unimac_mdio_clk_set(priv); } static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops, - unimac_mdio_suspend, unimac_mdio_resume); + NULL, unimac_mdio_resume); static const struct of_device_id unimac_mdio_ids[] = { { .compatible = "brcm,asp-v2.1-mdio", }, diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index e261e58bf158..1df0595c5ba9 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -232,6 +232,7 @@ config MARVELL_10G_PHY config MARVELL_88Q2XXX_PHY tristate "Marvell 88Q2XXX PHY" + depends on HWMON || HWMON=n help Support for the Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet PHYs. diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c index 1c3ff77de56b..6b4bd9883304 100644 --- a/drivers/net/phy/marvell-88q2xxx.c +++ b/drivers/net/phy/marvell-88q2xxx.c @@ -1,10 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* * Marvell 88Q2XXX automotive 100BASE-T1/1000BASE-T1 PHY driver + * + * Derived from Marvell Q222x API + * + * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH */ #include <linux/ethtool_netlink.h> #include <linux/marvell_phy.h> #include <linux/phy.h> +#include <linux/hwmon.h> + +#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1) #define MDIO_MMD_AN_MV_STAT 32769 #define MDIO_MMD_AN_MV_STAT_ANEG 0x0100 @@ -13,8 +20,38 @@ #define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000 #define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000 +#define MDIO_MMD_AN_MV_STAT2 32794 +#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800 +#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000 +#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000 + +#define MDIO_MMD_PCS_MV_INT_EN 32784 +#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040 +#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080 +#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000 + +#define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785 +#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040 +#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN 0x0080 +#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_100BT1_GEN 0x1000 + +#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787 +#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800 + +#define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833 +#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001 +#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040 +#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT_EN 0x0080 + +#define MDIO_MMD_PCS_MV_TEMP_SENSOR2 32834 +#define MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK 0xc000 + +#define MDIO_MMD_PCS_MV_TEMP_SENSOR3 32835 +#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK 0xff00 +#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK 0x00ff + #define MDIO_MMD_PCS_MV_100BT1_STAT1 33032 -#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00FF +#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00ff #define MDIO_MMD_PCS_MV_100BT1_STAT1_JABBER 0x0100 #define MDIO_MMD_PCS_MV_100BT1_STAT1_LINK 0x0200 #define 
MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX 0x1000 @@ -27,6 +64,71 @@ #define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004 #define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008 +#define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042 +#define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400 + +#define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043 +#define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400 + +#define MDIO_MMD_PCS_MV_RX_STAT 33328 + +#define MDIO_MMD_PCS_MV_TDR_RESET 65226 +#define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000 + +#define MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE 65241 + +#define MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE 65242 + +#define MDIO_MMD_PCS_MV_TDR_STATUS 65245 +#define MDIO_MMD_PCS_MV_TDR_STATUS_MASK 0x0003 +#define MDIO_MMD_PCS_MV_TDR_STATUS_OFF 0x0001 +#define MDIO_MMD_PCS_MV_TDR_STATUS_ON 0x0002 +#define MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK 0xff00 +#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK 0x00f0 +#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT 0x0030 +#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN 0x00e0 +#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK 0x0070 +#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_IN_PROGR 0x0080 +#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_NOISE 0x0050 + +#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246 + +struct mmd_val { + int devad; + u32 regnum; + u16 val; +}; + +static const struct mmd_val mv88q222x_revb0_init_seq0[] = { + { MDIO_MMD_PCS, 0x8033, 0x6801 }, + { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 }, + { MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER | MDIO_PMA_CTRL1_SPEED1000 }, + { MDIO_MMD_PCS, 0xfe1b, 0x48 }, + { MDIO_MMD_PCS, 0xffe4, 0x6b6 }, + { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0 }, + { MDIO_MMD_PCS, MDIO_CTRL1, 0x0 }, +}; + +static const struct mmd_val mv88q222x_revb0_init_seq1[] = { + { MDIO_MMD_PCS, 0xfe79, 0x0 }, + { MDIO_MMD_PCS, 0xfe07, 0x125a }, + { MDIO_MMD_PCS, 0xfe09, 0x1288 }, + { MDIO_MMD_PCS, 0xfe08, 0x2588 }, + { MDIO_MMD_PCS, 0xfe11, 0x1105 }, + { MDIO_MMD_PCS, 0xfe72, 0x042c }, + { MDIO_MMD_PCS, 0xfbba, 0xcb2 }, + { MDIO_MMD_PCS, 0xfbbb, 0xc4a }, + { MDIO_MMD_AN, 0x8032, 0x2020 }, + { MDIO_MMD_AN, 0x8031, 0xa28 }, + { MDIO_MMD_AN, 0x8031, 0xc28 }, + { MDIO_MMD_PCS, 0xffdb, 0xfc10 }, + { MDIO_MMD_PCS, 0xfe1b, 0x58 }, + { MDIO_MMD_PCS, 0xfe79, 0x4 }, + { MDIO_MMD_PCS, 0xfe5f, 0xe8 }, + { MDIO_MMD_PCS, 0xfe05, 0x755c }, +}; + static int mv88q2xxx_soft_reset(struct phy_device *phydev) { int ret; @@ -50,20 +152,23 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev) /* Read vendor specific Auto-Negotiation status register to get local * and remote receiver status according to software initialization - * guide. + * guide. However, when not in polling mode the local and remote + * receiver status are not evaluated due to the Marvell 88Q2xxx APIs. */ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT); if (ret < 0) { return ret; - } else if ((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) && - (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) { + } else if (((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) && + (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) || + !phy_polling_mode(phydev)) { /* The link state is latched low so that momentary link * drops can be detected. Do not double-read the status * in polling mode to detect such short link drops except * the link was already down. 
*/ if (!phy_polling_mode(phydev) || !phydev->link) { - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT); + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_PCS_1000BT1_STAT); if (ret < 0) return ret; else if (ret & MDIO_PCS_1000BT1_STAT_LINK) @@ -71,7 +176,8 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev) } if (!link) { - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT); + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_PCS_1000BT1_STAT); if (ret < 0) return ret; else if (ret & MDIO_PCS_1000BT1_STAT_LINK) @@ -94,8 +200,20 @@ static int mv88q2xxx_read_link_100m(struct phy_device *phydev) * the link was already down. In case we are not polling, * we always read the realtime status. */ - if (!phy_polling_mode(phydev) || !phydev->link) { - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1); + if (!phy_polling_mode(phydev)) { + phydev->link = false; + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_100BT1_STAT2); + if (ret < 0) + return ret; + + if (ret & MDIO_MMD_PCS_MV_100BT1_STAT2_LINK) + phydev->link = true; + + return 0; + } else if (!phydev->link) { + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_100BT1_STAT1); if (ret < 0) return ret; else if (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK) @@ -120,24 +238,90 @@ out: static int mv88q2xxx_read_link(struct phy_device *phydev) { - int ret; - /* The 88Q2XXX PHYs do not have the PMA/PMD status register available, * therefore we need to read the link status from the vendor specific * registers depending on the speed. */ + if (phydev->speed == SPEED_1000) - ret = mv88q2xxx_read_link_gbit(phydev); + return mv88q2xxx_read_link_gbit(phydev); + else if (phydev->speed == SPEED_100) + return mv88q2xxx_read_link_100m(phydev); + + phydev->link = false; + return 0; +} + +static int mv88q2xxx_read_master_slave_state(struct phy_device *phydev) +{ + int ret; + + phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT); + if (ret < 0) + return ret; + + if (ret & MDIO_MMD_AN_MV_STAT_LOCAL_MASTER) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; else - ret = mv88q2xxx_read_link_100m(phydev); + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + + return 0; +} + +static int mv88q2xxx_read_aneg_speed(struct phy_device *phydev) +{ + int ret; + + phydev->speed = SPEED_UNKNOWN; + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT2); + if (ret < 0) + return ret; + + if (!(ret & MDIO_MMD_AN_MV_STAT2_AN_RESOLVED)) + return 0; - return ret; + if (ret & MDIO_MMD_AN_MV_STAT2_100BT1) + phydev->speed = SPEED_100; + else if (ret & MDIO_MMD_AN_MV_STAT2_1000BT1) + phydev->speed = SPEED_1000; + + return 0; } static int mv88q2xxx_read_status(struct phy_device *phydev) { int ret; + if (phydev->autoneg == AUTONEG_ENABLE) { + /* We have to get the negotiated speed first, otherwise we are + * not able to read the link. + */ + ret = mv88q2xxx_read_aneg_speed(phydev); + if (ret < 0) + return ret; + + ret = mv88q2xxx_read_link(phydev); + if (ret < 0) + return ret; + + ret = genphy_c45_read_lpa(phydev); + if (ret < 0) + return ret; + + ret = genphy_c45_baset1_read_status(phydev); + if (ret < 0) + return ret; + + ret = mv88q2xxx_read_master_slave_state(phydev); + if (ret < 0) + return ret; + + phy_resolve_aneg_linkmode(phydev); + + return 0; + } + ret = mv88q2xxx_read_link(phydev); if (ret < 0) return ret; @@ -166,7 +350,9 @@ static int mv88q2xxx_get_features(struct phy_device *phydev) * sequence provided by Marvell. 
Disable it for now until a proper * workaround is found or a new PHY revision is released. */ - linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); + if (phydev->drv->phy_id == MARVELL_PHY_ID_88Q2110) + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported); return 0; } @@ -179,28 +365,29 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev) if (ret) return ret; - return mv88q2xxx_soft_reset(phydev); + return phydev->drv->soft_reset(phydev); } static int mv88q2xxx_config_init(struct phy_device *phydev) { - int ret; - /* The 88Q2XXX PHYs do have the extended ability register available, but * register MDIO_PMA_EXTABLE where they should signalize it does not * work according to specification. Therefore, we force it here. */ phydev->pma_extable = MDIO_PMA_EXTABLE_BT1; - /* Read the current PHY configuration */ - ret = genphy_c45_read_pma(phydev); - if (ret) - return ret; + /* Configure interrupt with default settings, output is driven low for + * active interrupt and high for inactive. + */ + if (phy_interrupt_is_valid(phydev)) + return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_GPIO_INT_CTRL, + MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS); - return mv88q2xxx_config_aneg(phydev); + return 0; } -static int mv88q2xxxx_get_sqi(struct phy_device *phydev) +static int mv88q2xxx_get_sqi(struct phy_device *phydev) { int ret; @@ -208,7 +395,8 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev) /* Read the SQI from the vendor specific receiver status * register */ - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8230); + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_RX_STAT); if (ret < 0) return ret; @@ -218,7 +406,7 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev) * but can be found in the Software Initialization Guide. Only * revisions >= A0 are supported. */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xFC5D, 0x00FF, 0x00AC); + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xfc5d, 0xff, 0xac); if (ret < 0) return ret; @@ -227,14 +415,386 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev) return ret; } - return ret & 0x0F; + return ret & 0x0f; } -static int mv88q2xxxx_get_sqi_max(struct phy_device *phydev) +static int mv88q2xxx_get_sqi_max(struct phy_device *phydev) { return 15; } +static int mv88q2xxx_config_intr(struct phy_device *phydev) +{ + int ret; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* Enable interrupts for 1000BASE-T1 link up and down events + * and enable general interrupts for 100BASE-T1. + */ + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_INT_EN, + MDIO_MMD_PCS_MV_INT_EN_LINK_UP | + MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN | + MDIO_MMD_PCS_MV_INT_EN_100BT1); + if (ret < 0) + return ret; + + /* Enable interrupts for 100BASE-T1 link events */ + return phy_write_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_100BT1_INT_EN, + MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT); + } else { + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_INT_EN, 0); + if (ret < 0) + return ret; + + return phy_write_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_100BT1_INT_EN, 0); + } +} + +static irqreturn_t mv88q2xxx_handle_interrupt(struct phy_device *phydev) +{ + bool trigger_machine = false; + int irq; + + /* Before we can acknowledge the 100BT1 general interrupt, that is in + * the 1000BT1 interrupt status register, we have to acknowledge any + * interrupts that are related to it. 
Therefore we read first the 100BT1 + * interrupt status register, followed by reading the 1000BT1 interrupt + * status register. + */ + + irq = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_COPPER_INT_STAT); + if (irq < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + /* Check link status for 100BT1 */ + if (irq & MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT) + trigger_machine = true; + + irq = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_GPIO_INT_STAT); + if (irq < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + /* Check link status for 1000BT1 */ + if ((irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP) || + (irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN)) + trigger_machine = true; + + if (!trigger_machine) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static int mv88q2xxx_suspend(struct phy_device *phydev) +{ + int ret; + + /* Disable PHY interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_DISABLED; + ret = mv88q2xxx_config_intr(phydev); + if (ret) + return ret; + } + + return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); +} + +static int mv88q2xxx_resume(struct phy_device *phydev) +{ + int ret; + + /* Enable PHY interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + ret = mv88q2xxx_config_intr(phydev); + if (ret) + return ret; + } + + return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); +} + +#if IS_ENABLED(CONFIG_HWMON) +static const struct hwmon_channel_info * const mv88q2xxx_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_ALARM), + NULL +}; + +static umode_t mv88q2xxx_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_max: + return 0644; + case hwmon_temp_alarm: + return 0444; + default: + return 0; + } +} + +static int mv88q2xxx_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int ret; + + switch (attr) { + case hwmon_temp_input: + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_TEMP_SENSOR3); + if (ret < 0) + return ret; + + ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK, ret); + *val = (ret - 75) * 1000; + return 0; + case hwmon_temp_max: + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_TEMP_SENSOR3); + if (ret < 0) + return ret; + + ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK, + ret); + *val = (ret - 75) * 1000; + return 0; + case hwmon_temp_alarm: + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_TEMP_SENSOR1); + if (ret < 0) + return ret; + + *val = !!(ret & MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int mv88q2xxx_hwmon_write(struct device *dev, + enum hwmon_sensor_types type, u32 attr, + int channel, long val) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + + switch (attr) { + case hwmon_temp_max: + clamp_val(val, -75000, 180000); + val = (val / 1000) + 75; + val = FIELD_PREP(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK, + val); + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_TEMP_SENSOR3, + MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK, + val); + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_ops mv88q2xxx_hwmon_hwmon_ops = { + .is_visible = mv88q2xxx_hwmon_is_visible, + 
.read = mv88q2xxx_hwmon_read, + .write = mv88q2xxx_hwmon_write, +}; + +static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = { + .ops = &mv88q2xxx_hwmon_hwmon_ops, + .info = mv88q2xxx_hwmon_info, +}; + +static int mv88q2xxx_hwmon_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct device *hwmon; + char *hwmon_name; + int ret; + + /* Enable temperature sense */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2, + MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0); + if (ret < 0) + return ret; + + hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev)); + if (IS_ERR(hwmon_name)) + return PTR_ERR(hwmon_name); + + hwmon = devm_hwmon_device_register_with_info(dev, + hwmon_name, + phydev, + &mv88q2xxx_hwmon_chip_info, + NULL); + + return PTR_ERR_OR_ZERO(hwmon); +} + +#else +static int mv88q2xxx_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} +#endif + +static int mv88q2xxx_probe(struct phy_device *phydev) +{ + return mv88q2xxx_hwmon_probe(phydev); +} + +static int mv88q222x_soft_reset(struct phy_device *phydev) +{ + int ret; + + /* Enable RESET of DCL */ + if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) { + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x48); + if (ret < 0) + return ret; + } + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_CTRL, + MDIO_PCS_1000BT1_CTRL_RESET); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xffe4, 0xc); + if (ret < 0) + return ret; + + /* Disable RESET of DCL */ + if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) + return phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x58); + + return 0; +} + +static int mv88q222x_revb0_config_init(struct phy_device *phydev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) { + ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad, + mv88q222x_revb0_init_seq0[i].regnum, + mv88q222x_revb0_init_seq0[i].val); + if (ret < 0) + return ret; + } + + usleep_range(5000, 10000); + + for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) { + ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad, + mv88q222x_revb0_init_seq1[i].regnum, + mv88q222x_revb0_init_seq1[i].val); + if (ret < 0) + return ret; + } + + return mv88q2xxx_config_init(phydev); +} + +static int mv88q222x_cable_test_start(struct phy_device *phydev) +{ + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF, 0x0058); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE, 0x00eb); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE, 0x010e); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET, + 0x0d90); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS, + MDIO_MMD_PCS_MV_TDR_STATUS_ON); + if (ret < 0) + return ret; + + /* According to the Marvell API the test is finished within 500 ms */ + msleep(500); + + return 0; +} + +static int mv88q222x_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int ret, status; + u32 dist; + + status = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS); + if (status < 0) + return status; + + ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET, + MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST | 0xd90); + if (ret < 0) + return ret; + + /* Test could not 
be finished */ + if (FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_MASK, status) != + MDIO_MMD_PCS_MV_TDR_STATUS_OFF) + return -ETIMEDOUT; + + *finished = true; + /* Fault length reported in meters, convert to centimeters */ + dist = FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK, status) * 100; + switch (status & MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK) { + case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN: + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_OPEN); + ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, + dist); + break; + case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT: + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT); + ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, + dist); + break; + case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK: + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_OK); + break; + default: + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC); + } + + return 0; +} + static struct phy_driver mv88q2xxx_driver[] = { { .phy_id = MARVELL_PHY_ID_88Q2110, @@ -246,8 +806,29 @@ static struct phy_driver mv88q2xxx_driver[] = { .read_status = mv88q2xxx_read_status, .soft_reset = mv88q2xxx_soft_reset, .set_loopback = genphy_c45_loopback, - .get_sqi = mv88q2xxxx_get_sqi, - .get_sqi_max = mv88q2xxxx_get_sqi_max, + .get_sqi = mv88q2xxx_get_sqi, + .get_sqi_max = mv88q2xxx_get_sqi_max, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), + .name = "mv88q2220", + .flags = PHY_POLL_CABLE_TEST, + .probe = mv88q2xxx_probe, + .get_features = mv88q2xxx_get_features, + .config_aneg = mv88q2xxx_config_aneg, + .aneg_done = genphy_c45_aneg_done, + .config_init = mv88q222x_revb0_config_init, + .read_status = mv88q2xxx_read_status, + .soft_reset = mv88q222x_soft_reset, + .config_intr = mv88q2xxx_config_intr, + .handle_interrupt = mv88q2xxx_handle_interrupt, + .set_loopback = genphy_c45_loopback, + .cable_test_start = mv88q222x_cable_test_start, + .cable_test_get_status = mv88q222x_cable_test_get_status, + .get_sqi = mv88q2xxx_get_sqi, + .get_sqi_max = mv88q2xxx_get_sqi_max, + .suspend = mv88q2xxx_suspend, + .resume = mv88q2xxx_resume, }, }; @@ -255,6 +836,7 @@ module_phy_driver(mv88q2xxx_driver); static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = { { MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK }, + { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), }, { /*sentinel*/ } }; MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl); diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c index ea1073adc5a1..b2d36a3a96f1 100644 --- a/drivers/net/phy/mxl-gpy.c +++ b/drivers/net/phy/mxl-gpy.c @@ -274,6 +274,14 @@ static int gpy_config_init(struct phy_device *phydev) return ret < 0 ? 
ret : 0; } +static int gpy21x_config_init(struct phy_device *phydev) +{ + __set_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, phydev->possible_interfaces); + + return gpy_config_init(phydev); +} + static int gpy_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; @@ -867,7 +875,7 @@ static struct phy_driver gpy_drivers[] = { .phy_id_mask = PHY_ID_GPY21xB_MASK, .name = "Maxlinear Ethernet GPY211B", .get_features = genphy_c45_pma_read_abilities, - .config_init = gpy_config_init, + .config_init = gpy21x_config_init, .probe = gpy_probe, .suspend = genphy_suspend, .resume = genphy_resume, @@ -884,7 +892,7 @@ static struct phy_driver gpy_drivers[] = { PHY_ID_MATCH_MODEL(PHY_ID_GPY211C), .name = "Maxlinear Ethernet GPY211C", .get_features = genphy_c45_pma_read_abilities, - .config_init = gpy_config_init, + .config_init = gpy21x_config_init, .probe = gpy_probe, .suspend = genphy_suspend, .resume = genphy_resume, @@ -902,7 +910,7 @@ static struct phy_driver gpy_drivers[] = { .phy_id_mask = PHY_ID_GPY21xB_MASK, .name = "Maxlinear Ethernet GPY212B", .get_features = genphy_c45_pma_read_abilities, - .config_init = gpy_config_init, + .config_init = gpy21x_config_init, .probe = gpy_probe, .suspend = genphy_suspend, .resume = genphy_resume, @@ -919,7 +927,7 @@ static struct phy_driver gpy_drivers[] = { PHY_ID_MATCH_MODEL(PHY_ID_GPY212C), .name = "Maxlinear Ethernet GPY212C", .get_features = genphy_c45_pma_read_abilities, - .config_init = gpy_config_init, + .config_init = gpy21x_config_init, .probe = gpy_probe, .suspend = genphy_suspend, .resume = genphy_resume, @@ -937,7 +945,7 @@ static struct phy_driver gpy_drivers[] = { .phy_id_mask = PHY_ID_GPYx15B_MASK, .name = "Maxlinear Ethernet GPY215B", .get_features = genphy_c45_pma_read_abilities, - .config_init = gpy_config_init, + .config_init = gpy21x_config_init, .probe = gpy_probe, .suspend = genphy_suspend, .resume = genphy_resume, @@ -954,7 +962,7 @@ static struct phy_driver gpy_drivers[] = { PHY_ID_MATCH_MODEL(PHY_ID_GPY215C), .name = "Maxlinear Ethernet GPY215C", .get_features = genphy_c45_pma_read_abilities, - .config_init = gpy_config_init, + .config_init = gpy21x_config_init, .probe = gpy_probe, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index c69568e7695e..fa5145c9328e 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -208,7 +208,8 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev) adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP | MDIO_AN_T1_ADV_L_PAUSE_ASYM; - adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L; + adv_m_mask = MDIO_AN_T1_ADV_M_1000BT1 | MDIO_AN_T1_ADV_M_100BT1 | + MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L; switch (phydev->master_slave_set) { case MASTER_SLAVE_CFG_MASTER_FORCE: |
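
The new hwmon callbacks in the marvell-88q2xxx changes above convert between hwmon's millidegree interface and the sensor's raw 8-bit value, which stores degrees Celsius offset by +75. Below is a minimal stand-alone sketch of that encoding, not kernel code: the helper names and the main() harness are illustrative, while the 75 degC offset and the -75..180 degC range are inferred from the conversions and clamp limits in the patch.

/*
 * Sketch of the temperature encoding used by the hwmon read/write
 * paths: raw = degC + 75 in an 8-bit field, hwmon values are in
 * millidegrees Celsius.  Illustrative only.
 */
#include <stdio.h>

#define MV_TEMP_OFFSET	75	/* raw 0 corresponds to -75 degC */
#define MV_TEMP_RAW_MAX	255	/* 8-bit field, i.e. up to +180 degC */

static long mv_raw_to_millicelsius(unsigned int raw)
{
	return ((long)raw - MV_TEMP_OFFSET) * 1000;
}

static unsigned int mv_millicelsius_to_raw(long mdeg)
{
	/* mirror the driver's clamp to the representable range */
	if (mdeg < -75000)
		mdeg = -75000;
	if (mdeg > 180000)
		mdeg = 180000;

	return (unsigned int)(mdeg / 1000 + MV_TEMP_OFFSET);
}

int main(void)
{
	long alarm_mdeg = 95000;	/* example threshold: 95 degC */
	unsigned int raw = mv_millicelsius_to_raw(alarm_mdeg);

	printf("95 degC -> raw 0x%02x -> %ld mdegC\n",
	       raw, mv_raw_to_millicelsius(raw));
	return 0;
}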
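
The cable-test completion path above reports the fault distance in whole meters and multiplies it by 100 because the ethtool netlink helpers expect centimeters. The following stand-alone sketch shows that decode flow; the SKETCH_* field positions are invented for illustration and do not reflect the real MDIO_MMD_PCS_MV_TDR_STATUS register layout.

/*
 * Illustrative decode of a TDR status word: extract the VCT result and
 * the fault distance (meters), scale the distance to centimeters.
 * Bit positions below are assumptions for the sketch only.
 */
#include <stdio.h>

#define SKETCH_TDR_DIST_SHIFT	8
#define SKETCH_TDR_DIST_MASK	(0xffu << SKETCH_TDR_DIST_SHIFT)
#define SKETCH_TDR_VCT_MASK	0x7u
#define SKETCH_TDR_VCT_OK	0x0u
#define SKETCH_TDR_VCT_OPEN	0x1u
#define SKETCH_TDR_VCT_SHORT	0x2u

static void sketch_report(unsigned int status)
{
	unsigned int dist_cm =
		((status & SKETCH_TDR_DIST_MASK) >> SKETCH_TDR_DIST_SHIFT) * 100;

	switch (status & SKETCH_TDR_VCT_MASK) {
	case SKETCH_TDR_VCT_OPEN:
		printf("pair A: open at %u cm\n", dist_cm);
		break;
	case SKETCH_TDR_VCT_SHORT:
		printf("pair A: short at %u cm\n", dist_cm);
		break;
	case SKETCH_TDR_VCT_OK:
		printf("pair A: ok\n");
		break;
	default:
		printf("pair A: unspecified result\n");
	}
}

int main(void)
{
	/* example: open circuit reported 7 m from the PHY */
	sketch_report((7u << SKETCH_TDR_DIST_SHIFT) | SKETCH_TDR_VCT_OPEN);
	return 0;
}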